diff options
Diffstat (limited to 'Testcases/vnc_api/gen/vnc_api_server_gen.py')
-rw-r--r-- | Testcases/vnc_api/gen/vnc_api_server_gen.py | 26142 |
1 files changed, 26142 insertions, 0 deletions
diff --git a/Testcases/vnc_api/gen/vnc_api_server_gen.py b/Testcases/vnc_api/gen/vnc_api_server_gen.py new file mode 100644 index 0000000..ac75e72 --- /dev/null +++ b/Testcases/vnc_api/gen/vnc_api_server_gen.py @@ -0,0 +1,26142 @@ + +# AUTO-GENERATED file from IFMapApiGenerator. Do Not Edit! + +from bottle import abort, request, response + +import gevent +import json +import uuid +from pprint import pformat + +import cfgm_common +from cfgm_common.rest import LinkObject, hdr_server_tenant +from cfgm_common.exceptions import * +from resource_xsd import * +from resource_common import * +from resource_server import * +import cStringIO +from lxml import etree + +all_resource_types = set([ + 'domain', + 'global-vrouter-config', + 'instance-ip', + 'network-policy', + 'loadbalancer-pool', + 'virtual-DNS-record', + 'route-target', + 'floating-ip', + 'floating-ip-pool', + 'physical-router', + 'bgp-router', + 'virtual-router', + 'config-root', + 'subnet', + 'global-system-config', + 'service-appliance', + 'service-instance', + 'namespace', + 'logical-interface', + 'route-table', + 'physical-interface', + 'access-control-list', + 'analytics-node', + 'virtual-DNS', + 'customer-attachment', + 'service-appliance-set', + 'config-node', + 'qos-queue', + 'virtual-machine', + 'interface-route-table', + 'service-template', + 'virtual-ip', + 'loadbalancer-member', + 'security-group', + 'provider-attachment', + 'virtual-machine-interface', + 'loadbalancer-healthmonitor', + 'virtual-network', + 'project', + 'qos-forwarding-class', + 'database-node', + 'routing-instance', + 'network-ipam', + 'logical-router', + ]) +class VncApiServerGen(object): + def __new__(cls, *args, **kwargs): + obj = super(VncApiServerGen, cls).__new__(cls, *args, **kwargs) + # leaf resource + obj.route('/domain/<id>', 'GET', obj.domain_http_get) + obj.route('/domain/<id>', 'PUT', obj.domain_http_put) + obj.route('/domain/<id>', 'DELETE', obj.domain_http_delete) + # collections + obj.route('/domains', 'POST', 
obj.domains_http_post) + obj.route('/domains', 'GET', obj.domains_http_get) + # leaf resource + obj.route('/global-vrouter-config/<id>', 'GET', obj.global_vrouter_config_http_get) + obj.route('/global-vrouter-config/<id>', 'PUT', obj.global_vrouter_config_http_put) + obj.route('/global-vrouter-config/<id>', 'DELETE', obj.global_vrouter_config_http_delete) + # collections + obj.route('/global-vrouter-configs', 'POST', obj.global_vrouter_configs_http_post) + obj.route('/global-vrouter-configs', 'GET', obj.global_vrouter_configs_http_get) + # leaf resource + obj.route('/instance-ip/<id>', 'GET', obj.instance_ip_http_get) + obj.route('/instance-ip/<id>', 'PUT', obj.instance_ip_http_put) + obj.route('/instance-ip/<id>', 'DELETE', obj.instance_ip_http_delete) + # collections + obj.route('/instance-ips', 'POST', obj.instance_ips_http_post) + obj.route('/instance-ips', 'GET', obj.instance_ips_http_get) + # leaf resource + obj.route('/network-policy/<id>', 'GET', obj.network_policy_http_get) + obj.route('/network-policy/<id>', 'PUT', obj.network_policy_http_put) + obj.route('/network-policy/<id>', 'DELETE', obj.network_policy_http_delete) + # collections + obj.route('/network-policys', 'POST', obj.network_policys_http_post) + obj.route('/network-policys', 'GET', obj.network_policys_http_get) + # leaf resource + obj.route('/loadbalancer-pool/<id>', 'GET', obj.loadbalancer_pool_http_get) + obj.route('/loadbalancer-pool/<id>', 'PUT', obj.loadbalancer_pool_http_put) + obj.route('/loadbalancer-pool/<id>', 'DELETE', obj.loadbalancer_pool_http_delete) + # collections + obj.route('/loadbalancer-pools', 'POST', obj.loadbalancer_pools_http_post) + obj.route('/loadbalancer-pools', 'GET', obj.loadbalancer_pools_http_get) + # leaf resource + obj.route('/virtual-DNS-record/<id>', 'GET', obj.virtual_DNS_record_http_get) + obj.route('/virtual-DNS-record/<id>', 'PUT', obj.virtual_DNS_record_http_put) + obj.route('/virtual-DNS-record/<id>', 'DELETE', obj.virtual_DNS_record_http_delete) + # 
collections + obj.route('/virtual-DNS-records', 'POST', obj.virtual_DNS_records_http_post) + obj.route('/virtual-DNS-records', 'GET', obj.virtual_DNS_records_http_get) + # leaf resource + obj.route('/route-target/<id>', 'GET', obj.route_target_http_get) + obj.route('/route-target/<id>', 'PUT', obj.route_target_http_put) + obj.route('/route-target/<id>', 'DELETE', obj.route_target_http_delete) + # collections + obj.route('/route-targets', 'POST', obj.route_targets_http_post) + obj.route('/route-targets', 'GET', obj.route_targets_http_get) + # leaf resource + obj.route('/floating-ip/<id>', 'GET', obj.floating_ip_http_get) + obj.route('/floating-ip/<id>', 'PUT', obj.floating_ip_http_put) + obj.route('/floating-ip/<id>', 'DELETE', obj.floating_ip_http_delete) + # collections + obj.route('/floating-ips', 'POST', obj.floating_ips_http_post) + obj.route('/floating-ips', 'GET', obj.floating_ips_http_get) + # leaf resource + obj.route('/floating-ip-pool/<id>', 'GET', obj.floating_ip_pool_http_get) + obj.route('/floating-ip-pool/<id>', 'PUT', obj.floating_ip_pool_http_put) + obj.route('/floating-ip-pool/<id>', 'DELETE', obj.floating_ip_pool_http_delete) + # collections + obj.route('/floating-ip-pools', 'POST', obj.floating_ip_pools_http_post) + obj.route('/floating-ip-pools', 'GET', obj.floating_ip_pools_http_get) + # leaf resource + obj.route('/physical-router/<id>', 'GET', obj.physical_router_http_get) + obj.route('/physical-router/<id>', 'PUT', obj.physical_router_http_put) + obj.route('/physical-router/<id>', 'DELETE', obj.physical_router_http_delete) + # collections + obj.route('/physical-routers', 'POST', obj.physical_routers_http_post) + obj.route('/physical-routers', 'GET', obj.physical_routers_http_get) + # leaf resource + obj.route('/bgp-router/<id>', 'GET', obj.bgp_router_http_get) + obj.route('/bgp-router/<id>', 'PUT', obj.bgp_router_http_put) + obj.route('/bgp-router/<id>', 'DELETE', obj.bgp_router_http_delete) + # collections + obj.route('/bgp-routers', 'POST', 
obj.bgp_routers_http_post) + obj.route('/bgp-routers', 'GET', obj.bgp_routers_http_get) + # leaf resource + obj.route('/virtual-router/<id>', 'GET', obj.virtual_router_http_get) + obj.route('/virtual-router/<id>', 'PUT', obj.virtual_router_http_put) + obj.route('/virtual-router/<id>', 'DELETE', obj.virtual_router_http_delete) + # collections + obj.route('/virtual-routers', 'POST', obj.virtual_routers_http_post) + obj.route('/virtual-routers', 'GET', obj.virtual_routers_http_get) + # leaf resource + obj.route('/subnet/<id>', 'GET', obj.subnet_http_get) + obj.route('/subnet/<id>', 'PUT', obj.subnet_http_put) + obj.route('/subnet/<id>', 'DELETE', obj.subnet_http_delete) + # collections + obj.route('/subnets', 'POST', obj.subnets_http_post) + obj.route('/subnets', 'GET', obj.subnets_http_get) + # leaf resource + obj.route('/global-system-config/<id>', 'GET', obj.global_system_config_http_get) + obj.route('/global-system-config/<id>', 'PUT', obj.global_system_config_http_put) + obj.route('/global-system-config/<id>', 'DELETE', obj.global_system_config_http_delete) + # collections + obj.route('/global-system-configs', 'POST', obj.global_system_configs_http_post) + obj.route('/global-system-configs', 'GET', obj.global_system_configs_http_get) + # leaf resource + obj.route('/service-appliance/<id>', 'GET', obj.service_appliance_http_get) + obj.route('/service-appliance/<id>', 'PUT', obj.service_appliance_http_put) + obj.route('/service-appliance/<id>', 'DELETE', obj.service_appliance_http_delete) + # collections + obj.route('/service-appliances', 'POST', obj.service_appliances_http_post) + obj.route('/service-appliances', 'GET', obj.service_appliances_http_get) + # leaf resource + obj.route('/service-instance/<id>', 'GET', obj.service_instance_http_get) + obj.route('/service-instance/<id>', 'PUT', obj.service_instance_http_put) + obj.route('/service-instance/<id>', 'DELETE', obj.service_instance_http_delete) + # collections + obj.route('/service-instances', 'POST', 
obj.service_instances_http_post) + obj.route('/service-instances', 'GET', obj.service_instances_http_get) + # leaf resource + obj.route('/namespace/<id>', 'GET', obj.namespace_http_get) + obj.route('/namespace/<id>', 'PUT', obj.namespace_http_put) + obj.route('/namespace/<id>', 'DELETE', obj.namespace_http_delete) + # collections + obj.route('/namespaces', 'POST', obj.namespaces_http_post) + obj.route('/namespaces', 'GET', obj.namespaces_http_get) + # leaf resource + obj.route('/logical-interface/<id>', 'GET', obj.logical_interface_http_get) + obj.route('/logical-interface/<id>', 'PUT', obj.logical_interface_http_put) + obj.route('/logical-interface/<id>', 'DELETE', obj.logical_interface_http_delete) + # collections + obj.route('/logical-interfaces', 'POST', obj.logical_interfaces_http_post) + obj.route('/logical-interfaces', 'GET', obj.logical_interfaces_http_get) + # leaf resource + obj.route('/route-table/<id>', 'GET', obj.route_table_http_get) + obj.route('/route-table/<id>', 'PUT', obj.route_table_http_put) + obj.route('/route-table/<id>', 'DELETE', obj.route_table_http_delete) + # collections + obj.route('/route-tables', 'POST', obj.route_tables_http_post) + obj.route('/route-tables', 'GET', obj.route_tables_http_get) + # leaf resource + obj.route('/physical-interface/<id>', 'GET', obj.physical_interface_http_get) + obj.route('/physical-interface/<id>', 'PUT', obj.physical_interface_http_put) + obj.route('/physical-interface/<id>', 'DELETE', obj.physical_interface_http_delete) + # collections + obj.route('/physical-interfaces', 'POST', obj.physical_interfaces_http_post) + obj.route('/physical-interfaces', 'GET', obj.physical_interfaces_http_get) + # leaf resource + obj.route('/access-control-list/<id>', 'GET', obj.access_control_list_http_get) + obj.route('/access-control-list/<id>', 'PUT', obj.access_control_list_http_put) + obj.route('/access-control-list/<id>', 'DELETE', obj.access_control_list_http_delete) + # collections + 
obj.route('/access-control-lists', 'POST', obj.access_control_lists_http_post) + obj.route('/access-control-lists', 'GET', obj.access_control_lists_http_get) + # leaf resource + obj.route('/analytics-node/<id>', 'GET', obj.analytics_node_http_get) + obj.route('/analytics-node/<id>', 'PUT', obj.analytics_node_http_put) + obj.route('/analytics-node/<id>', 'DELETE', obj.analytics_node_http_delete) + # collections + obj.route('/analytics-nodes', 'POST', obj.analytics_nodes_http_post) + obj.route('/analytics-nodes', 'GET', obj.analytics_nodes_http_get) + # leaf resource + obj.route('/virtual-DNS/<id>', 'GET', obj.virtual_DNS_http_get) + obj.route('/virtual-DNS/<id>', 'PUT', obj.virtual_DNS_http_put) + obj.route('/virtual-DNS/<id>', 'DELETE', obj.virtual_DNS_http_delete) + # collections + obj.route('/virtual-DNSs', 'POST', obj.virtual_DNSs_http_post) + obj.route('/virtual-DNSs', 'GET', obj.virtual_DNSs_http_get) + # leaf resource + obj.route('/customer-attachment/<id>', 'GET', obj.customer_attachment_http_get) + obj.route('/customer-attachment/<id>', 'PUT', obj.customer_attachment_http_put) + obj.route('/customer-attachment/<id>', 'DELETE', obj.customer_attachment_http_delete) + # collections + obj.route('/customer-attachments', 'POST', obj.customer_attachments_http_post) + obj.route('/customer-attachments', 'GET', obj.customer_attachments_http_get) + # leaf resource + obj.route('/service-appliance-set/<id>', 'GET', obj.service_appliance_set_http_get) + obj.route('/service-appliance-set/<id>', 'PUT', obj.service_appliance_set_http_put) + obj.route('/service-appliance-set/<id>', 'DELETE', obj.service_appliance_set_http_delete) + # collections + obj.route('/service-appliance-sets', 'POST', obj.service_appliance_sets_http_post) + obj.route('/service-appliance-sets', 'GET', obj.service_appliance_sets_http_get) + # leaf resource + obj.route('/config-node/<id>', 'GET', obj.config_node_http_get) + obj.route('/config-node/<id>', 'PUT', obj.config_node_http_put) + 
obj.route('/config-node/<id>', 'DELETE', obj.config_node_http_delete) + # collections + obj.route('/config-nodes', 'POST', obj.config_nodes_http_post) + obj.route('/config-nodes', 'GET', obj.config_nodes_http_get) + # leaf resource + obj.route('/qos-queue/<id>', 'GET', obj.qos_queue_http_get) + obj.route('/qos-queue/<id>', 'PUT', obj.qos_queue_http_put) + obj.route('/qos-queue/<id>', 'DELETE', obj.qos_queue_http_delete) + # collections + obj.route('/qos-queues', 'POST', obj.qos_queues_http_post) + obj.route('/qos-queues', 'GET', obj.qos_queues_http_get) + # leaf resource + obj.route('/virtual-machine/<id>', 'GET', obj.virtual_machine_http_get) + obj.route('/virtual-machine/<id>', 'PUT', obj.virtual_machine_http_put) + obj.route('/virtual-machine/<id>', 'DELETE', obj.virtual_machine_http_delete) + # collections + obj.route('/virtual-machines', 'POST', obj.virtual_machines_http_post) + obj.route('/virtual-machines', 'GET', obj.virtual_machines_http_get) + # leaf resource + obj.route('/interface-route-table/<id>', 'GET', obj.interface_route_table_http_get) + obj.route('/interface-route-table/<id>', 'PUT', obj.interface_route_table_http_put) + obj.route('/interface-route-table/<id>', 'DELETE', obj.interface_route_table_http_delete) + # collections + obj.route('/interface-route-tables', 'POST', obj.interface_route_tables_http_post) + obj.route('/interface-route-tables', 'GET', obj.interface_route_tables_http_get) + # leaf resource + obj.route('/service-template/<id>', 'GET', obj.service_template_http_get) + obj.route('/service-template/<id>', 'PUT', obj.service_template_http_put) + obj.route('/service-template/<id>', 'DELETE', obj.service_template_http_delete) + # collections + obj.route('/service-templates', 'POST', obj.service_templates_http_post) + obj.route('/service-templates', 'GET', obj.service_templates_http_get) + # leaf resource + obj.route('/virtual-ip/<id>', 'GET', obj.virtual_ip_http_get) + obj.route('/virtual-ip/<id>', 'PUT', obj.virtual_ip_http_put) + 
obj.route('/virtual-ip/<id>', 'DELETE', obj.virtual_ip_http_delete) + # collections + obj.route('/virtual-ips', 'POST', obj.virtual_ips_http_post) + obj.route('/virtual-ips', 'GET', obj.virtual_ips_http_get) + # leaf resource + obj.route('/loadbalancer-member/<id>', 'GET', obj.loadbalancer_member_http_get) + obj.route('/loadbalancer-member/<id>', 'PUT', obj.loadbalancer_member_http_put) + obj.route('/loadbalancer-member/<id>', 'DELETE', obj.loadbalancer_member_http_delete) + # collections + obj.route('/loadbalancer-members', 'POST', obj.loadbalancer_members_http_post) + obj.route('/loadbalancer-members', 'GET', obj.loadbalancer_members_http_get) + # leaf resource + obj.route('/security-group/<id>', 'GET', obj.security_group_http_get) + obj.route('/security-group/<id>', 'PUT', obj.security_group_http_put) + obj.route('/security-group/<id>', 'DELETE', obj.security_group_http_delete) + # collections + obj.route('/security-groups', 'POST', obj.security_groups_http_post) + obj.route('/security-groups', 'GET', obj.security_groups_http_get) + # leaf resource + obj.route('/provider-attachment/<id>', 'GET', obj.provider_attachment_http_get) + obj.route('/provider-attachment/<id>', 'PUT', obj.provider_attachment_http_put) + obj.route('/provider-attachment/<id>', 'DELETE', obj.provider_attachment_http_delete) + # collections + obj.route('/provider-attachments', 'POST', obj.provider_attachments_http_post) + obj.route('/provider-attachments', 'GET', obj.provider_attachments_http_get) + # leaf resource + obj.route('/virtual-machine-interface/<id>', 'GET', obj.virtual_machine_interface_http_get) + obj.route('/virtual-machine-interface/<id>', 'PUT', obj.virtual_machine_interface_http_put) + obj.route('/virtual-machine-interface/<id>', 'DELETE', obj.virtual_machine_interface_http_delete) + # collections + obj.route('/virtual-machine-interfaces', 'POST', obj.virtual_machine_interfaces_http_post) + obj.route('/virtual-machine-interfaces', 'GET', 
obj.virtual_machine_interfaces_http_get) + # leaf resource + obj.route('/loadbalancer-healthmonitor/<id>', 'GET', obj.loadbalancer_healthmonitor_http_get) + obj.route('/loadbalancer-healthmonitor/<id>', 'PUT', obj.loadbalancer_healthmonitor_http_put) + obj.route('/loadbalancer-healthmonitor/<id>', 'DELETE', obj.loadbalancer_healthmonitor_http_delete) + # collections + obj.route('/loadbalancer-healthmonitors', 'POST', obj.loadbalancer_healthmonitors_http_post) + obj.route('/loadbalancer-healthmonitors', 'GET', obj.loadbalancer_healthmonitors_http_get) + # leaf resource + obj.route('/virtual-network/<id>', 'GET', obj.virtual_network_http_get) + obj.route('/virtual-network/<id>', 'PUT', obj.virtual_network_http_put) + obj.route('/virtual-network/<id>', 'DELETE', obj.virtual_network_http_delete) + # collections + obj.route('/virtual-networks', 'POST', obj.virtual_networks_http_post) + obj.route('/virtual-networks', 'GET', obj.virtual_networks_http_get) + # leaf resource + obj.route('/project/<id>', 'GET', obj.project_http_get) + obj.route('/project/<id>', 'PUT', obj.project_http_put) + obj.route('/project/<id>', 'DELETE', obj.project_http_delete) + # collections + obj.route('/projects', 'POST', obj.projects_http_post) + obj.route('/projects', 'GET', obj.projects_http_get) + # leaf resource + obj.route('/qos-forwarding-class/<id>', 'GET', obj.qos_forwarding_class_http_get) + obj.route('/qos-forwarding-class/<id>', 'PUT', obj.qos_forwarding_class_http_put) + obj.route('/qos-forwarding-class/<id>', 'DELETE', obj.qos_forwarding_class_http_delete) + # collections + obj.route('/qos-forwarding-classs', 'POST', obj.qos_forwarding_classs_http_post) + obj.route('/qos-forwarding-classs', 'GET', obj.qos_forwarding_classs_http_get) + # leaf resource + obj.route('/database-node/<id>', 'GET', obj.database_node_http_get) + obj.route('/database-node/<id>', 'PUT', obj.database_node_http_put) + obj.route('/database-node/<id>', 'DELETE', obj.database_node_http_delete) + # collections + 
obj.route('/database-nodes', 'POST', obj.database_nodes_http_post) + obj.route('/database-nodes', 'GET', obj.database_nodes_http_get) + # leaf resource + obj.route('/routing-instance/<id>', 'GET', obj.routing_instance_http_get) + obj.route('/routing-instance/<id>', 'PUT', obj.routing_instance_http_put) + obj.route('/routing-instance/<id>', 'DELETE', obj.routing_instance_http_delete) + # collections + obj.route('/routing-instances', 'POST', obj.routing_instances_http_post) + obj.route('/routing-instances', 'GET', obj.routing_instances_http_get) + # leaf resource + obj.route('/network-ipam/<id>', 'GET', obj.network_ipam_http_get) + obj.route('/network-ipam/<id>', 'PUT', obj.network_ipam_http_put) + obj.route('/network-ipam/<id>', 'DELETE', obj.network_ipam_http_delete) + # collections + obj.route('/network-ipams', 'POST', obj.network_ipams_http_post) + obj.route('/network-ipams', 'GET', obj.network_ipams_http_get) + # leaf resource + obj.route('/logical-router/<id>', 'GET', obj.logical_router_http_get) + obj.route('/logical-router/<id>', 'PUT', obj.logical_router_http_put) + obj.route('/logical-router/<id>', 'DELETE', obj.logical_router_http_delete) + # collections + obj.route('/logical-routers', 'POST', obj.logical_routers_http_post) + obj.route('/logical-routers', 'GET', obj.logical_routers_http_get) + return obj + #end __new__ + + def __init__(self): + self._db_conn = None + self._get_common = None + self._post_common = None + + self._resource_classes = {} + self._resource_classes['domain'] = DomainServerGen + + self._resource_classes['global-vrouter-config'] = GlobalVrouterConfigServerGen + + self._resource_classes['instance-ip'] = InstanceIpServerGen + + self._resource_classes['network-policy'] = NetworkPolicyServerGen + + self._resource_classes['loadbalancer-pool'] = LoadbalancerPoolServerGen + + self._resource_classes['virtual-DNS-record'] = VirtualDnsRecordServerGen + + self._resource_classes['route-target'] = RouteTargetServerGen + + 
self._resource_classes['floating-ip'] = FloatingIpServerGen + + self._resource_classes['floating-ip-pool'] = FloatingIpPoolServerGen + + self._resource_classes['physical-router'] = PhysicalRouterServerGen + + self._resource_classes['bgp-router'] = BgpRouterServerGen + + self._resource_classes['virtual-router'] = VirtualRouterServerGen + + self._resource_classes['config-root'] = ConfigRootServerGen + + self._resource_classes['subnet'] = SubnetServerGen + + self._resource_classes['global-system-config'] = GlobalSystemConfigServerGen + + self._resource_classes['service-appliance'] = ServiceApplianceServerGen + + self._resource_classes['service-instance'] = ServiceInstanceServerGen + + self._resource_classes['namespace'] = NamespaceServerGen + + self._resource_classes['logical-interface'] = LogicalInterfaceServerGen + + self._resource_classes['route-table'] = RouteTableServerGen + + self._resource_classes['physical-interface'] = PhysicalInterfaceServerGen + + self._resource_classes['access-control-list'] = AccessControlListServerGen + + self._resource_classes['analytics-node'] = AnalyticsNodeServerGen + + self._resource_classes['virtual-DNS'] = VirtualDnsServerGen + + self._resource_classes['customer-attachment'] = CustomerAttachmentServerGen + + self._resource_classes['service-appliance-set'] = ServiceApplianceSetServerGen + + self._resource_classes['config-node'] = ConfigNodeServerGen + + self._resource_classes['qos-queue'] = QosQueueServerGen + + self._resource_classes['virtual-machine'] = VirtualMachineServerGen + + self._resource_classes['interface-route-table'] = InterfaceRouteTableServerGen + + self._resource_classes['service-template'] = ServiceTemplateServerGen + + self._resource_classes['virtual-ip'] = VirtualIpServerGen + + self._resource_classes['loadbalancer-member'] = LoadbalancerMemberServerGen + + self._resource_classes['security-group'] = SecurityGroupServerGen + + self._resource_classes['provider-attachment'] = ProviderAttachmentServerGen + + 
self._resource_classes['virtual-machine-interface'] = VirtualMachineInterfaceServerGen + + self._resource_classes['loadbalancer-healthmonitor'] = LoadbalancerHealthmonitorServerGen + + self._resource_classes['virtual-network'] = VirtualNetworkServerGen + + self._resource_classes['project'] = ProjectServerGen + + self._resource_classes['qos-forwarding-class'] = QosForwardingClassServerGen + + self._resource_classes['database-node'] = DatabaseNodeServerGen + + self._resource_classes['routing-instance'] = RoutingInstanceServerGen + + self._resource_classes['network-ipam'] = NetworkIpamServerGen + + self._resource_classes['logical-router'] = LogicalRouterServerGen + + + # Generate LinkObjects for all entities + links = [] + # Link for root + links.append(LinkObject('root', self._base_url , '/config-root', + 'config-root')) + + # Link for collections + link = LinkObject('collection', + self._base_url , '/domains', + 'domain') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/global-vrouter-configs', + 'global-vrouter-config') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/instance-ips', + 'instance-ip') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/network-policys', + 'network-policy') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/loadbalancer-pools', + 'loadbalancer-pool') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/virtual-DNS-records', + 'virtual-DNS-record') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/route-targets', + 'route-target') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/floating-ips', + 'floating-ip') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/floating-ip-pools', + 'floating-ip-pool') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/physical-routers', + 'physical-router') + 
links.append(link) + + link = LinkObject('collection', + self._base_url , '/bgp-routers', + 'bgp-router') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/virtual-routers', + 'virtual-router') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/subnets', + 'subnet') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/global-system-configs', + 'global-system-config') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/service-appliances', + 'service-appliance') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/service-instances', + 'service-instance') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/namespaces', + 'namespace') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/logical-interfaces', + 'logical-interface') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/route-tables', + 'route-table') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/physical-interfaces', + 'physical-interface') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/access-control-lists', + 'access-control-list') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/analytics-nodes', + 'analytics-node') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/virtual-DNSs', + 'virtual-DNS') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/customer-attachments', + 'customer-attachment') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/service-appliance-sets', + 'service-appliance-set') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/config-nodes', + 'config-node') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/qos-queues', + 'qos-queue') + links.append(link) 
+ + link = LinkObject('collection', + self._base_url , '/virtual-machines', + 'virtual-machine') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/interface-route-tables', + 'interface-route-table') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/service-templates', + 'service-template') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/virtual-ips', + 'virtual-ip') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/loadbalancer-members', + 'loadbalancer-member') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/security-groups', + 'security-group') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/provider-attachments', + 'provider-attachment') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/virtual-machine-interfaces', + 'virtual-machine-interface') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/loadbalancer-healthmonitors', + 'loadbalancer-healthmonitor') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/virtual-networks', + 'virtual-network') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/projects', + 'project') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/qos-forwarding-classs', + 'qos-forwarding-class') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/database-nodes', + 'database-node') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/routing-instances', + 'routing-instance') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/network-ipams', + 'network-ipam') + links.append(link) + + link = LinkObject('collection', + self._base_url , '/logical-routers', + 'logical-router') + links.append(link) + + # Link for Resources Base + link = LinkObject('resource-base', 
+ self._base_url , '/domain', + 'domain') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/global-vrouter-config', + 'global-vrouter-config') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/instance-ip', + 'instance-ip') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/network-policy', + 'network-policy') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/loadbalancer-pool', + 'loadbalancer-pool') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/virtual-DNS-record', + 'virtual-DNS-record') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/route-target', + 'route-target') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/floating-ip', + 'floating-ip') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/floating-ip-pool', + 'floating-ip-pool') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/physical-router', + 'physical-router') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/bgp-router', + 'bgp-router') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/virtual-router', + 'virtual-router') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/config-root', + 'config-root') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/subnet', + 'subnet') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/global-system-config', + 'global-system-config') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/service-appliance', + 'service-appliance') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/service-instance', + 'service-instance') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/namespace', 
+ 'namespace') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/logical-interface', + 'logical-interface') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/route-table', + 'route-table') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/physical-interface', + 'physical-interface') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/access-control-list', + 'access-control-list') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/analytics-node', + 'analytics-node') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/virtual-DNS', + 'virtual-DNS') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/customer-attachment', + 'customer-attachment') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/service-appliance-set', + 'service-appliance-set') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/config-node', + 'config-node') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/qos-queue', + 'qos-queue') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/virtual-machine', + 'virtual-machine') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/interface-route-table', + 'interface-route-table') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/service-template', + 'service-template') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/virtual-ip', + 'virtual-ip') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/loadbalancer-member', + 'loadbalancer-member') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/security-group', + 'security-group') + links.append(link) + link = LinkObject('resource-base', + self._base_url , 
'/provider-attachment', + 'provider-attachment') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/virtual-machine-interface', + 'virtual-machine-interface') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/loadbalancer-healthmonitor', + 'loadbalancer-healthmonitor') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/virtual-network', + 'virtual-network') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/project', + 'project') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/qos-forwarding-class', + 'qos-forwarding-class') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/database-node', + 'database-node') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/routing-instance', + 'routing-instance') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/network-ipam', + 'network-ipam') + links.append(link) + link = LinkObject('resource-base', + self._base_url , '/logical-router', + 'logical-router') + links.append(link) + self._homepage_links = links + #end __init__ + + def is_admin_request(self): + env = request.headers.environ + for field in ('HTTP_X_API_ROLE', 'HTTP_X_ROLE'): + if field in env: + roles = env[field].split(',') + return 'admin' in [x.lower() for x in roles] + return False + + def domain_http_get(self, id): + try: + self._extension_mgrs['resourceApi'].map_method('pre_domain_read', id) + except Exception as e: + pass + + # TODO get vals from request out of the global ASAP + etag = request.headers.get('If-None-Match') + try: + obj_type = self._db_conn.uuid_to_obj_type(id) + except NoIdError: + obj_type = None + if obj_type != 'domain': + abort(404, 'No domain object found for id %s' %(id)) + # common handling for all resource get + (ok, result) = self._get_common(request, id) + if not ok: + (code, msg) = result + 
            self.config_object_error(id, None, 'domain', 'http_get', msg)
            abort(code, msg)

        # type-specific hook
        r_class = self.get_resource_class('domain')
        if r_class:
            r_class.http_get(id)

        db_conn = self._db_conn
        # Conditional-GET support: if the client sent If-None-Match, ask the
        # db layer whether the stored object still matches that tag.
        # NOTE(review): presumably dbe_is_latest compares against the
        # 'last_modified' id_perms value that the ETag header is built from
        # below -- confirm in the db layer.
        if etag:
            obj_ids = {'uuid': id}
            (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
            if not ok:
                # Not present in DB
                self.config_object_error(id, None, 'domain', 'http_get', result)
                abort(404, result)

            is_latest = result
            if is_latest:
                # send Not-Modified, caches use this for read optimization
                response.status = 304
                return
        #end if etag

        obj_ids = {'uuid': id}

        # Generate field list for db layer
        properties = [u'domain_limits', u'api_access_list', u'id_perms', u'display_name']
        references = []
        back_references = [u'config_root_back_refs']
        children = [u'projects', u'namespaces', 'service_templates', u'virtual_DNSs']
        # 'fields' query param overrides everything; otherwise start from
        # props + refs and let exclude_back_refs / exclude_children trim the
        # default set.
        if 'fields' in request.query:
            obj_fields = request.query.fields.split(',')
        else: # default props + children + refs + backrefs
            obj_fields = properties + references
            if 'exclude_back_refs' not in request.query:
                obj_fields = obj_fields + back_references
            if 'exclude_children' not in request.query:
                obj_fields = obj_fields + children

        (ok, result) = db_conn.dbe_read('domain', obj_ids, obj_fields)
        if not ok:
            # Not present in DB
            self.config_object_error(id, None, 'domain', 'http_get', result)
            abort(404, result)

        # check visibility: non-admin callers get a 404 (not 403) for
        # objects flagged user_visible=False, so existence is not leaked
        if (not result['id_perms'].get('user_visible', True) and
            not self.is_admin_request()):
            result = 'This object is not visible by users: %s' % id
            self.config_object_error(id, None, 'domain', 'http_get', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('domain', id)
        rsp_body['name'] = result['fq_name'][-1]
        rsp_body.update(result)
        id_perms = result['id_perms']
        # ETag is the quoted last-modified stamp; pairs with the
        # If-None-Match handling above
        response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
        try:
self._extension_mgrs['resourceApi'].map_method('post_domain_read', id, rsp_body) + except Exception as e: + pass + + return {'domain': rsp_body} + #end domain_http_get + + def domain_http_put(self, id): + key = 'domain' + obj_dict = request.json[key] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_domain_update', id, obj_dict) + except Exception as e: + pass + + db_conn = self._db_conn + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'domain': + abort(404, 'No domain object found for id %s' %(id)) + fq_name = db_conn.uuid_to_fq_name(id) + except NoIdError as e: + abort(404, str(e)) + prop_dict = obj_dict.get('domain_limits') + if prop_dict: + buf = cStringIO.StringIO() + xx_domain_limits = DomainLimitsType(**prop_dict) + xx_domain_limits.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_domain_limits = DomainLimitsType() + try: + xx_domain_limits.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('api_access_list') + if prop_dict: + buf = cStringIO.StringIO() + xx_api_access_list = ApiAccessListType(**prop_dict) + xx_api_access_list.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_api_access_list = ApiAccessListType() + try: + xx_api_access_list.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource put + (ok, result) = self._put_common(request, 'domain', id, fq_name, obj_dict) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'domain', 'http_put', msg) + abort(code, msg) + + # Validate perms + objtype_list = [u'project', u'namespace', 'service_template', u'virtual_DNS'] + for obj_type in objtype_list: + refs = 
obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + try: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + except NoIdError as e: + abort(404, str(e)) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # type-specific hook + r_class = self.get_resource_class('domain') + if r_class: + (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn) + if not ok: + (code, msg) = put_result + self.config_object_error(id, None, 'domain', 'http_put', msg) + abort(code, msg) + callable = getattr(r_class, 'http_put_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn])) + + obj_ids = {'uuid': id} + try: + (ok, result) = db_conn.dbe_update('domain', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'domain', 'http_put', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('domain', id) + + try: + self._extension_mgrs['resourceApi'].map_method('post_domain_update', id, obj_dict) + except Exception as e: + pass + + return {'domain': rsp_body} + #end domain_http_put + + def domain_http_delete(self, id): + db_conn = self._db_conn + # if obj doesn't exist return early + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'domain': + abort(404, 'No domain object found for id %s' %(id)) + _ = db_conn.uuid_to_fq_name(id) + except NoIdError: + abort(404, 'ID %s does not exist' %(id)) + + try: + self._extension_mgrs['resourceApi'].map_method('pre_domain_delete', id) + except Exception as e: + pass + + # read in obj from db (accepting error) to get 
details of it + obj_ids = {'uuid': id} + back_references = [u'config_root_back_refs'] + children = [u'projects', u'namespaces', 'service_templates', u'virtual_DNSs'] + obj_fields = children + back_references + (read_ok, read_result) = db_conn.dbe_read('domain', obj_ids, obj_fields) + if not read_ok: + if read_result.startswith('Unknown id:'): + abort(404, 'ID %s does not exist' %(id)) + else: + self.config_object_error(id, None, 'domain', 'http_delete', read_result) + # proceed down to delete the resource + + # common handling for all resource delete + parent_type = read_result.get('parent_type', None) + (ok, del_result) = self._delete_common(request, 'domain', id, parent_type) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'domain', 'http_delete', msg) + abort(code, msg) + + fq_name = read_result['fq_name'] + ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('domain', fq_name) + obj_ids['imid'] = ifmap_id + if parent_type: + parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1]) + obj_ids['parent_imid'] = parent_imid + + # State modification starts from here. 
Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + + # type-specific hook + r_class = self.get_resource_class('domain') + if r_class: + if read_ok: + # fail if non-default children or backrefs exist + projects = read_result.get('projects', None) + if projects: + has_infos = read_result['projects'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-project')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'domain', 'http_delete', err_msg) + abort(409, err_msg) + + namespaces = read_result.get('namespaces', None) + if namespaces: + has_infos = read_result['namespaces'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-namespace')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'domain', 'http_delete', err_msg) + abort(409, err_msg) + + service_templates = read_result.get('service_templates', None) + if service_templates: + has_infos = read_result['service_templates'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-service-template')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'domain', 'http_delete', err_msg) + abort(409, err_msg) + + virtual_DNSs = read_result.get('virtual_DNSs', None) + if virtual_DNSs: + has_infos = read_result['virtual_DNSs'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-virtual-DNS')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, 
None, 'domain', 'http_delete', err_msg) + abort(409, err_msg) + + + # Delete default children first + self._domain_delete_default_children(read_result) + + (ok, del_result) = r_class.http_delete(id, read_result, db_conn) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'domain', 'http_delete', msg) + abort(code, msg) + callable = getattr(r_class, 'http_delete_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, read_result, db_conn])) + #end if read_ok + + try: + (ok, del_result) = db_conn.dbe_delete('domain', obj_ids, read_result) + except Exception as e: + ok = False + del_result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'domain', 'http_delete', del_result) + abort(409, del_result) + + try: + self._extension_mgrs['resourceApi'].map_method('post_domain_delete', id, read_result) + except Exception as e: + pass + + #end domain_http_delete + + def domains_http_post(self): + key = 'domain' + obj_dict = request.json[key] + self._post_validate(key, obj_dict=obj_dict) + fq_name = obj_dict['fq_name'] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_domain_create', obj_dict) + except Exception as e: + pass + + prop_dict = obj_dict.get('domain_limits') + if prop_dict: + buf = cStringIO.StringIO() + xx_domain_limits = DomainLimitsType(**prop_dict) + xx_domain_limits.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_domain_limits = DomainLimitsType() + try: + xx_domain_limits.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('api_access_list') + if prop_dict: + buf = cStringIO.StringIO() + xx_api_access_list = ApiAccessListType(**prop_dict) + xx_api_access_list.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_api_access_list = ApiAccessListType() + try: + xx_api_access_list.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = 
obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource create + (ok, result) = self._post_common(request, 'domain', obj_dict) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict.get('fq_name', [])) + self.config_object_error(None, fq_name_str, 'domain', 'http_post', msg) + abort(code, msg) + + name = obj_dict['fq_name'][-1] + fq_name = obj_dict['fq_name'] + + db_conn = self._db_conn + + # if client gave parent_type of config-root, ignore and remove + if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root': + del obj_dict['parent_type'] + + if 'parent_type' in obj_dict: + # non config-root child, verify parent exists + parent_type = obj_dict['parent_type'] + parent_fq_name = obj_dict['fq_name'][:-1] + try: + parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name) + (ok, status) = self._permissions.check_perms_write(request, parent_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + self._permissions.set_user_role(request, obj_dict) + except NoIdError: + err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist' + fq_name_str = ':'.join(parent_fq_name) + self.config_object_error(None, fq_name_str, 'domain', 'http_post', err_msg) + abort(400, err_msg) + + # Validate perms + objtype_list = [u'project', u'namespace', 'service_template', u'virtual_DNS'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. 
Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # Alloc and Store id-mappings before creating entry on pubsub store. + # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('domain', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'domain', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['domain', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('domain') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'domain', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('domain', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'domain', 'http_post', result) + abort(404, result) + + rsp_body = {} + rsp_body['name'] = name + rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = self.generate_url('domain', obj_ids['uuid']) + if 'parent_type' in obj_dict: + # non config-root child, send back parent uuid/href + 
            rsp_body['parent_uuid'] = parent_uuid
            rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)

        # post-create extension hook; failures are deliberately best-effort
        try:
            self._extension_mgrs['resourceApi'].map_method('post_domain_create', obj_dict)
        except Exception as e:
            pass

        return {'domain': rsp_body}
    #end domains_http_post

    def domains_http_get(self):
        # gather list of uuids using 1. any specified anchors
        # 2. any specified filters
        # if not 'detail' return list with any specified 'fields'
        # if 'detail' return list with props+refs + any specified 'fields'

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')
        # Anchors: restrict the listing by parent (fq-name+type or ids)
        # and/or by back-reference ids and/or by explicit object uuids.
        parent_uuids = None
        back_ref_uuids = None
        obj_uuids = None
        if (('parent_fq_name_str' in request.query) and
            ('parent_type' in request.query)):
            parent_fq_name = request.query.parent_fq_name_str.split(':')
            parent_type = request.query.parent_type
            parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
        elif 'parent_id' in request.query:
            parent_ids = request.query.parent_id.split(',')
            # uuid.UUID() both validates and canonicalizes the id strings
            parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
        if 'back_ref_id' in request.query:
            back_ref_ids = request.query.back_ref_id.split(',')
            back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
        if 'obj_uuids' in request.query:
            obj_uuids = request.query.obj_uuids.split(',')

        # common handling for all resource get
        (ok, result) = self._get_common(request, parent_uuids)
        if not ok:
            (code, msg) = result
            self.config_object_error(None, None, 'domains', 'http_get_collection', msg)
            abort(code, msg)

        # 'count' is matched by substring, so e.g. ?count=true selects
        # count-only mode
        if 'count' in request.query:
            count = 'true' in request.query.count.lower()
        else:
            count = False

        # Filters arrive as comma-separated name==value pairs
        filter_params = request.query.filters
        if filter_params:
            try:
                ff_key_vals = filter_params.split(',')
                ff_names = [ff.split('==')[0] for ff in ff_key_vals]
                ff_values = [ff.split('==')[1] for ff in ff_key_vals]
                filters = {'field_names': ff_names, 'field_values': ff_values}
except Exception as e: + abort(400, 'Invalid filter ' + filter_params) + else: + filters = None + db_conn = self._db_conn + (ok, result) = \ + db_conn.dbe_list('domain', parent_uuids, back_ref_uuids, obj_uuids, count, filters) + if not ok: + self.config_object_error(None, None, 'domains', 'http_get_collection', result) + abort(404, result) + + # If only counting, return early + if count: + return {'domains': {'count': result}} + + if 'detail' in request.query: + detail = 'true' in request.query.detail.lower() + else: + detail = False + + fq_names_uuids = result + obj_dicts = [] + if not detail: + if not self.is_admin_request(): + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms'] + (ok, result) = db_conn.dbe_read_multi('domain', obj_ids_list, obj_fields) + if not ok: + result = [] + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + if obj_result['id_perms'].get('user_visible', True): + obj_dict = {} + obj_dict['uuid'] = obj_result['uuid'] + obj_dict['href'] = self.generate_url('domain', obj_result['uuid']) + obj_dict['fq_name'] = obj_result['fq_name'] + obj_dicts.append(obj_dict) + else: + for fq_name, obj_uuid in fq_names_uuids: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['uuid'] = obj_uuid + obj_dict['href'] = self.generate_url('domain', obj_uuid) + obj_dict['fq_name'] = fq_name + obj_dicts.append(obj_dict) + else: #detail + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'domain_limits', u'api_access_list', u'id_perms', u'display_name'] + [] + if 'fields' in request.query: + obj_fields.extend(request.query.fields.split(',')) + (ok, result) = db_conn.dbe_read_multi('domain', obj_ids_list, obj_fields) + + if not ok: + result = [] + + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['name'] = obj_result['fq_name'][-1] + obj_dict['href'] = 
self.generate_url('domain', obj_result['uuid']) + obj_dict.update(obj_result) + if (obj_dict['id_perms'].get('user_visible', True) or + self.is_admin_request()): + obj_dicts.append({'domain': obj_dict}) + + return {'domains': obj_dicts} + #end domains_http_get + + def _domain_create_default_children(self, parent_obj): + # Create a default child only if provisioned for + r_class = self.get_resource_class('project') + if r_class and r_class.generate_default_instance: + child_obj = Project(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('project') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('project', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('project', obj_ids, child_dict) + self._project_create_default_children(child_obj) + + # Create a default child only if provisioned for + r_class = self.get_resource_class('namespace') + if r_class and r_class.generate_default_instance: + child_obj = Namespace(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('namespace') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('namespace', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('namespace', obj_ids, child_dict) + self._namespace_create_default_children(child_obj) + + # Create a default child only if provisioned for + r_class = self.get_resource_class('service-template') + if r_class and r_class.generate_default_instance: + child_obj = ServiceTemplate(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('service-template') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('service-template', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + 
db_conn.dbe_create('service-template', obj_ids, child_dict) + self._service_template_create_default_children(child_obj) + + # Create a default child only if provisioned for + r_class = self.get_resource_class('virtual-DNS') + if r_class and r_class.generate_default_instance: + child_obj = VirtualDns(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('virtual-DNS') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('virtual-DNS', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('virtual-DNS', obj_ids, child_dict) + self._virtual_DNS_create_default_children(child_obj) + + pass + #end _domain_create_default_children + + def _domain_delete_default_children(self, parent_dict): + # Delete a default child only if provisioned for + r_class = self.get_resource_class('virtual-DNS') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('projects') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-project': + default_child_id = has_info['href'].split('/')[-1] + self.project_http_delete(default_child_id) + break + + # Delete a default child only if provisioned for + r_class = self.get_resource_class('virtual-DNS') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('namespaces') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-namespace': + default_child_id = has_info['href'].split('/')[-1] + self.namespace_http_delete(default_child_id) + break + + # Delete a default child only if provisioned for + r_class = self.get_resource_class('virtual-DNS') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('service_templates') + if has_infos: + for has_info 
in has_infos: + if has_info['to'][-1] == 'default-service-template': + default_child_id = has_info['href'].split('/')[-1] + self.service_template_http_delete(default_child_id) + break + + # Delete a default child only if provisioned for + r_class = self.get_resource_class('virtual-DNS') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('virtual_DNSs') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-virtual-DNS': + default_child_id = has_info['href'].split('/')[-1] + self.virtual_DNS_http_delete(default_child_id) + break + + pass + #end _domain_delete_default_children + + def global_vrouter_config_http_get(self, id): + try: + self._extension_mgrs['resourceApi'].map_method('pre_global_vrouter_config_read', id) + except Exception as e: + pass + + # TODO get vals from request out of the global ASAP + etag = request.headers.get('If-None-Match') + try: + obj_type = self._db_conn.uuid_to_obj_type(id) + except NoIdError: + obj_type = None + if obj_type != 'global_vrouter_config': + abort(404, 'No global-vrouter-config object found for id %s' %(id)) + # common handling for all resource get + (ok, result) = self._get_common(request, id) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'global_vrouter_config', 'http_get', msg) + abort(code, msg) + + # type-specific hook + r_class = self.get_resource_class('global-vrouter-config') + if r_class: + r_class.http_get(id) + + db_conn = self._db_conn + if etag: + obj_ids = {'uuid': id} + (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', '')) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'global_vrouter_config', 'http_get', result) + abort(404, result) + + is_latest = result + if is_latest: + # send Not-Modified, caches use this for read optimization + response.status = 304 + return + #end if etag + + obj_ids = {'uuid': id} + + # Generate field list for db layer 
+ properties = [u'linklocal_services', u'encapsulation_priorities', u'vxlan_network_identifier_mode', u'id_perms', u'display_name'] + references = [] + back_references = [u'global_system_config_back_refs'] + children = [] + if 'fields' in request.query: + obj_fields = request.query.fields.split(',') + else: # default props + children + refs + backrefs + obj_fields = properties + references + if 'exclude_back_refs' not in request.query: + obj_fields = obj_fields + back_references + if 'exclude_children' not in request.query: + obj_fields = obj_fields + children + + (ok, result) = db_conn.dbe_read('global-vrouter-config', obj_ids, obj_fields) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'global_vrouter_config', 'http_get', result) + abort(404, result) + + # check visibility + if (not result['id_perms'].get('user_visible', True) and + not self.is_admin_request()): + result = 'This object is not visible by users: %s' % id + self.config_object_error(id, None, 'global_vrouter_config', 'http_get', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('global-vrouter-config', id) + rsp_body['name'] = result['fq_name'][-1] + rsp_body.update(result) + id_perms = result['id_perms'] + response.set_header('ETag', '"' + id_perms['last_modified'] + '"') + try: + self._extension_mgrs['resourceApi'].map_method('post_global_vrouter_config_read', id, rsp_body) + except Exception as e: + pass + + return {'global-vrouter-config': rsp_body} + #end global_vrouter_config_http_get + + def global_vrouter_config_http_put(self, id): + key = 'global-vrouter-config' + obj_dict = request.json[key] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_global_vrouter_config_update', id, obj_dict) + except Exception as e: + pass + + db_conn = self._db_conn + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'global_vrouter_config': + abort(404, 'No global-vrouter-config object found for id %s' 
%(id)) + fq_name = db_conn.uuid_to_fq_name(id) + except NoIdError as e: + abort(404, str(e)) + prop_dict = obj_dict.get('linklocal_services') + if prop_dict: + buf = cStringIO.StringIO() + xx_linklocal_services = LinklocalServicesTypes(**prop_dict) + xx_linklocal_services.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_linklocal_services = LinklocalServicesTypes() + try: + xx_linklocal_services.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('encapsulation_priorities') + if prop_dict: + buf = cStringIO.StringIO() + xx_encapsulation_priorities = EncapsulationPrioritiesType(**prop_dict) + xx_encapsulation_priorities.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_encapsulation_priorities = EncapsulationPrioritiesType() + try: + xx_encapsulation_priorities.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource put + (ok, result) = self._put_common(request, 'global_vrouter_config', id, fq_name, obj_dict) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'global_vrouter_config', 'http_put', msg) + abort(code, msg) + + + # State modification starts from here. 
Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # type-specific hook + r_class = self.get_resource_class('global-vrouter-config') + if r_class: + (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn) + if not ok: + (code, msg) = put_result + self.config_object_error(id, None, 'global_vrouter_config', 'http_put', msg) + abort(code, msg) + callable = getattr(r_class, 'http_put_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn])) + + obj_ids = {'uuid': id} + try: + (ok, result) = db_conn.dbe_update('global-vrouter-config', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'global_vrouter_config', 'http_put', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('global-vrouter-config', id) + + try: + self._extension_mgrs['resourceApi'].map_method('post_global_vrouter_config_update', id, obj_dict) + except Exception as e: + pass + + return {'global-vrouter-config': rsp_body} + #end global_vrouter_config_http_put + + def global_vrouter_config_http_delete(self, id): + db_conn = self._db_conn + # if obj doesn't exist return early + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'global_vrouter_config': + abort(404, 'No global-vrouter-config object found for id %s' %(id)) + _ = db_conn.uuid_to_fq_name(id) + except NoIdError: + abort(404, 'ID %s does not exist' %(id)) + + try: + self._extension_mgrs['resourceApi'].map_method('pre_global_vrouter_config_delete', id) + except Exception as e: + pass + + # read in obj from db (accepting error) to get details of it + obj_ids = {'uuid': id} + back_references = [u'global_system_config_back_refs'] + children = [] + obj_fields = children + back_references + (read_ok, read_result) = 
db_conn.dbe_read('global-vrouter-config', obj_ids, obj_fields) + if not read_ok: + if read_result.startswith('Unknown id:'): + abort(404, 'ID %s does not exist' %(id)) + else: + self.config_object_error(id, None, 'global_vrouter_config', 'http_delete', read_result) + # proceed down to delete the resource + + # common handling for all resource delete + parent_type = read_result.get('parent_type', None) + (ok, del_result) = self._delete_common(request, 'global_vrouter_config', id, parent_type) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'global_vrouter_config', 'http_delete', msg) + abort(code, msg) + + fq_name = read_result['fq_name'] + ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('global-vrouter-config', fq_name) + obj_ids['imid'] = ifmap_id + if parent_type: + parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1]) + obj_ids['parent_imid'] = parent_imid + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + + # type-specific hook + r_class = self.get_resource_class('global-vrouter-config') + if r_class: + if read_ok: + # fail if non-default children or backrefs exist + + # Delete default children first + self._global_vrouter_config_delete_default_children(read_result) + + (ok, del_result) = r_class.http_delete(id, read_result, db_conn) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'global_vrouter_config', 'http_delete', msg) + abort(code, msg) + callable = getattr(r_class, 'http_delete_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, read_result, db_conn])) + #end if read_ok + + try: + (ok, del_result) = db_conn.dbe_delete('global-vrouter-config', obj_ids, read_result) + except Exception as e: + ok = False + del_result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, 
None, 'global_vrouter_config', 'http_delete', del_result) + abort(409, del_result) + + try: + self._extension_mgrs['resourceApi'].map_method('post_global_vrouter_config_delete', id, read_result) + except Exception as e: + pass + + #end global_vrouter_config_http_delete + + def global_vrouter_configs_http_post(self): + key = 'global-vrouter-config' + obj_dict = request.json[key] + self._post_validate(key, obj_dict=obj_dict) + fq_name = obj_dict['fq_name'] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_global_vrouter_config_create', obj_dict) + except Exception as e: + pass + + prop_dict = obj_dict.get('linklocal_services') + if prop_dict: + buf = cStringIO.StringIO() + xx_linklocal_services = LinklocalServicesTypes(**prop_dict) + xx_linklocal_services.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_linklocal_services = LinklocalServicesTypes() + try: + xx_linklocal_services.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('encapsulation_priorities') + if prop_dict: + buf = cStringIO.StringIO() + xx_encapsulation_priorities = EncapsulationPrioritiesType(**prop_dict) + xx_encapsulation_priorities.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_encapsulation_priorities = EncapsulationPrioritiesType() + try: + xx_encapsulation_priorities.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource create + (ok, result) = self._post_common(request, 'global-vrouter-config', obj_dict) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict.get('fq_name', [])) + self.config_object_error(None, fq_name_str, 'global_vrouter_config', 'http_post', msg) + 
abort(code, msg) + + name = obj_dict['fq_name'][-1] + fq_name = obj_dict['fq_name'] + + db_conn = self._db_conn + + # if client gave parent_type of config-root, ignore and remove + if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root': + del obj_dict['parent_type'] + + if 'parent_type' in obj_dict: + # non config-root child, verify parent exists + parent_type = obj_dict['parent_type'] + parent_fq_name = obj_dict['fq_name'][:-1] + try: + parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name) + (ok, status) = self._permissions.check_perms_write(request, parent_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + self._permissions.set_user_role(request, obj_dict) + except NoIdError: + err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist' + fq_name_str = ':'.join(parent_fq_name) + self.config_object_error(None, fq_name_str, 'global_vrouter_config', 'http_post', err_msg) + abort(400, err_msg) + + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # Alloc and Store id-mappings before creating entry on pubsub store. 
+ # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('global-vrouter-config', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'global_vrouter_config', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['global_vrouter_config', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('global-vrouter-config') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'global_vrouter_config', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('global-vrouter-config', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'global_vrouter_config', 'http_post', result) + abort(404, result) + + rsp_body = {} + rsp_body['name'] = name + rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = self.generate_url('global-vrouter-config', obj_ids['uuid']) + if 'parent_type' in obj_dict: + # non config-root child, send back parent uuid/href + rsp_body['parent_uuid'] = 
parent_uuid + rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid) + + try: + self._extension_mgrs['resourceApi'].map_method('post_global_vrouter_config_create', obj_dict) + except Exception as e: + pass + + return {'global-vrouter-config': rsp_body} + #end global_vrouter_configs_http_post + + def global_vrouter_configs_http_get(self): + # gather list of uuids using 1. any specified anchors + # 2. any specified filters + # if not 'detail' return list with any specified 'fields' + # if 'detail' return list with props+refs + any specified 'fields' + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + parent_uuids = None + back_ref_uuids = None + obj_uuids = None + if (('parent_fq_name_str' in request.query) and + ('parent_type' in request.query)): + parent_fq_name = request.query.parent_fq_name_str.split(':') + parent_type = request.query.parent_type + parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)] + elif 'parent_id' in request.query: + parent_ids = request.query.parent_id.split(',') + parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids] + if 'back_ref_id' in request.query: + back_ref_ids = request.query.back_ref_id.split(',') + back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids] + if 'obj_uuids' in request.query: + obj_uuids = request.query.obj_uuids.split(',') + + # common handling for all resource get + (ok, result) = self._get_common(request, parent_uuids) + if not ok: + (code, msg) = result + self.config_object_error(None, None, 'global_vrouter_configs', 'http_get_collection', msg) + abort(code, msg) + + if 'count' in request.query: + count = 'true' in request.query.count.lower() + else: + count = False + + filter_params = request.query.filters + if filter_params: + try: + ff_key_vals = filter_params.split(',') + ff_names = [ff.split('==')[0] for ff in ff_key_vals] + ff_values = [ff.split('==')[1] for ff in ff_key_vals] + filters = 
{'field_names': ff_names, 'field_values': ff_values} + except Exception as e: + abort(400, 'Invalid filter ' + filter_params) + else: + filters = None + db_conn = self._db_conn + (ok, result) = \ + db_conn.dbe_list('global-vrouter-config', parent_uuids, back_ref_uuids, obj_uuids, count, filters) + if not ok: + self.config_object_error(None, None, 'global_vrouter_configs', 'http_get_collection', result) + abort(404, result) + + # If only counting, return early + if count: + return {'global-vrouter-configs': {'count': result}} + + if 'detail' in request.query: + detail = 'true' in request.query.detail.lower() + else: + detail = False + + fq_names_uuids = result + obj_dicts = [] + if not detail: + if not self.is_admin_request(): + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms'] + (ok, result) = db_conn.dbe_read_multi('global-vrouter-config', obj_ids_list, obj_fields) + if not ok: + result = [] + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + if obj_result['id_perms'].get('user_visible', True): + obj_dict = {} + obj_dict['uuid'] = obj_result['uuid'] + obj_dict['href'] = self.generate_url('global-vrouter-config', obj_result['uuid']) + obj_dict['fq_name'] = obj_result['fq_name'] + obj_dicts.append(obj_dict) + else: + for fq_name, obj_uuid in fq_names_uuids: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['uuid'] = obj_uuid + obj_dict['href'] = self.generate_url('global-vrouter-config', obj_uuid) + obj_dict['fq_name'] = fq_name + obj_dicts.append(obj_dict) + else: #detail + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'linklocal_services', u'encapsulation_priorities', u'vxlan_network_identifier_mode', u'id_perms', u'display_name'] + [] + if 'fields' in request.query: + obj_fields.extend(request.query.fields.split(',')) + (ok, result) = db_conn.dbe_read_multi('global-vrouter-config', obj_ids_list, obj_fields) + 
+ if not ok: + result = [] + + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['name'] = obj_result['fq_name'][-1] + obj_dict['href'] = self.generate_url('global-vrouter-config', obj_result['uuid']) + obj_dict.update(obj_result) + if (obj_dict['id_perms'].get('user_visible', True) or + self.is_admin_request()): + obj_dicts.append({'global-vrouter-config': obj_dict}) + + return {'global-vrouter-configs': obj_dicts} + #end global_vrouter_configs_http_get + + def _global_vrouter_config_create_default_children(self, parent_obj): + pass + #end _global_vrouter_config_create_default_children + + def _global_vrouter_config_delete_default_children(self, parent_dict): + pass + #end _global_vrouter_config_delete_default_children + + def instance_ip_http_get(self, id): + try: + self._extension_mgrs['resourceApi'].map_method('pre_instance_ip_read', id) + except Exception as e: + pass + + # TODO get vals from request out of the global ASAP + etag = request.headers.get('If-None-Match') + try: + obj_type = self._db_conn.uuid_to_obj_type(id) + except NoIdError: + obj_type = None + if obj_type != 'instance_ip': + abort(404, 'No instance-ip object found for id %s' %(id)) + # common handling for all resource get + (ok, result) = self._get_common(request, id) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'instance_ip', 'http_get', msg) + abort(code, msg) + + # type-specific hook + r_class = self.get_resource_class('instance-ip') + if r_class: + r_class.http_get(id) + + db_conn = self._db_conn + if etag: + obj_ids = {'uuid': id} + (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', '')) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'instance_ip', 'http_get', result) + abort(404, result) + + is_latest = result + if is_latest: + # send Not-Modified, caches use this for read optimization + response.status = 304 + return + #end if etag + + obj_ids = {'uuid': id} + + 
# Generate field list for db layer + properties = [u'instance_ip_address', u'instance_ip_family', u'instance_ip_mode', u'subnet_uuid', u'id_perms', u'display_name'] + references = [u'virtual_network_refs', 'virtual_machine_interface_refs'] + back_references = [] + children = [] + if 'fields' in request.query: + obj_fields = request.query.fields.split(',') + else: # default props + children + refs + backrefs + obj_fields = properties + references + if 'exclude_back_refs' not in request.query: + obj_fields = obj_fields + back_references + if 'exclude_children' not in request.query: + obj_fields = obj_fields + children + + (ok, result) = db_conn.dbe_read('instance-ip', obj_ids, obj_fields) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'instance_ip', 'http_get', result) + abort(404, result) + + # check visibility + if (not result['id_perms'].get('user_visible', True) and + not self.is_admin_request()): + result = 'This object is not visible by users: %s' % id + self.config_object_error(id, None, 'instance_ip', 'http_get', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('instance-ip', id) + rsp_body['name'] = result['fq_name'][-1] + rsp_body.update(result) + id_perms = result['id_perms'] + response.set_header('ETag', '"' + id_perms['last_modified'] + '"') + try: + self._extension_mgrs['resourceApi'].map_method('post_instance_ip_read', id, rsp_body) + except Exception as e: + pass + + return {'instance-ip': rsp_body} + #end instance_ip_http_get + + def instance_ip_http_put(self, id): + key = 'instance-ip' + obj_dict = request.json[key] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_instance_ip_update', id, obj_dict) + except Exception as e: + pass + + db_conn = self._db_conn + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'instance_ip': + abort(404, 'No instance-ip object found for id %s' %(id)) + fq_name = db_conn.uuid_to_fq_name(id) + except 
NoIdError as e: + abort(404, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource put + (ok, result) = self._put_common(request, 'instance_ip', id, fq_name, obj_dict) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'instance_ip', 'http_put', msg) + abort(code, msg) + + # Validate perms + objtype_list = [u'virtual_network', 'virtual_machine_interface'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + try: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + except NoIdError as e: + abort(404, str(e)) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. 
Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # type-specific hook + r_class = self.get_resource_class('instance-ip') + if r_class: + (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn) + if not ok: + (code, msg) = put_result + self.config_object_error(id, None, 'instance_ip', 'http_put', msg) + abort(code, msg) + callable = getattr(r_class, 'http_put_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn])) + + obj_ids = {'uuid': id} + try: + (ok, result) = db_conn.dbe_update('instance-ip', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'instance_ip', 'http_put', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('instance-ip', id) + + try: + self._extension_mgrs['resourceApi'].map_method('post_instance_ip_update', id, obj_dict) + except Exception as e: + pass + + return {'instance-ip': rsp_body} + #end instance_ip_http_put + + def instance_ip_http_delete(self, id): + db_conn = self._db_conn + # if obj doesn't exist return early + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'instance_ip': + abort(404, 'No instance-ip object found for id %s' %(id)) + _ = db_conn.uuid_to_fq_name(id) + except NoIdError: + abort(404, 'ID %s does not exist' %(id)) + + try: + self._extension_mgrs['resourceApi'].map_method('pre_instance_ip_delete', id) + except Exception as e: + pass + + # read in obj from db (accepting error) to get details of it + obj_ids = {'uuid': id} + back_references = [] + children = [] + obj_fields = children + back_references + (read_ok, read_result) = db_conn.dbe_read('instance-ip', obj_ids, obj_fields) + if not read_ok: + if read_result.startswith('Unknown id:'): + abort(404, 'ID %s does not exist' 
%(id)) + else: + self.config_object_error(id, None, 'instance_ip', 'http_delete', read_result) + # proceed down to delete the resource + + # common handling for all resource delete + parent_type = read_result.get('parent_type', None) + (ok, del_result) = self._delete_common(request, 'instance_ip', id, parent_type) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'instance_ip', 'http_delete', msg) + abort(code, msg) + + fq_name = read_result['fq_name'] + ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('instance-ip', fq_name) + obj_ids['imid'] = ifmap_id + if parent_type: + parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1]) + obj_ids['parent_imid'] = parent_imid + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + + # type-specific hook + r_class = self.get_resource_class('instance-ip') + if r_class: + if read_ok: + # fail if non-default children or backrefs exist + + # Delete default children first + self._instance_ip_delete_default_children(read_result) + + (ok, del_result) = r_class.http_delete(id, read_result, db_conn) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'instance_ip', 'http_delete', msg) + abort(code, msg) + callable = getattr(r_class, 'http_delete_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, read_result, db_conn])) + #end if read_ok + + try: + (ok, del_result) = db_conn.dbe_delete('instance-ip', obj_ids, read_result) + except Exception as e: + ok = False + del_result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'instance_ip', 'http_delete', del_result) + abort(409, del_result) + + try: + self._extension_mgrs['resourceApi'].map_method('post_instance_ip_delete', id, read_result) + except Exception as e: + pass + + #end instance_ip_http_delete + + 
def instance_ips_http_post(self):
    """POST handler for the /instance-ips collection: create one instance-ip.

    Reads the object from the Bottle request JSON under key 'instance-ip',
    validates it, checks link permissions, allocates ids, invokes the
    type-specific resource-class hook, and stores the object via the db layer.
    Returns {'instance-ip': {name, fq_name, uuid, href}} on success; any
    failure path exits via bottle.abort() with an HTTP error code.

    NOTE(review): auto-generated code — statement order is significant.
    `result` is reused for several successive meanings (the _post_common
    result / requested uuid, then the dbe_alloc ids, then the hook result),
    and cleanup callables must be registered only after the state change
    they undo has happened.
    """
    key = 'instance-ip'
    obj_dict = request.json[key]
    self._post_validate(key, obj_dict=obj_dict)
    fq_name = obj_dict['fq_name']

    # Pre-create extension hook; failures are deliberately best-effort
    # (extensions must not block API operation).
    try:
        self._extension_mgrs['resourceApi'].map_method('pre_instance_ip_create', obj_dict)
    except Exception as e:
        pass

    # Validate the 'id_perms' property by round-tripping it through the
    # generated XSD type: build an object from the dict, export to XML,
    # re-parse, and rebuild; abort 400 on any schema violation.
    prop_dict = obj_dict.get('id_perms')
    if prop_dict:
        buf = cStringIO.StringIO()
        xx_id_perms = IdPermsType(**prop_dict)
        xx_id_perms.export(buf)
        node = etree.fromstring(buf.getvalue())
        xx_id_perms = IdPermsType()
        try:
            xx_id_perms.build(node)
        except Exception as e:
            abort(400, str(e))
    # common handling for all resource create
    (ok, result) = self._post_common(request, 'instance-ip', obj_dict)
    if not ok:
        (code, msg) = result
        fq_name_str = ':'.join(obj_dict.get('fq_name', []))
        self.config_object_error(None, fq_name_str, 'instance_ip', 'http_post', msg)
        abort(code, msg)

    name = obj_dict['fq_name'][-1]
    fq_name = obj_dict['fq_name']

    db_conn = self._db_conn

    # Validate perms: the caller must be allowed to link to every referenced
    # virtual-network / virtual-machine-interface.
    # NOTE(review): unlike the corresponding http_put handler, a NoIdError
    # from fq_name_to_uuid is not caught here — presumably it propagates to
    # the framework's error handling; confirm against the generator.
    objtype_list = [u'virtual_network', 'virtual_machine_interface']
    for obj_type in objtype_list:
        refs = obj_dict.get('%s_refs'%(obj_type), None)
        if refs:
            for ref in refs:
                ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
                (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
                if not ok:
                    (code, err_msg) = status
                    abort(code, err_msg)

    # State modification starts from here. Ensure that cleanup is done for
    # all state changes: each (callable, args) pair appended below undoes
    # one completed step and is replayed on any later failure.
    cleanup_on_failure = []
    # Alloc and Store id-mappings before creating entry on pubsub store.
    # Else a subscriber can ask for an id mapping before we have stored it.
    # `result` still holds the _post_common return here, used as the
    # client-requested uuid (if any).
    uuid_requested = result
    (ok, result) = db_conn.dbe_alloc('instance-ip', obj_dict, uuid_requested)
    if not ok:
        (code, msg) = result
        fq_name_str = ':'.join(obj_dict['fq_name'])
        self.config_object_error(None, fq_name_str, 'instance_ip', 'http_post', result)
        abort(code, msg)
    cleanup_on_failure.append((db_conn.dbe_release, ['instance_ip', fq_name]))

    # On success dbe_alloc returned the id dict for this object.
    obj_ids = result

    env = request.headers.environ
    tenant_name = env.get(hdr_server_tenant(), 'default-project')

    # type-specific hook: per-resource business logic runs before the
    # generic db create; its failure rolls back the id allocation above.
    r_class = self.get_resource_class('instance-ip')
    if r_class:
        try:
            (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
        except Exception as e:
            ok = False
            result = (500, str(e))
        if not ok:
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            (code, msg) = result
            fq_name_str = ':'.join(fq_name)
            self.config_object_error(None, fq_name_str, 'instance_ip', 'http_post', msg)
            abort(code, msg)
        # register the hook's undo (if the resource class provides one)
        # for replay should the db create below fail
        callable = getattr(r_class, 'http_post_collection_fail', None)
        if callable:
            cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))

    try:
        (ok, result) = \
            db_conn.dbe_create('instance-ip', obj_ids, obj_dict)
    except Exception as e:
        ok = False
        result = str(e)

    if not ok:
        # roll back id allocation and the resource-class hook, then fail
        for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
            fail_cleanup_callable(*cleanup_args)
        fq_name_str = ':'.join(fq_name)
        self.config_object_error(None, fq_name_str, 'instance_ip', 'http_post', result)
        abort(404, result)

    rsp_body = {}
    rsp_body['name'] = name
    rsp_body['fq_name'] = fq_name
    rsp_body['uuid'] = obj_ids['uuid']
    rsp_body['href'] = self.generate_url('instance-ip', obj_ids['uuid'])

    # Post-create extension hook; best-effort like the pre-create hook.
    try:
        self._extension_mgrs['resourceApi'].map_method('post_instance_ip_create', obj_dict)
    except Exception as e:
        pass

    return {'instance-ip': rsp_body}
#end instance_ips_http_post
def instance_ips_http_get(self): + # gather list of uuids using 1. any specified anchors + # 2. any specified filters + # if not 'detail' return list with any specified 'fields' + # if 'detail' return list with props+refs + any specified 'fields' + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + parent_uuids = None + back_ref_uuids = None + obj_uuids = None + if (('parent_fq_name_str' in request.query) and + ('parent_type' in request.query)): + parent_fq_name = request.query.parent_fq_name_str.split(':') + parent_type = request.query.parent_type + parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)] + elif 'parent_id' in request.query: + parent_ids = request.query.parent_id.split(',') + parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids] + if 'back_ref_id' in request.query: + back_ref_ids = request.query.back_ref_id.split(',') + back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids] + if 'obj_uuids' in request.query: + obj_uuids = request.query.obj_uuids.split(',') + + # common handling for all resource get + (ok, result) = self._get_common(request, parent_uuids) + if not ok: + (code, msg) = result + self.config_object_error(None, None, 'instance_ips', 'http_get_collection', msg) + abort(code, msg) + + if 'count' in request.query: + count = 'true' in request.query.count.lower() + else: + count = False + + filter_params = request.query.filters + if filter_params: + try: + ff_key_vals = filter_params.split(',') + ff_names = [ff.split('==')[0] for ff in ff_key_vals] + ff_values = [ff.split('==')[1] for ff in ff_key_vals] + filters = {'field_names': ff_names, 'field_values': ff_values} + except Exception as e: + abort(400, 'Invalid filter ' + filter_params) + else: + filters = None + db_conn = self._db_conn + (ok, result) = \ + db_conn.dbe_list('instance-ip', parent_uuids, back_ref_uuids, obj_uuids, count, filters) + if not ok: + self.config_object_error(None, None, 
'instance_ips', 'http_get_collection', result) + abort(404, result) + + # If only counting, return early + if count: + return {'instance-ips': {'count': result}} + + if 'detail' in request.query: + detail = 'true' in request.query.detail.lower() + else: + detail = False + + fq_names_uuids = result + obj_dicts = [] + if not detail: + if not self.is_admin_request(): + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms'] + (ok, result) = db_conn.dbe_read_multi('instance-ip', obj_ids_list, obj_fields) + if not ok: + result = [] + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + if obj_result['id_perms'].get('user_visible', True): + obj_dict = {} + obj_dict['uuid'] = obj_result['uuid'] + obj_dict['href'] = self.generate_url('instance-ip', obj_result['uuid']) + obj_dict['fq_name'] = obj_result['fq_name'] + obj_dicts.append(obj_dict) + else: + for fq_name, obj_uuid in fq_names_uuids: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['uuid'] = obj_uuid + obj_dict['href'] = self.generate_url('instance-ip', obj_uuid) + obj_dict['fq_name'] = fq_name + obj_dicts.append(obj_dict) + else: #detail + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'instance_ip_address', u'instance_ip_family', u'instance_ip_mode', u'subnet_uuid', u'id_perms', u'display_name'] + [u'virtual_network_refs', 'virtual_machine_interface_refs'] + if 'fields' in request.query: + obj_fields.extend(request.query.fields.split(',')) + (ok, result) = db_conn.dbe_read_multi('instance-ip', obj_ids_list, obj_fields) + + if not ok: + result = [] + + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['name'] = obj_result['fq_name'][-1] + obj_dict['href'] = self.generate_url('instance-ip', obj_result['uuid']) + obj_dict.update(obj_result) + if (obj_dict['id_perms'].get('user_visible', True) or + 
self.is_admin_request()): + obj_dicts.append({'instance-ip': obj_dict}) + + return {'instance-ips': obj_dicts} + #end instance_ips_http_get + + def _instance_ip_create_default_children(self, parent_obj): + pass + #end _instance_ip_create_default_children + + def _instance_ip_delete_default_children(self, parent_dict): + pass + #end _instance_ip_delete_default_children + + def network_policy_http_get(self, id): + try: + self._extension_mgrs['resourceApi'].map_method('pre_network_policy_read', id) + except Exception as e: + pass + + # TODO get vals from request out of the global ASAP + etag = request.headers.get('If-None-Match') + try: + obj_type = self._db_conn.uuid_to_obj_type(id) + except NoIdError: + obj_type = None + if obj_type != 'network_policy': + abort(404, 'No network-policy object found for id %s' %(id)) + # common handling for all resource get + (ok, result) = self._get_common(request, id) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'network_policy', 'http_get', msg) + abort(code, msg) + + # type-specific hook + r_class = self.get_resource_class('network-policy') + if r_class: + r_class.http_get(id) + + db_conn = self._db_conn + if etag: + obj_ids = {'uuid': id} + (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', '')) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'network_policy', 'http_get', result) + abort(404, result) + + is_latest = result + if is_latest: + # send Not-Modified, caches use this for read optimization + response.status = 304 + return + #end if etag + + obj_ids = {'uuid': id} + + # Generate field list for db layer + properties = [u'network_policy_entries', u'id_perms', u'display_name'] + references = [] + back_references = [u'project_back_refs', u'virtual_network_back_refs'] + children = [] + if 'fields' in request.query: + obj_fields = request.query.fields.split(',') + else: # default props + children + refs + backrefs + obj_fields = properties + references + if 
'exclude_back_refs' not in request.query: + obj_fields = obj_fields + back_references + if 'exclude_children' not in request.query: + obj_fields = obj_fields + children + + (ok, result) = db_conn.dbe_read('network-policy', obj_ids, obj_fields) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'network_policy', 'http_get', result) + abort(404, result) + + # check visibility + if (not result['id_perms'].get('user_visible', True) and + not self.is_admin_request()): + result = 'This object is not visible by users: %s' % id + self.config_object_error(id, None, 'network_policy', 'http_get', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('network-policy', id) + rsp_body['name'] = result['fq_name'][-1] + rsp_body.update(result) + id_perms = result['id_perms'] + response.set_header('ETag', '"' + id_perms['last_modified'] + '"') + try: + self._extension_mgrs['resourceApi'].map_method('post_network_policy_read', id, rsp_body) + except Exception as e: + pass + + return {'network-policy': rsp_body} + #end network_policy_http_get + + def network_policy_http_put(self, id): + key = 'network-policy' + obj_dict = request.json[key] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_network_policy_update', id, obj_dict) + except Exception as e: + pass + + db_conn = self._db_conn + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'network_policy': + abort(404, 'No network-policy object found for id %s' %(id)) + fq_name = db_conn.uuid_to_fq_name(id) + except NoIdError as e: + abort(404, str(e)) + prop_dict = obj_dict.get('network_policy_entries') + if prop_dict: + buf = cStringIO.StringIO() + xx_network_policy_entries = PolicyEntriesType(**prop_dict) + xx_network_policy_entries.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_network_policy_entries = PolicyEntriesType() + try: + xx_network_policy_entries.build(node) + except Exception as e: + abort(400, str(e)) + 
prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource put + (ok, result) = self._put_common(request, 'network_policy', id, fq_name, obj_dict) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'network_policy', 'http_put', msg) + abort(code, msg) + + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # type-specific hook + r_class = self.get_resource_class('network-policy') + if r_class: + (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn) + if not ok: + (code, msg) = put_result + self.config_object_error(id, None, 'network_policy', 'http_put', msg) + abort(code, msg) + callable = getattr(r_class, 'http_put_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn])) + + obj_ids = {'uuid': id} + try: + (ok, result) = db_conn.dbe_update('network-policy', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'network_policy', 'http_put', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('network-policy', id) + + try: + self._extension_mgrs['resourceApi'].map_method('post_network_policy_update', id, obj_dict) + except Exception as e: + pass + + return {'network-policy': rsp_body} + #end network_policy_http_put + + def network_policy_http_delete(self, id): + db_conn = self._db_conn + # if obj doesn't exist return early + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'network_policy': + 
abort(404, 'No network-policy object found for id %s' %(id)) + _ = db_conn.uuid_to_fq_name(id) + except NoIdError: + abort(404, 'ID %s does not exist' %(id)) + + try: + self._extension_mgrs['resourceApi'].map_method('pre_network_policy_delete', id) + except Exception as e: + pass + + # read in obj from db (accepting error) to get details of it + obj_ids = {'uuid': id} + back_references = [u'project_back_refs', u'virtual_network_back_refs'] + children = [] + obj_fields = children + back_references + (read_ok, read_result) = db_conn.dbe_read('network-policy', obj_ids, obj_fields) + if not read_ok: + if read_result.startswith('Unknown id:'): + abort(404, 'ID %s does not exist' %(id)) + else: + self.config_object_error(id, None, 'network_policy', 'http_delete', read_result) + # proceed down to delete the resource + + # common handling for all resource delete + parent_type = read_result.get('parent_type', None) + (ok, del_result) = self._delete_common(request, 'network_policy', id, parent_type) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'network_policy', 'http_delete', msg) + abort(code, msg) + + fq_name = read_result['fq_name'] + ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('network-policy', fq_name) + obj_ids['imid'] = ifmap_id + if parent_type: + parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1]) + obj_ids['parent_imid'] = parent_imid + + # State modification starts from here. 
Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + + # type-specific hook + r_class = self.get_resource_class('network-policy') + if r_class: + if read_ok: + # fail if non-default children or backrefs exist + virtual_network_back_refs = read_result.get('virtual_network_back_refs', None) + if virtual_network_back_refs: + back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_network_back_refs']] + back_ref_str = ', '.join(back_ref_urls) + err_msg = 'Back-References from ' + back_ref_str + ' still exist' + self.config_object_error(id, None, 'network_policy', 'http_delete', err_msg) + abort(409, err_msg) + + + # Delete default children first + self._network_policy_delete_default_children(read_result) + + (ok, del_result) = r_class.http_delete(id, read_result, db_conn) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'network_policy', 'http_delete', msg) + abort(code, msg) + callable = getattr(r_class, 'http_delete_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, read_result, db_conn])) + #end if read_ok + + try: + (ok, del_result) = db_conn.dbe_delete('network-policy', obj_ids, read_result) + except Exception as e: + ok = False + del_result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'network_policy', 'http_delete', del_result) + abort(409, del_result) + + try: + self._extension_mgrs['resourceApi'].map_method('post_network_policy_delete', id, read_result) + except Exception as e: + pass + + #end network_policy_http_delete + + def network_policys_http_post(self): + key = 'network-policy' + obj_dict = request.json[key] + self._post_validate(key, obj_dict=obj_dict) + fq_name = obj_dict['fq_name'] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_network_policy_create', obj_dict) + except Exception as e: + pass + + prop_dict = 
obj_dict.get('network_policy_entries') + if prop_dict: + buf = cStringIO.StringIO() + xx_network_policy_entries = PolicyEntriesType(**prop_dict) + xx_network_policy_entries.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_network_policy_entries = PolicyEntriesType() + try: + xx_network_policy_entries.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource create + (ok, result) = self._post_common(request, 'network-policy', obj_dict) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict.get('fq_name', [])) + self.config_object_error(None, fq_name_str, 'network_policy', 'http_post', msg) + abort(code, msg) + + name = obj_dict['fq_name'][-1] + fq_name = obj_dict['fq_name'] + + db_conn = self._db_conn + + # if client gave parent_type of config-root, ignore and remove + if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root': + del obj_dict['parent_type'] + + if 'parent_type' in obj_dict: + # non config-root child, verify parent exists + parent_type = obj_dict['parent_type'] + parent_fq_name = obj_dict['fq_name'][:-1] + try: + parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name) + (ok, status) = self._permissions.check_perms_write(request, parent_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + self._permissions.set_user_role(request, obj_dict) + except NoIdError: + err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist' + fq_name_str = ':'.join(parent_fq_name) + self.config_object_error(None, fq_name_str, 'network_policy', 'http_post', err_msg) + abort(400, err_msg) + + + # State modification starts from here. 
Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # Alloc and Store id-mappings before creating entry on pubsub store. + # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('network-policy', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'network_policy', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['network_policy', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('network-policy') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'network_policy', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('network-policy', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'network_policy', 'http_post', result) + abort(404, result) + + rsp_body = {} + rsp_body['name'] = name + rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = self.generate_url('network-policy', obj_ids['uuid']) + if 'parent_type' in 
obj_dict: + # non config-root child, send back parent uuid/href + rsp_body['parent_uuid'] = parent_uuid + rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid) + + try: + self._extension_mgrs['resourceApi'].map_method('post_network_policy_create', obj_dict) + except Exception as e: + pass + + return {'network-policy': rsp_body} + #end network_policys_http_post + + def network_policys_http_get(self): + # gather list of uuids using 1. any specified anchors + # 2. any specified filters + # if not 'detail' return list with any specified 'fields' + # if 'detail' return list with props+refs + any specified 'fields' + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + parent_uuids = None + back_ref_uuids = None + obj_uuids = None + if (('parent_fq_name_str' in request.query) and + ('parent_type' in request.query)): + parent_fq_name = request.query.parent_fq_name_str.split(':') + parent_type = request.query.parent_type + parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)] + elif 'parent_id' in request.query: + parent_ids = request.query.parent_id.split(',') + parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids] + if 'back_ref_id' in request.query: + back_ref_ids = request.query.back_ref_id.split(',') + back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids] + if 'obj_uuids' in request.query: + obj_uuids = request.query.obj_uuids.split(',') + + # common handling for all resource get + (ok, result) = self._get_common(request, parent_uuids) + if not ok: + (code, msg) = result + self.config_object_error(None, None, 'network_policys', 'http_get_collection', msg) + abort(code, msg) + + if 'count' in request.query: + count = 'true' in request.query.count.lower() + else: + count = False + + filter_params = request.query.filters + if filter_params: + try: + ff_key_vals = filter_params.split(',') + ff_names = [ff.split('==')[0] for ff in ff_key_vals] + ff_values = 
[ff.split('==')[1] for ff in ff_key_vals] + filters = {'field_names': ff_names, 'field_values': ff_values} + except Exception as e: + abort(400, 'Invalid filter ' + filter_params) + else: + filters = None + db_conn = self._db_conn + (ok, result) = \ + db_conn.dbe_list('network-policy', parent_uuids, back_ref_uuids, obj_uuids, count, filters) + if not ok: + self.config_object_error(None, None, 'network_policys', 'http_get_collection', result) + abort(404, result) + + # If only counting, return early + if count: + return {'network-policys': {'count': result}} + + if 'detail' in request.query: + detail = 'true' in request.query.detail.lower() + else: + detail = False + + fq_names_uuids = result + obj_dicts = [] + if not detail: + if not self.is_admin_request(): + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms'] + (ok, result) = db_conn.dbe_read_multi('network-policy', obj_ids_list, obj_fields) + if not ok: + result = [] + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + if obj_result['id_perms'].get('user_visible', True): + obj_dict = {} + obj_dict['uuid'] = obj_result['uuid'] + obj_dict['href'] = self.generate_url('network-policy', obj_result['uuid']) + obj_dict['fq_name'] = obj_result['fq_name'] + obj_dicts.append(obj_dict) + else: + for fq_name, obj_uuid in fq_names_uuids: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['uuid'] = obj_uuid + obj_dict['href'] = self.generate_url('network-policy', obj_uuid) + obj_dict['fq_name'] = fq_name + obj_dicts.append(obj_dict) + else: #detail + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'network_policy_entries', u'id_perms', u'display_name'] + [] + if 'fields' in request.query: + obj_fields.extend(request.query.fields.split(',')) + (ok, result) = db_conn.dbe_read_multi('network-policy', obj_ids_list, obj_fields) + + if not ok: + result = [] + + for obj_result in 
result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['name'] = obj_result['fq_name'][-1] + obj_dict['href'] = self.generate_url('network-policy', obj_result['uuid']) + obj_dict.update(obj_result) + if (obj_dict['id_perms'].get('user_visible', True) or + self.is_admin_request()): + obj_dicts.append({'network-policy': obj_dict}) + + return {'network-policys': obj_dicts} + #end network_policys_http_get + + def _network_policy_create_default_children(self, parent_obj): + pass + #end _network_policy_create_default_children + + def _network_policy_delete_default_children(self, parent_dict): + pass + #end _network_policy_delete_default_children + + def loadbalancer_pool_http_get(self, id): + try: + self._extension_mgrs['resourceApi'].map_method('pre_loadbalancer_pool_read', id) + except Exception as e: + pass + + # TODO get vals from request out of the global ASAP + etag = request.headers.get('If-None-Match') + try: + obj_type = self._db_conn.uuid_to_obj_type(id) + except NoIdError: + obj_type = None + if obj_type != 'loadbalancer_pool': + abort(404, 'No loadbalancer-pool object found for id %s' %(id)) + # common handling for all resource get + (ok, result) = self._get_common(request, id) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'loadbalancer_pool', 'http_get', msg) + abort(code, msg) + + # type-specific hook + r_class = self.get_resource_class('loadbalancer-pool') + if r_class: + r_class.http_get(id) + + db_conn = self._db_conn + if etag: + obj_ids = {'uuid': id} + (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', '')) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'loadbalancer_pool', 'http_get', result) + abort(404, result) + + is_latest = result + if is_latest: + # send Not-Modified, caches use this for read optimization + response.status = 304 + return + #end if etag + + obj_ids = {'uuid': id} + + # Generate field list for db layer + properties = 
[u'loadbalancer_pool_properties', u'loadbalancer_pool_provider', u'id_perms', u'display_name'] + references = [u'service_instance_refs', 'virtual_machine_interface_refs', u'service_appliance_set_refs', u'loadbalancer_healthmonitor_refs'] + back_references = [u'project_back_refs', u'virtual_ip_back_refs'] + children = [u'loadbalancer_members'] + if 'fields' in request.query: + obj_fields = request.query.fields.split(',') + else: # default props + children + refs + backrefs + obj_fields = properties + references + if 'exclude_back_refs' not in request.query: + obj_fields = obj_fields + back_references + if 'exclude_children' not in request.query: + obj_fields = obj_fields + children + + (ok, result) = db_conn.dbe_read('loadbalancer-pool', obj_ids, obj_fields) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'loadbalancer_pool', 'http_get', result) + abort(404, result) + + # check visibility + if (not result['id_perms'].get('user_visible', True) and + not self.is_admin_request()): + result = 'This object is not visible by users: %s' % id + self.config_object_error(id, None, 'loadbalancer_pool', 'http_get', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('loadbalancer-pool', id) + rsp_body['name'] = result['fq_name'][-1] + rsp_body.update(result) + id_perms = result['id_perms'] + response.set_header('ETag', '"' + id_perms['last_modified'] + '"') + try: + self._extension_mgrs['resourceApi'].map_method('post_loadbalancer_pool_read', id, rsp_body) + except Exception as e: + pass + + return {'loadbalancer-pool': rsp_body} + #end loadbalancer_pool_http_get + + def loadbalancer_pool_http_put(self, id): + key = 'loadbalancer-pool' + obj_dict = request.json[key] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_loadbalancer_pool_update', id, obj_dict) + except Exception as e: + pass + + db_conn = self._db_conn + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 
'loadbalancer_pool': + abort(404, 'No loadbalancer-pool object found for id %s' %(id)) + fq_name = db_conn.uuid_to_fq_name(id) + except NoIdError as e: + abort(404, str(e)) + prop_dict = obj_dict.get('loadbalancer_pool_properties') + if prop_dict: + buf = cStringIO.StringIO() + xx_loadbalancer_pool_properties = LoadbalancerPoolType(**prop_dict) + xx_loadbalancer_pool_properties.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_loadbalancer_pool_properties = LoadbalancerPoolType() + try: + xx_loadbalancer_pool_properties.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource put + (ok, result) = self._put_common(request, 'loadbalancer_pool', id, fq_name, obj_dict) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'loadbalancer_pool', 'http_put', msg) + abort(code, msg) + + # Validate perms + objtype_list = [u'service_instance', 'virtual_machine_interface', u'service_appliance_set', u'loadbalancer_member', u'loadbalancer_healthmonitor'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + try: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + except NoIdError as e: + abort(404, str(e)) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. 
Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # type-specific hook + r_class = self.get_resource_class('loadbalancer-pool') + if r_class: + (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn) + if not ok: + (code, msg) = put_result + self.config_object_error(id, None, 'loadbalancer_pool', 'http_put', msg) + abort(code, msg) + callable = getattr(r_class, 'http_put_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn])) + + obj_ids = {'uuid': id} + try: + (ok, result) = db_conn.dbe_update('loadbalancer-pool', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'loadbalancer_pool', 'http_put', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('loadbalancer-pool', id) + + try: + self._extension_mgrs['resourceApi'].map_method('post_loadbalancer_pool_update', id, obj_dict) + except Exception as e: + pass + + return {'loadbalancer-pool': rsp_body} + #end loadbalancer_pool_http_put + + def loadbalancer_pool_http_delete(self, id): + db_conn = self._db_conn + # if obj doesn't exist return early + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'loadbalancer_pool': + abort(404, 'No loadbalancer-pool object found for id %s' %(id)) + _ = db_conn.uuid_to_fq_name(id) + except NoIdError: + abort(404, 'ID %s does not exist' %(id)) + + try: + self._extension_mgrs['resourceApi'].map_method('pre_loadbalancer_pool_delete', id) + except Exception as e: + pass + + # read in obj from db (accepting error) to get details of it + obj_ids = {'uuid': id} + back_references = [u'project_back_refs', u'virtual_ip_back_refs'] + children = [u'loadbalancer_members'] + obj_fields = children + back_references + (read_ok, read_result) = 
db_conn.dbe_read('loadbalancer-pool', obj_ids, obj_fields) + if not read_ok: + if read_result.startswith('Unknown id:'): + abort(404, 'ID %s does not exist' %(id)) + else: + self.config_object_error(id, None, 'loadbalancer_pool', 'http_delete', read_result) + # proceed down to delete the resource + + # common handling for all resource delete + parent_type = read_result.get('parent_type', None) + (ok, del_result) = self._delete_common(request, 'loadbalancer_pool', id, parent_type) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'loadbalancer_pool', 'http_delete', msg) + abort(code, msg) + + fq_name = read_result['fq_name'] + ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('loadbalancer-pool', fq_name) + obj_ids['imid'] = ifmap_id + if parent_type: + parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1]) + obj_ids['parent_imid'] = parent_imid + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + + # type-specific hook + r_class = self.get_resource_class('loadbalancer-pool') + if r_class: + if read_ok: + # fail if non-default children or backrefs exist + loadbalancer_members = read_result.get('loadbalancer_members', None) + if loadbalancer_members: + has_infos = read_result['loadbalancer_members'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-loadbalancer-member')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'loadbalancer_pool', 'http_delete', err_msg) + abort(409, err_msg) + + virtual_ip_back_refs = read_result.get('virtual_ip_back_refs', None) + if virtual_ip_back_refs: + back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_ip_back_refs']] + back_ref_str = ', '.join(back_ref_urls) + err_msg = 'Back-References from ' + back_ref_str + 
' still exist' + self.config_object_error(id, None, 'loadbalancer_pool', 'http_delete', err_msg) + abort(409, err_msg) + + + # Delete default children first + self._loadbalancer_pool_delete_default_children(read_result) + + (ok, del_result) = r_class.http_delete(id, read_result, db_conn) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'loadbalancer_pool', 'http_delete', msg) + abort(code, msg) + callable = getattr(r_class, 'http_delete_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, read_result, db_conn])) + #end if read_ok + + try: + (ok, del_result) = db_conn.dbe_delete('loadbalancer-pool', obj_ids, read_result) + except Exception as e: + ok = False + del_result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'loadbalancer_pool', 'http_delete', del_result) + abort(409, del_result) + + try: + self._extension_mgrs['resourceApi'].map_method('post_loadbalancer_pool_delete', id, read_result) + except Exception as e: + pass + + #end loadbalancer_pool_http_delete + + def loadbalancer_pools_http_post(self): + key = 'loadbalancer-pool' + obj_dict = request.json[key] + self._post_validate(key, obj_dict=obj_dict) + fq_name = obj_dict['fq_name'] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_loadbalancer_pool_create', obj_dict) + except Exception as e: + pass + + prop_dict = obj_dict.get('loadbalancer_pool_properties') + if prop_dict: + buf = cStringIO.StringIO() + xx_loadbalancer_pool_properties = LoadbalancerPoolType(**prop_dict) + xx_loadbalancer_pool_properties.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_loadbalancer_pool_properties = LoadbalancerPoolType() + try: + xx_loadbalancer_pool_properties.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = 
IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource create + (ok, result) = self._post_common(request, 'loadbalancer-pool', obj_dict) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict.get('fq_name', [])) + self.config_object_error(None, fq_name_str, 'loadbalancer_pool', 'http_post', msg) + abort(code, msg) + + name = obj_dict['fq_name'][-1] + fq_name = obj_dict['fq_name'] + + db_conn = self._db_conn + + # if client gave parent_type of config-root, ignore and remove + if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root': + del obj_dict['parent_type'] + + if 'parent_type' in obj_dict: + # non config-root child, verify parent exists + parent_type = obj_dict['parent_type'] + parent_fq_name = obj_dict['fq_name'][:-1] + try: + parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name) + (ok, status) = self._permissions.check_perms_write(request, parent_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + self._permissions.set_user_role(request, obj_dict) + except NoIdError: + err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist' + fq_name_str = ':'.join(parent_fq_name) + self.config_object_error(None, fq_name_str, 'loadbalancer_pool', 'http_post', err_msg) + abort(400, err_msg) + + # Validate perms + objtype_list = [u'service_instance', 'virtual_machine_interface', u'service_appliance_set', u'loadbalancer_member', u'loadbalancer_healthmonitor'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from 
here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # Alloc and Store id-mappings before creating entry on pubsub store. + # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('loadbalancer-pool', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'loadbalancer_pool', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['loadbalancer_pool', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('loadbalancer-pool') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'loadbalancer_pool', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('loadbalancer-pool', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'loadbalancer_pool', 'http_post', result) + abort(404, result) + + rsp_body = {} + rsp_body['name'] = name + rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = self.generate_url('loadbalancer-pool', obj_ids['uuid']) 
+ if 'parent_type' in obj_dict: + # non config-root child, send back parent uuid/href + rsp_body['parent_uuid'] = parent_uuid + rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid) + + try: + self._extension_mgrs['resourceApi'].map_method('post_loadbalancer_pool_create', obj_dict) + except Exception as e: + pass + + return {'loadbalancer-pool': rsp_body} + #end loadbalancer_pools_http_post + + def loadbalancer_pools_http_get(self): + # gather list of uuids using 1. any specified anchors + # 2. any specified filters + # if not 'detail' return list with any specified 'fields' + # if 'detail' return list with props+refs + any specified 'fields' + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + parent_uuids = None + back_ref_uuids = None + obj_uuids = None + if (('parent_fq_name_str' in request.query) and + ('parent_type' in request.query)): + parent_fq_name = request.query.parent_fq_name_str.split(':') + parent_type = request.query.parent_type + parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)] + elif 'parent_id' in request.query: + parent_ids = request.query.parent_id.split(',') + parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids] + if 'back_ref_id' in request.query: + back_ref_ids = request.query.back_ref_id.split(',') + back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids] + if 'obj_uuids' in request.query: + obj_uuids = request.query.obj_uuids.split(',') + + # common handling for all resource get + (ok, result) = self._get_common(request, parent_uuids) + if not ok: + (code, msg) = result + self.config_object_error(None, None, 'loadbalancer_pools', 'http_get_collection', msg) + abort(code, msg) + + if 'count' in request.query: + count = 'true' in request.query.count.lower() + else: + count = False + + filter_params = request.query.filters + if filter_params: + try: + ff_key_vals = filter_params.split(',') + ff_names = [ff.split('==')[0] for ff 
in ff_key_vals] + ff_values = [ff.split('==')[1] for ff in ff_key_vals] + filters = {'field_names': ff_names, 'field_values': ff_values} + except Exception as e: + abort(400, 'Invalid filter ' + filter_params) + else: + filters = None + db_conn = self._db_conn + (ok, result) = \ + db_conn.dbe_list('loadbalancer-pool', parent_uuids, back_ref_uuids, obj_uuids, count, filters) + if not ok: + self.config_object_error(None, None, 'loadbalancer_pools', 'http_get_collection', result) + abort(404, result) + + # If only counting, return early + if count: + return {'loadbalancer-pools': {'count': result}} + + if 'detail' in request.query: + detail = 'true' in request.query.detail.lower() + else: + detail = False + + fq_names_uuids = result + obj_dicts = [] + if not detail: + if not self.is_admin_request(): + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms'] + (ok, result) = db_conn.dbe_read_multi('loadbalancer-pool', obj_ids_list, obj_fields) + if not ok: + result = [] + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + if obj_result['id_perms'].get('user_visible', True): + obj_dict = {} + obj_dict['uuid'] = obj_result['uuid'] + obj_dict['href'] = self.generate_url('loadbalancer-pool', obj_result['uuid']) + obj_dict['fq_name'] = obj_result['fq_name'] + obj_dicts.append(obj_dict) + else: + for fq_name, obj_uuid in fq_names_uuids: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['uuid'] = obj_uuid + obj_dict['href'] = self.generate_url('loadbalancer-pool', obj_uuid) + obj_dict['fq_name'] = fq_name + obj_dicts.append(obj_dict) + else: #detail + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'loadbalancer_pool_properties', u'loadbalancer_pool_provider', u'id_perms', u'display_name'] + [u'service_instance_refs', 'virtual_machine_interface_refs', u'service_appliance_set_refs', u'loadbalancer_healthmonitor_refs'] + if 'fields' 
in request.query: + obj_fields.extend(request.query.fields.split(',')) + (ok, result) = db_conn.dbe_read_multi('loadbalancer-pool', obj_ids_list, obj_fields) + + if not ok: + result = [] + + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['name'] = obj_result['fq_name'][-1] + obj_dict['href'] = self.generate_url('loadbalancer-pool', obj_result['uuid']) + obj_dict.update(obj_result) + if (obj_dict['id_perms'].get('user_visible', True) or + self.is_admin_request()): + obj_dicts.append({'loadbalancer-pool': obj_dict}) + + return {'loadbalancer-pools': obj_dicts} + #end loadbalancer_pools_http_get + + def _loadbalancer_pool_create_default_children(self, parent_obj): + # Create a default child only if provisioned for + r_class = self.get_resource_class('loadbalancer-member') + if r_class and r_class.generate_default_instance: + child_obj = LoadbalancerMember(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('loadbalancer-member') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('loadbalancer-member', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('loadbalancer-member', obj_ids, child_dict) + self._loadbalancer_member_create_default_children(child_obj) + + pass + #end _loadbalancer_pool_create_default_children + + def _loadbalancer_pool_delete_default_children(self, parent_dict): + # Delete a default child only if provisioned for + r_class = self.get_resource_class('loadbalancer-member') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('loadbalancer_members') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-loadbalancer-member': + default_child_id = has_info['href'].split('/')[-1] + self.loadbalancer_member_http_delete(default_child_id) + break + + 
    def virtual_DNS_record_http_get(self, id):
        """REST GET handler for a single virtual-DNS-record.

        Returns ``{'virtual-DNS-record': <obj dict>}``; aborts 404 when the id
        is unknown or the object is hidden from non-admin users, and returns
        304 (empty body) when the client's If-None-Match ETag is current.
        Reads bottle's thread-local ``request`` / writes ``response``.
        """
        # Pre-read extension hook; extension errors are deliberately swallowed.
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_virtual_DNS_record_read', id)
        except Exception as e:
            pass

        # TODO get vals from request out of the global ASAP
        etag = request.headers.get('If-None-Match')
        try:
            obj_type = self._db_conn.uuid_to_obj_type(id)
        except NoIdError:
            obj_type = None
        if obj_type != 'virtual_DNS_record':
            abort(404, 'No virtual-DNS-record object found for id %s' %(id))
        # common handling for all resource get
        (ok, result) = self._get_common(request, id)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'virtual_DNS_record', 'http_get', msg)
            abort(code, msg)

        # type-specific hook
        r_class = self.get_resource_class('virtual-DNS-record')
        if r_class:
            r_class.http_get(id)

        db_conn = self._db_conn
        if etag:
            obj_ids = {'uuid': id}
            # strip surrounding quotes from the ETag before comparison
            (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
            if not ok:
                # Not present in DB
                self.config_object_error(id, None, 'virtual_DNS_record', 'http_get', result)
                abort(404, result)

            is_latest = result
            if is_latest:
                # send Not-Modified, caches use this for read optimization
                response.status = 304
                return
        #end if etag

        obj_ids = {'uuid': id}

        # Generate field list for db layer
        properties = [u'virtual_DNS_record_data', u'id_perms', u'display_name']
        references = []
        back_references = [u'virtual_DNS_back_refs']
        children = []
        if 'fields' in request.query:
            obj_fields = request.query.fields.split(',')
        else: # default props + children + refs + backrefs
            obj_fields = properties + references
            if 'exclude_back_refs' not in request.query:
                obj_fields = obj_fields + back_references
            if 'exclude_children' not in request.query:
                obj_fields = obj_fields + children

        (ok, result) = db_conn.dbe_read('virtual-DNS-record', obj_ids, obj_fields)
        if not ok:
            # Not present in DB
            self.config_object_error(id, None, 'virtual_DNS_record', 'http_get', result)
            abort(404, result)

        # check visibility
        if (not result['id_perms'].get('user_visible', True) and
            not self.is_admin_request()):
            result = 'This object is not visible by users: %s' % id
            self.config_object_error(id, None, 'virtual_DNS_record', 'http_get', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('virtual-DNS-record', id)
        rsp_body['name'] = result['fq_name'][-1]
        rsp_body.update(result)
        id_perms = result['id_perms']
        # ETag is the object's last-modified timestamp, quoted per HTTP spec
        response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
        # Post-read extension hook; failures are swallowed.
        try:
            self._extension_mgrs['resourceApi'].map_method('post_virtual_DNS_record_read', id, rsp_body)
        except Exception as e:
            pass

        return {'virtual-DNS-record': rsp_body}
    #end virtual_DNS_record_http_get
    def virtual_DNS_record_http_put(self, id):
        """REST PUT handler updating a virtual-DNS-record.

        Validates complex properties by round-tripping them through the
        generated XML type classes, runs the common put path, the type-specific
        hook, then the DB update; registered failure callbacks are invoked in
        registration order if the DB update fails.
        """
        key = 'virtual-DNS-record'
        obj_dict = request.json[key]

        # Pre-update extension hook; extension errors are deliberately swallowed.
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_virtual_DNS_record_update', id, obj_dict)
        except Exception as e:
            pass

        db_conn = self._db_conn
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'virtual_DNS_record':
                abort(404, 'No virtual-DNS-record object found for id %s' %(id))
            fq_name = db_conn.uuid_to_fq_name(id)
        except NoIdError as e:
            abort(404, str(e))
        # Validate 'virtual_DNS_record_data' by exporting the dict to XML and
        # re-building the typed object; a build failure means bad input (400).
        prop_dict = obj_dict.get('virtual_DNS_record_data')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_virtual_DNS_record_data = VirtualDnsRecordType(**prop_dict)
            xx_virtual_DNS_record_data.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_virtual_DNS_record_data = VirtualDnsRecordType()
            try:
                xx_virtual_DNS_record_data.build(node)
            except Exception as e:
                abort(400, str(e))
        # Same XML round-trip validation for 'id_perms'.
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource put
        (ok, result) = self._put_common(request, 'virtual_DNS_record', id, fq_name, obj_dict)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'virtual_DNS_record', 'http_put', msg)
            abort(code, msg)


        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # type-specific hook
        r_class = self.get_resource_class('virtual-DNS-record')
        if r_class:
            (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
            if not ok:
                (code, msg) = put_result
                self.config_object_error(id, None, 'virtual_DNS_record', 'http_put', msg)
                abort(code, msg)
            # NOTE(review): 'callable' shadows the builtin of the same name
            # (generated code); it is local to this method.
            callable = getattr(r_class, 'http_put_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))

        obj_ids = {'uuid': id}
        try:
            (ok, result) = db_conn.dbe_update('virtual-DNS-record', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)
        if not ok:
            # roll back via registered failure callbacks before reporting
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'virtual_DNS_record', 'http_put', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('virtual-DNS-record', id)

        # Post-update extension hook; failures are swallowed.
        try:
            self._extension_mgrs['resourceApi'].map_method('post_virtual_DNS_record_update', id, obj_dict)
        except Exception as e:
            pass

        return {'virtual-DNS-record': rsp_body}
    #end virtual_DNS_record_http_put
    def virtual_DNS_record_http_delete(self, id):
        """REST DELETE handler for a virtual-DNS-record.

        Verifies the id, reads the object (tolerating read errors), runs the
        common delete path and type-specific hook, deletes default children,
        then removes the object from the DB; aborts 409 if the DB delete fails.
        """
        db_conn = self._db_conn
        # if obj doesn't exist return early
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'virtual_DNS_record':
                abort(404, 'No virtual-DNS-record object found for id %s' %(id))
            _ = db_conn.uuid_to_fq_name(id)
        except NoIdError:
            abort(404, 'ID %s does not exist' %(id))

        # Pre-delete extension hook; extension errors are deliberately swallowed.
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_virtual_DNS_record_delete', id)
        except Exception as e:
            pass

        # read in obj from db (accepting error) to get details of it
        obj_ids = {'uuid': id}
        back_references = [u'virtual_DNS_back_refs']
        children = []
        obj_fields = children + back_references
        (read_ok, read_result) = db_conn.dbe_read('virtual-DNS-record', obj_ids, obj_fields)
        if not read_ok:
            if read_result.startswith('Unknown id:'):
                abort(404, 'ID %s does not exist' %(id))
            else:
                self.config_object_error(id, None, 'virtual_DNS_record', 'http_delete', read_result)
                # proceed down to delete the resource
                # NOTE(review): on this path read_result is an error string,
                # yet .get()/['fq_name'] below treat it as a dict -- confirm
                # with the db layer whether this path is actually reachable.

        # common handling for all resource delete
        parent_type = read_result.get('parent_type', None)
        (ok, del_result) = self._delete_common(request, 'virtual_DNS_record', id, parent_type)
        if not ok:
            (code, msg) = del_result
            self.config_object_error(id, None, 'virtual_DNS_record', 'http_delete', msg)
            abort(code, msg)

        fq_name = read_result['fq_name']
        ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('virtual-DNS-record', fq_name)
        obj_ids['imid'] = ifmap_id
        if parent_type:
            # parent's ifmap id is derived from the fq_name minus the leaf
            parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
            obj_ids['parent_imid'] = parent_imid

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []

        # type-specific hook
        r_class = self.get_resource_class('virtual-DNS-record')
        if r_class:
            if read_ok:
                # fail if non-default children or backrefs exist

                # Delete default children first
                self._virtual_DNS_record_delete_default_children(read_result)

                (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
                if not ok:
                    (code, msg) = del_result
                    self.config_object_error(id, None, 'virtual_DNS_record', 'http_delete', msg)
                    abort(code, msg)
                callable = getattr(r_class, 'http_delete_fail', None)
                if callable:
                    cleanup_on_failure.append((callable, [id, read_result, db_conn]))
            #end if read_ok

        try:
            (ok, del_result) = db_conn.dbe_delete('virtual-DNS-record', obj_ids, read_result)
        except Exception as e:
            ok = False
            del_result = str(e)
        if not ok:
            # roll back via registered failure callbacks before reporting
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'virtual_DNS_record', 'http_delete', del_result)
            abort(409, del_result)

        # Post-delete extension hook; failures are swallowed.
        try:
            self._extension_mgrs['resourceApi'].map_method('post_virtual_DNS_record_delete', id, read_result)
        except Exception as e:
            pass

    #end virtual_DNS_record_http_delete
    def virtual_DNS_records_http_post(self):
        """REST POST handler creating a virtual-DNS-record.

        Validates properties via XML round-trip, runs the common post path,
        verifies the parent (unless config-root), allocates the id mapping
        before the pubsub create so subscribers never see an unknown id, and
        unwinds registered cleanup callbacks on any failure.
        """
        key = 'virtual-DNS-record'
        obj_dict = request.json[key]
        self._post_validate(key, obj_dict=obj_dict)
        fq_name = obj_dict['fq_name']

        # Pre-create extension hook; extension errors are deliberately swallowed.
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_virtual_DNS_record_create', obj_dict)
        except Exception as e:
            pass

        # Validate 'virtual_DNS_record_data' by exporting to XML and rebuilding
        # the typed object; a build failure means bad input (400).
        prop_dict = obj_dict.get('virtual_DNS_record_data')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_virtual_DNS_record_data = VirtualDnsRecordType(**prop_dict)
            xx_virtual_DNS_record_data.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_virtual_DNS_record_data = VirtualDnsRecordType()
            try:
                xx_virtual_DNS_record_data.build(node)
            except Exception as e:
                abort(400, str(e))
        # Same XML round-trip validation for 'id_perms'.
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource create
        (ok, result) = self._post_common(request, 'virtual-DNS-record', obj_dict)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict.get('fq_name', []))
            self.config_object_error(None, fq_name_str, 'virtual_DNS_record', 'http_post', msg)
            abort(code, msg)

        name = obj_dict['fq_name'][-1]
        fq_name = obj_dict['fq_name']

        db_conn = self._db_conn

        # if client gave parent_type of config-root, ignore and remove
        if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
            del obj_dict['parent_type']

        if 'parent_type' in obj_dict:
            # non config-root child, verify parent exists
            parent_type = obj_dict['parent_type']
            parent_fq_name = obj_dict['fq_name'][:-1]
            try:
                parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
                (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
                if not ok:
                    (code, err_msg) = status
                    abort(code, err_msg)
                self._permissions.set_user_role(request, obj_dict)
            except NoIdError:
                err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
                fq_name_str = ':'.join(parent_fq_name)
                self.config_object_error(None, fq_name_str, 'virtual_DNS_record', 'http_post', err_msg)
                abort(400, err_msg)


        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # Alloc and Store id-mappings before creating entry on pubsub store.
        # Else a subscriber can ask for an id mapping before we have stored it
        # NOTE(review): 'result' here still holds _post_common's success value,
        # used as the client-requested uuid (if any) -- generated convention.
        uuid_requested = result
        (ok, result) = db_conn.dbe_alloc('virtual-DNS-record', obj_dict, uuid_requested)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict['fq_name'])
            self.config_object_error(None, fq_name_str, 'virtual_DNS_record', 'http_post', result)
            abort(code, msg)
        cleanup_on_failure.append((db_conn.dbe_release, ['virtual_DNS_record', fq_name]))

        obj_ids = result

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')


        # type-specific hook
        r_class = self.get_resource_class('virtual-DNS-record')
        if r_class:
            try:
                (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
            except Exception as e:
                ok = False
                result = (500, str(e))
            if not ok:
                for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                    fail_cleanup_callable(*cleanup_args)
                (code, msg) = result
                fq_name_str = ':'.join(fq_name)
                self.config_object_error(None, fq_name_str, 'virtual_DNS_record', 'http_post', msg)
                abort(code, msg)
            callable = getattr(r_class, 'http_post_collection_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))

        try:
            (ok, result) = \
                db_conn.dbe_create('virtual-DNS-record', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)

        if not ok:
            # unwind everything registered so far, in registration order
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            fq_name_str = ':'.join(fq_name)
            self.config_object_error(None, fq_name_str, 'virtual_DNS_record', 'http_post', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['name'] = name
        rsp_body['fq_name'] = fq_name
        rsp_body['uuid'] = obj_ids['uuid']
        rsp_body['href'] = self.generate_url('virtual-DNS-record', obj_ids['uuid'])
        if 'parent_type' in obj_dict:
            # non config-root child, send back parent uuid/href
            rsp_body['parent_uuid'] = parent_uuid
            rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)

        # Post-create extension hook; failures are swallowed.
        try:
            self._extension_mgrs['resourceApi'].map_method('post_virtual_DNS_record_create', obj_dict)
        except Exception as e:
            pass

        return {'virtual-DNS-record': rsp_body}
    #end virtual_DNS_records_http_post
    def virtual_DNS_records_http_get(self):
        """REST GET handler listing virtual-DNS-record objects.

        Anchors the list by parent fq_name/id, back_ref_id or explicit uuids;
        supports 'count', 'filters', 'detail' and 'fields' query parameters.
        Non-admin callers only see objects whose id_perms mark them visible.
        """
        # gather list of uuids using 1. any specified anchors
        # 2. any specified filters
        # if not 'detail' return list with any specified 'fields'
        # if 'detail' return list with props+refs + any specified 'fields'

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')
        parent_uuids = None
        back_ref_uuids = None
        obj_uuids = None
        if (('parent_fq_name_str' in request.query) and
            ('parent_type' in request.query)):
            parent_fq_name = request.query.parent_fq_name_str.split(':')
            parent_type = request.query.parent_type
            parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
        elif 'parent_id' in request.query:
            parent_ids = request.query.parent_id.split(',')
            # uuid.UUID() both validates and canonicalizes the ids
            parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
        if 'back_ref_id' in request.query:
            back_ref_ids = request.query.back_ref_id.split(',')
            back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
        if 'obj_uuids' in request.query:
            obj_uuids = request.query.obj_uuids.split(',')

        # common handling for all resource get
        (ok, result) = self._get_common(request, parent_uuids)
        if not ok:
            (code, msg) = result
            self.config_object_error(None, None, 'virtual_DNS_records', 'http_get_collection', msg)
            abort(code, msg)

        if 'count' in request.query:
            count = 'true' in request.query.count.lower()
        else:
            count = False

        # filters come as 'name1==val1,name2==val2'
        filter_params = request.query.filters
        if filter_params:
            try:
                ff_key_vals = filter_params.split(',')
                ff_names = [ff.split('==')[0] for ff in ff_key_vals]
                ff_values = [ff.split('==')[1] for ff in ff_key_vals]
                filters = {'field_names': ff_names, 'field_values': ff_values}
            except Exception as e:
                abort(400, 'Invalid filter ' + filter_params)
        else:
            filters = None
        db_conn = self._db_conn
        (ok, result) = \
            db_conn.dbe_list('virtual-DNS-record', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
        if not ok:
            self.config_object_error(None, None, 'virtual_DNS_records', 'http_get_collection', result)
            abort(404, result)

        # If only counting, return early
        if count:
            return {'virtual-DNS-records': {'count': result}}

        if 'detail' in request.query:
            detail = 'true' in request.query.detail.lower()
        else:
            detail = False

        fq_names_uuids = result
        obj_dicts = []
        if not detail:
            if not self.is_admin_request():
                # non-admin: re-read id_perms to filter out hidden objects
                obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
                obj_fields = [u'id_perms']
                (ok, result) = db_conn.dbe_read_multi('virtual-DNS-record', obj_ids_list, obj_fields)
                if not ok:
                    result = []
                for obj_result in result:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    if obj_result['id_perms'].get('user_visible', True):
                        obj_dict = {}
                        obj_dict['uuid'] = obj_result['uuid']
                        obj_dict['href'] = self.generate_url('virtual-DNS-record', obj_result['uuid'])
                        obj_dict['fq_name'] = obj_result['fq_name']
                        obj_dicts.append(obj_dict)
            else:
                for fq_name, obj_uuid in fq_names_uuids:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    obj_dict = {}
                    obj_dict['uuid'] = obj_uuid
                    obj_dict['href'] = self.generate_url('virtual-DNS-record', obj_uuid)
                    obj_dict['fq_name'] = fq_name
                    obj_dicts.append(obj_dict)
        else: #detail
            obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
            obj_fields = [u'virtual_DNS_record_data', u'id_perms', u'display_name'] + []
            if 'fields' in request.query:
                obj_fields.extend(request.query.fields.split(','))
            (ok, result) = db_conn.dbe_read_multi('virtual-DNS-record', obj_ids_list, obj_fields)

            if not ok:
                result = []

            for obj_result in result:
                # give chance for zk heartbeat/ping
                gevent.sleep(0)
                obj_dict = {}
                obj_dict['name'] = obj_result['fq_name'][-1]
                obj_dict['href'] = self.generate_url('virtual-DNS-record', obj_result['uuid'])
                obj_dict.update(obj_result)
                if (obj_dict['id_perms'].get('user_visible', True) or
                    self.is_admin_request()):
                    obj_dicts.append({'virtual-DNS-record': obj_dict})

        return {'virtual-DNS-records': obj_dicts}
    #end virtual_DNS_records_http_get
    def _virtual_DNS_record_create_default_children(self, parent_obj):
        """No default children are provisioned for virtual-DNS-record; no-op."""
        pass
    #end _virtual_DNS_record_create_default_children

    def _virtual_DNS_record_delete_default_children(self, parent_dict):
        """No default children exist for virtual-DNS-record; no-op."""
        pass
    #end _virtual_DNS_record_delete_default_children

    def route_target_http_get(self, id):
        """REST GET handler for a single route-target.

        Returns ``{'route-target': <obj dict>}``; aborts 404 for unknown ids
        or objects hidden from non-admin users, 304 when the client's
        If-None-Match ETag is current.
        """
        # Pre-read extension hook; extension errors are deliberately swallowed.
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_route_target_read', id)
        except Exception as e:
            pass

        # TODO get vals from request out of the global ASAP
        etag = request.headers.get('If-None-Match')
        try:
            obj_type = self._db_conn.uuid_to_obj_type(id)
        except NoIdError:
            obj_type = None
        if obj_type != 'route_target':
            abort(404, 'No route-target object found for id %s' %(id))
        # common handling for all resource get
        (ok, result) = self._get_common(request, id)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'route_target', 'http_get', msg)
            abort(code, msg)

        # type-specific hook
        r_class = self.get_resource_class('route-target')
        if r_class:
            r_class.http_get(id)

        db_conn = self._db_conn
        if etag:
            obj_ids = {'uuid': id}
            # strip surrounding quotes from the ETag before comparison
            (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
            if not ok:
                # Not present in DB
                self.config_object_error(id, None, 'route_target', 'http_get', result)
                abort(404, result)

            is_latest = result
            if is_latest:
                # send Not-Modified, caches use this for read optimization
                response.status = 304
                return
        #end if etag

        obj_ids = {'uuid': id}

        # Generate field list for db layer
        properties = [u'id_perms', u'display_name']
        references = []
        back_references = [u'logical_router_back_refs', 'routing_instance_back_refs']
        children = []
        if 'fields' in request.query:
            obj_fields = request.query.fields.split(',')
        else: # default props + children + refs + backrefs
            obj_fields = properties + references
            if 'exclude_back_refs' not in request.query:
                obj_fields = obj_fields + back_references
            if 'exclude_children' not in request.query:
                obj_fields = obj_fields + children

        (ok, result) = db_conn.dbe_read('route-target', obj_ids, obj_fields)
        if not ok:
            # Not present in DB
            self.config_object_error(id, None, 'route_target', 'http_get', result)
            abort(404, result)

        # check visibility
        if (not result['id_perms'].get('user_visible', True) and
            not self.is_admin_request()):
            result = 'This object is not visible by users: %s' % id
            self.config_object_error(id, None, 'route_target', 'http_get', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('route-target', id)
        rsp_body['name'] = result['fq_name'][-1]
        rsp_body.update(result)
        id_perms = result['id_perms']
        # ETag is the object's last-modified timestamp, quoted per HTTP spec
        response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
        # Post-read extension hook; failures are swallowed.
        try:
            self._extension_mgrs['resourceApi'].map_method('post_route_target_read', id, rsp_body)
        except Exception as e:
            pass

        return {'route-target': rsp_body}
    #end route_target_http_get
    def route_target_http_put(self, id):
        """REST PUT handler updating a route-target.

        Validates 'id_perms' via XML round-trip, runs the common put path and
        type-specific hook, then the DB update; registered failure callbacks
        are invoked in registration order if the DB update fails.
        """
        key = 'route-target'
        obj_dict = request.json[key]

        # Pre-update extension hook; extension errors are deliberately swallowed.
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_route_target_update', id, obj_dict)
        except Exception as e:
            pass

        db_conn = self._db_conn
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'route_target':
                abort(404, 'No route-target object found for id %s' %(id))
            fq_name = db_conn.uuid_to_fq_name(id)
        except NoIdError as e:
            abort(404, str(e))
        # Validate 'id_perms' by exporting the dict to XML and re-building the
        # typed object; a build failure means bad input (400).
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource put
        (ok, result) = self._put_common(request, 'route_target', id, fq_name, obj_dict)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'route_target', 'http_put', msg)
            abort(code, msg)


        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # type-specific hook
        r_class = self.get_resource_class('route-target')
        if r_class:
            (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
            if not ok:
                (code, msg) = put_result
                self.config_object_error(id, None, 'route_target', 'http_put', msg)
                abort(code, msg)
            # NOTE(review): 'callable' shadows the builtin (generated code)
            callable = getattr(r_class, 'http_put_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))

        obj_ids = {'uuid': id}
        try:
            (ok, result) = db_conn.dbe_update('route-target', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)
        if not ok:
            # roll back via registered failure callbacks before reporting
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'route_target', 'http_put', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('route-target', id)

        # Post-update extension hook; failures are swallowed.
        try:
            self._extension_mgrs['resourceApi'].map_method('post_route_target_update', id, obj_dict)
        except Exception as e:
            pass

        return {'route-target': rsp_body}
    #end route_target_http_put
    def route_target_http_delete(self, id):
        """REST DELETE handler for a route-target.

        Refuses (409) while logical-router or routing-instance back-references
        still point at the object; otherwise deletes default children, runs the
        type-specific hook and removes the object from the DB.
        """
        db_conn = self._db_conn
        # if obj doesn't exist return early
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'route_target':
                abort(404, 'No route-target object found for id %s' %(id))
            _ = db_conn.uuid_to_fq_name(id)
        except NoIdError:
            abort(404, 'ID %s does not exist' %(id))

        # Pre-delete extension hook; extension errors are deliberately swallowed.
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_route_target_delete', id)
        except Exception as e:
            pass

        # read in obj from db (accepting error) to get details of it
        obj_ids = {'uuid': id}
        back_references = [u'logical_router_back_refs', 'routing_instance_back_refs']
        children = []
        obj_fields = children + back_references
        (read_ok, read_result) = db_conn.dbe_read('route-target', obj_ids, obj_fields)
        if not read_ok:
            if read_result.startswith('Unknown id:'):
                abort(404, 'ID %s does not exist' %(id))
            else:
                self.config_object_error(id, None, 'route_target', 'http_delete', read_result)
                # proceed down to delete the resource
                # NOTE(review): on this path read_result is an error string,
                # yet .get()/['fq_name'] below treat it as a dict -- confirm
                # with the db layer whether this path is actually reachable.

        # common handling for all resource delete
        parent_type = read_result.get('parent_type', None)
        (ok, del_result) = self._delete_common(request, 'route_target', id, parent_type)
        if not ok:
            (code, msg) = del_result
            self.config_object_error(id, None, 'route_target', 'http_delete', msg)
            abort(code, msg)

        fq_name = read_result['fq_name']
        ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('route-target', fq_name)
        obj_ids['imid'] = ifmap_id
        if parent_type:
            # parent's ifmap id is derived from the fq_name minus the leaf
            parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
            obj_ids['parent_imid'] = parent_imid

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []

        # type-specific hook
        r_class = self.get_resource_class('route-target')
        if r_class:
            if read_ok:
                # fail if non-default children or backrefs exist
                logical_router_back_refs = read_result.get('logical_router_back_refs', None)
                if logical_router_back_refs:
                    back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['logical_router_back_refs']]
                    back_ref_str = ', '.join(back_ref_urls)
                    err_msg = 'Back-References from ' + back_ref_str + ' still exist'
                    self.config_object_error(id, None, 'route_target', 'http_delete', err_msg)
                    abort(409, err_msg)

                routing_instance_back_refs = read_result.get('routing_instance_back_refs', None)
                if routing_instance_back_refs:
                    back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['routing_instance_back_refs']]
                    back_ref_str = ', '.join(back_ref_urls)
                    err_msg = 'Back-References from ' + back_ref_str + ' still exist'
                    self.config_object_error(id, None, 'route_target', 'http_delete', err_msg)
                    abort(409, err_msg)


                # Delete default children first
                self._route_target_delete_default_children(read_result)

                (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
                if not ok:
                    (code, msg) = del_result
                    self.config_object_error(id, None, 'route_target', 'http_delete', msg)
                    abort(code, msg)
                callable = getattr(r_class, 'http_delete_fail', None)
                if callable:
                    cleanup_on_failure.append((callable, [id, read_result, db_conn]))
            #end if read_ok

        try:
            (ok, del_result) = db_conn.dbe_delete('route-target', obj_ids, read_result)
        except Exception as e:
            ok = False
            del_result = str(e)
        if not ok:
            # roll back via registered failure callbacks before reporting
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'route_target', 'http_delete', del_result)
            abort(409, del_result)

        # Post-delete extension hook; failures are swallowed.
        try:
            self._extension_mgrs['resourceApi'].map_method('post_route_target_delete', id, read_result)
        except Exception as e:
            pass

    #end route_target_http_delete
    def route_targets_http_post(self):
        """REST POST handler creating a route-target.

        Validates 'id_perms' via XML round-trip, runs the common post path,
        allocates the id mapping before the pubsub create so subscribers never
        see an unknown id, and unwinds registered cleanup callbacks on failure.
        Route-target is a config-root child, so no parent verification occurs.
        """
        key = 'route-target'
        obj_dict = request.json[key]
        self._post_validate(key, obj_dict=obj_dict)
        fq_name = obj_dict['fq_name']

        # Pre-create extension hook; extension errors are deliberately swallowed.
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_route_target_create', obj_dict)
        except Exception as e:
            pass

        # Validate 'id_perms' by exporting the dict to XML and re-building the
        # typed object; a build failure means bad input (400).
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource create
        (ok, result) = self._post_common(request, 'route-target', obj_dict)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict.get('fq_name', []))
            self.config_object_error(None, fq_name_str, 'route_target', 'http_post', msg)
            abort(code, msg)

        name = obj_dict['fq_name'][-1]
        fq_name = obj_dict['fq_name']

        db_conn = self._db_conn


        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # Alloc and Store id-mappings before creating entry on pubsub store.
        # Else a subscriber can ask for an id mapping before we have stored it
        # NOTE(review): 'result' still holds _post_common's success value, used
        # as the client-requested uuid (if any) -- generated convention.
        uuid_requested = result
        (ok, result) = db_conn.dbe_alloc('route-target', obj_dict, uuid_requested)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict['fq_name'])
            self.config_object_error(None, fq_name_str, 'route_target', 'http_post', result)
            abort(code, msg)
        cleanup_on_failure.append((db_conn.dbe_release, ['route_target', fq_name]))

        obj_ids = result

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')


        # type-specific hook
        r_class = self.get_resource_class('route-target')
        if r_class:
            try:
                (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
            except Exception as e:
                ok = False
                result = (500, str(e))
            if not ok:
                for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                    fail_cleanup_callable(*cleanup_args)
                (code, msg) = result
                fq_name_str = ':'.join(fq_name)
                self.config_object_error(None, fq_name_str, 'route_target', 'http_post', msg)
                abort(code, msg)
            callable = getattr(r_class, 'http_post_collection_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))

        try:
            (ok, result) = \
                db_conn.dbe_create('route-target', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)

        if not ok:
            # unwind everything registered so far, in registration order
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            fq_name_str = ':'.join(fq_name)
            self.config_object_error(None, fq_name_str, 'route_target', 'http_post', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['name'] = name
        rsp_body['fq_name'] = fq_name
        rsp_body['uuid'] = obj_ids['uuid']
        rsp_body['href'] = self.generate_url('route-target', obj_ids['uuid'])

        # Post-create extension hook; failures are swallowed.
        try:
            self._extension_mgrs['resourceApi'].map_method('post_route_target_create', obj_dict)
        except Exception as e:
            pass

        return {'route-target': rsp_body}
    #end route_targets_http_post
    def route_targets_http_get(self):
        """REST GET handler listing route-target objects.

        Anchors the list by parent fq_name/id, back_ref_id or explicit uuids;
        supports 'count', 'filters', 'detail' and 'fields' query parameters.
        Non-admin callers only see objects whose id_perms mark them visible.
        """
        # gather list of uuids using 1. any specified anchors
        # 2. any specified filters
        # if not 'detail' return list with any specified 'fields'
        # if 'detail' return list with props+refs + any specified 'fields'

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')
        parent_uuids = None
        back_ref_uuids = None
        obj_uuids = None
        if (('parent_fq_name_str' in request.query) and
            ('parent_type' in request.query)):
            parent_fq_name = request.query.parent_fq_name_str.split(':')
            parent_type = request.query.parent_type
            parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
        elif 'parent_id' in request.query:
            parent_ids = request.query.parent_id.split(',')
            # uuid.UUID() both validates and canonicalizes the ids
            parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
        if 'back_ref_id' in request.query:
            back_ref_ids = request.query.back_ref_id.split(',')
            back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
        if 'obj_uuids' in request.query:
            obj_uuids = request.query.obj_uuids.split(',')

        # common handling for all resource get
        (ok, result) = self._get_common(request, parent_uuids)
        if not ok:
            (code, msg) = result
            self.config_object_error(None, None, 'route_targets', 'http_get_collection', msg)
            abort(code, msg)

        if 'count' in request.query:
            count = 'true' in request.query.count.lower()
        else:
            count = False

        # filters come as 'name1==val1,name2==val2'
        filter_params = request.query.filters
        if filter_params:
            try:
                ff_key_vals = filter_params.split(',')
                ff_names = [ff.split('==')[0] for ff in ff_key_vals]
                ff_values = [ff.split('==')[1] for ff in ff_key_vals]
                filters = {'field_names': ff_names, 'field_values': ff_values}
            except Exception as e:
                abort(400, 'Invalid filter ' + filter_params)
        else:
            filters = None
        db_conn = self._db_conn
        (ok, result) = \
            db_conn.dbe_list('route-target', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
        if not ok:
            self.config_object_error(None, None, 'route_targets', 'http_get_collection', result)
            abort(404, result)

        # If only counting, return early
        if count:
            return {'route-targets': {'count': result}}

        if 'detail' in request.query:
            detail = 'true' in request.query.detail.lower()
        else:
            detail = False

        fq_names_uuids = result
        obj_dicts = []
        if not detail:
            if not self.is_admin_request():
                # non-admin: re-read id_perms to filter out hidden objects
                obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
                obj_fields = [u'id_perms']
                (ok, result) = db_conn.dbe_read_multi('route-target', obj_ids_list, obj_fields)
                if not ok:
                    result = []
                for obj_result in result:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    if obj_result['id_perms'].get('user_visible', True):
                        obj_dict = {}
                        obj_dict['uuid'] = obj_result['uuid']
                        obj_dict['href'] = self.generate_url('route-target', obj_result['uuid'])
                        obj_dict['fq_name'] = obj_result['fq_name']
                        obj_dicts.append(obj_dict)
            else:
                for fq_name, obj_uuid in fq_names_uuids:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    obj_dict = {}
                    obj_dict['uuid'] = obj_uuid
                    obj_dict['href'] = self.generate_url('route-target', obj_uuid)
                    obj_dict['fq_name'] = fq_name
                    obj_dicts.append(obj_dict)
        else: #detail
            obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
            obj_fields = [u'id_perms', u'display_name'] + []
            if 'fields' in request.query:
                obj_fields.extend(request.query.fields.split(','))
            (ok, result) = db_conn.dbe_read_multi('route-target', obj_ids_list, obj_fields)

            if not ok:
                result = []

            for obj_result in result:
                # give chance for zk heartbeat/ping
                gevent.sleep(0)
                obj_dict = {}
                obj_dict['name'] = obj_result['fq_name'][-1]
                obj_dict['href'] = self.generate_url('route-target', obj_result['uuid'])
                obj_dict.update(obj_result)
                if (obj_dict['id_perms'].get('user_visible', True) or
                    self.is_admin_request()):
                    obj_dicts.append({'route-target': obj_dict})

        return {'route-targets': obj_dicts}
    #end route_targets_http_get
+ #end route_targets_http_get + + def _route_target_create_default_children(self, parent_obj): + pass + #end _route_target_create_default_children + + def _route_target_delete_default_children(self, parent_dict): + pass + #end _route_target_delete_default_children + + def floating_ip_http_get(self, id): + try: + self._extension_mgrs['resourceApi'].map_method('pre_floating_ip_read', id) + except Exception as e: + pass + + # TODO get vals from request out of the global ASAP + etag = request.headers.get('If-None-Match') + try: + obj_type = self._db_conn.uuid_to_obj_type(id) + except NoIdError: + obj_type = None + if obj_type != 'floating_ip': + abort(404, 'No floating-ip object found for id %s' %(id)) + # common handling for all resource get + (ok, result) = self._get_common(request, id) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'floating_ip', 'http_get', msg) + abort(code, msg) + + # type-specific hook + r_class = self.get_resource_class('floating-ip') + if r_class: + r_class.http_get(id) + + db_conn = self._db_conn + if etag: + obj_ids = {'uuid': id} + (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', '')) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'floating_ip', 'http_get', result) + abort(404, result) + + is_latest = result + if is_latest: + # send Not-Modified, caches use this for read optimization + response.status = 304 + return + #end if etag + + obj_ids = {'uuid': id} + + # Generate field list for db layer + properties = [u'floating_ip_address', u'floating_ip_is_virtual_ip', u'floating_ip_fixed_ip_address', u'floating_ip_address_family', u'id_perms', u'display_name'] + references = [u'project_refs', 'virtual_machine_interface_refs'] + back_references = [u'floating_ip_pool_back_refs', 'customer_attachment_back_refs'] + children = [] + if 'fields' in request.query: + obj_fields = request.query.fields.split(',') + else: # default props + children + refs + backrefs + obj_fields = properties 
+ references + if 'exclude_back_refs' not in request.query: + obj_fields = obj_fields + back_references + if 'exclude_children' not in request.query: + obj_fields = obj_fields + children + + (ok, result) = db_conn.dbe_read('floating-ip', obj_ids, obj_fields) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'floating_ip', 'http_get', result) + abort(404, result) + + # check visibility + if (not result['id_perms'].get('user_visible', True) and + not self.is_admin_request()): + result = 'This object is not visible by users: %s' % id + self.config_object_error(id, None, 'floating_ip', 'http_get', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('floating-ip', id) + rsp_body['name'] = result['fq_name'][-1] + rsp_body.update(result) + id_perms = result['id_perms'] + response.set_header('ETag', '"' + id_perms['last_modified'] + '"') + try: + self._extension_mgrs['resourceApi'].map_method('post_floating_ip_read', id, rsp_body) + except Exception as e: + pass + + return {'floating-ip': rsp_body} + #end floating_ip_http_get + + def floating_ip_http_put(self, id): + key = 'floating-ip' + obj_dict = request.json[key] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_floating_ip_update', id, obj_dict) + except Exception as e: + pass + + db_conn = self._db_conn + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'floating_ip': + abort(404, 'No floating-ip object found for id %s' %(id)) + fq_name = db_conn.uuid_to_fq_name(id) + except NoIdError as e: + abort(404, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource put + (ok, result) = self._put_common(request, 'floating_ip', id, 
fq_name, obj_dict) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'floating_ip', 'http_put', msg) + abort(code, msg) + + # Validate perms + objtype_list = [u'project', 'virtual_machine_interface'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + try: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + except NoIdError as e: + abort(404, str(e)) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # type-specific hook + r_class = self.get_resource_class('floating-ip') + if r_class: + (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn) + if not ok: + (code, msg) = put_result + self.config_object_error(id, None, 'floating_ip', 'http_put', msg) + abort(code, msg) + callable = getattr(r_class, 'http_put_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn])) + + obj_ids = {'uuid': id} + try: + (ok, result) = db_conn.dbe_update('floating-ip', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'floating_ip', 'http_put', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('floating-ip', id) + + try: + self._extension_mgrs['resourceApi'].map_method('post_floating_ip_update', id, obj_dict) + except Exception as e: + pass + + return {'floating-ip': rsp_body} + #end floating_ip_http_put + + def floating_ip_http_delete(self, id): + db_conn = self._db_conn + # if obj doesn't exist return early + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'floating_ip': + 
abort(404, 'No floating-ip object found for id %s' %(id)) + _ = db_conn.uuid_to_fq_name(id) + except NoIdError: + abort(404, 'ID %s does not exist' %(id)) + + try: + self._extension_mgrs['resourceApi'].map_method('pre_floating_ip_delete', id) + except Exception as e: + pass + + # read in obj from db (accepting error) to get details of it + obj_ids = {'uuid': id} + back_references = [u'floating_ip_pool_back_refs', 'customer_attachment_back_refs'] + children = [] + obj_fields = children + back_references + (read_ok, read_result) = db_conn.dbe_read('floating-ip', obj_ids, obj_fields) + if not read_ok: + if read_result.startswith('Unknown id:'): + abort(404, 'ID %s does not exist' %(id)) + else: + self.config_object_error(id, None, 'floating_ip', 'http_delete', read_result) + # proceed down to delete the resource + + # common handling for all resource delete + parent_type = read_result.get('parent_type', None) + (ok, del_result) = self._delete_common(request, 'floating_ip', id, parent_type) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'floating_ip', 'http_delete', msg) + abort(code, msg) + + fq_name = read_result['fq_name'] + ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('floating-ip', fq_name) + obj_ids['imid'] = ifmap_id + if parent_type: + parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1]) + obj_ids['parent_imid'] = parent_imid + + # State modification starts from here. 
Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + + # type-specific hook + r_class = self.get_resource_class('floating-ip') + if r_class: + if read_ok: + # fail if non-default children or backrefs exist + customer_attachment_back_refs = read_result.get('customer_attachment_back_refs', None) + if customer_attachment_back_refs: + back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['customer_attachment_back_refs']] + back_ref_str = ', '.join(back_ref_urls) + err_msg = 'Back-References from ' + back_ref_str + ' still exist' + self.config_object_error(id, None, 'floating_ip', 'http_delete', err_msg) + abort(409, err_msg) + + + # Delete default children first + self._floating_ip_delete_default_children(read_result) + + (ok, del_result) = r_class.http_delete(id, read_result, db_conn) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'floating_ip', 'http_delete', msg) + abort(code, msg) + callable = getattr(r_class, 'http_delete_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, read_result, db_conn])) + #end if read_ok + + try: + (ok, del_result) = db_conn.dbe_delete('floating-ip', obj_ids, read_result) + except Exception as e: + ok = False + del_result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'floating_ip', 'http_delete', del_result) + abort(409, del_result) + + try: + self._extension_mgrs['resourceApi'].map_method('post_floating_ip_delete', id, read_result) + except Exception as e: + pass + + #end floating_ip_http_delete + + def floating_ips_http_post(self): + key = 'floating-ip' + obj_dict = request.json[key] + self._post_validate(key, obj_dict=obj_dict) + fq_name = obj_dict['fq_name'] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_floating_ip_create', obj_dict) + except Exception as e: + pass + + prop_dict = obj_dict.get('id_perms') + if 
prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource create + (ok, result) = self._post_common(request, 'floating-ip', obj_dict) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict.get('fq_name', [])) + self.config_object_error(None, fq_name_str, 'floating_ip', 'http_post', msg) + abort(code, msg) + + name = obj_dict['fq_name'][-1] + fq_name = obj_dict['fq_name'] + + db_conn = self._db_conn + + # if client gave parent_type of config-root, ignore and remove + if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root': + del obj_dict['parent_type'] + + if 'parent_type' in obj_dict: + # non config-root child, verify parent exists + parent_type = obj_dict['parent_type'] + parent_fq_name = obj_dict['fq_name'][:-1] + try: + parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name) + (ok, status) = self._permissions.check_perms_write(request, parent_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + self._permissions.set_user_role(request, obj_dict) + except NoIdError: + err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist' + fq_name_str = ':'.join(parent_fq_name) + self.config_object_error(None, fq_name_str, 'floating_ip', 'http_post', err_msg) + abort(400, err_msg) + + # Validate perms + objtype_list = [u'project', 'virtual_machine_interface'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. 
Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # Alloc and Store id-mappings before creating entry on pubsub store. + # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('floating-ip', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'floating_ip', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['floating_ip', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('floating-ip') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'floating_ip', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('floating-ip', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'floating_ip', 'http_post', result) + abort(404, result) + + rsp_body = {} + rsp_body['name'] = name + rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = self.generate_url('floating-ip', obj_ids['uuid']) + if 'parent_type' in obj_dict: + # non config-root 
child, send back parent uuid/href + rsp_body['parent_uuid'] = parent_uuid + rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid) + + try: + self._extension_mgrs['resourceApi'].map_method('post_floating_ip_create', obj_dict) + except Exception as e: + pass + + return {'floating-ip': rsp_body} + #end floating_ips_http_post + + def floating_ips_http_get(self): + # gather list of uuids using 1. any specified anchors + # 2. any specified filters + # if not 'detail' return list with any specified 'fields' + # if 'detail' return list with props+refs + any specified 'fields' + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + parent_uuids = None + back_ref_uuids = None + obj_uuids = None + if (('parent_fq_name_str' in request.query) and + ('parent_type' in request.query)): + parent_fq_name = request.query.parent_fq_name_str.split(':') + parent_type = request.query.parent_type + parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)] + elif 'parent_id' in request.query: + parent_ids = request.query.parent_id.split(',') + parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids] + if 'back_ref_id' in request.query: + back_ref_ids = request.query.back_ref_id.split(',') + back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids] + if 'obj_uuids' in request.query: + obj_uuids = request.query.obj_uuids.split(',') + + # common handling for all resource get + (ok, result) = self._get_common(request, parent_uuids) + if not ok: + (code, msg) = result + self.config_object_error(None, None, 'floating_ips', 'http_get_collection', msg) + abort(code, msg) + + if 'count' in request.query: + count = 'true' in request.query.count.lower() + else: + count = False + + filter_params = request.query.filters + if filter_params: + try: + ff_key_vals = filter_params.split(',') + ff_names = [ff.split('==')[0] for ff in ff_key_vals] + ff_values = [ff.split('==')[1] for ff in ff_key_vals] + filters = 
{'field_names': ff_names, 'field_values': ff_values} + except Exception as e: + abort(400, 'Invalid filter ' + filter_params) + else: + filters = None + db_conn = self._db_conn + (ok, result) = \ + db_conn.dbe_list('floating-ip', parent_uuids, back_ref_uuids, obj_uuids, count, filters) + if not ok: + self.config_object_error(None, None, 'floating_ips', 'http_get_collection', result) + abort(404, result) + + # If only counting, return early + if count: + return {'floating-ips': {'count': result}} + + if 'detail' in request.query: + detail = 'true' in request.query.detail.lower() + else: + detail = False + + fq_names_uuids = result + obj_dicts = [] + if not detail: + if not self.is_admin_request(): + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms'] + (ok, result) = db_conn.dbe_read_multi('floating-ip', obj_ids_list, obj_fields) + if not ok: + result = [] + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + if obj_result['id_perms'].get('user_visible', True): + obj_dict = {} + obj_dict['uuid'] = obj_result['uuid'] + obj_dict['href'] = self.generate_url('floating-ip', obj_result['uuid']) + obj_dict['fq_name'] = obj_result['fq_name'] + obj_dicts.append(obj_dict) + else: + for fq_name, obj_uuid in fq_names_uuids: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['uuid'] = obj_uuid + obj_dict['href'] = self.generate_url('floating-ip', obj_uuid) + obj_dict['fq_name'] = fq_name + obj_dicts.append(obj_dict) + else: #detail + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'floating_ip_address', u'floating_ip_is_virtual_ip', u'floating_ip_fixed_ip_address', u'floating_ip_address_family', u'id_perms', u'display_name'] + [u'project_refs', 'virtual_machine_interface_refs'] + if 'fields' in request.query: + obj_fields.extend(request.query.fields.split(',')) + (ok, result) = db_conn.dbe_read_multi('floating-ip', obj_ids_list, 
obj_fields) + + if not ok: + result = [] + + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['name'] = obj_result['fq_name'][-1] + obj_dict['href'] = self.generate_url('floating-ip', obj_result['uuid']) + obj_dict.update(obj_result) + if (obj_dict['id_perms'].get('user_visible', True) or + self.is_admin_request()): + obj_dicts.append({'floating-ip': obj_dict}) + + return {'floating-ips': obj_dicts} + #end floating_ips_http_get + + def _floating_ip_create_default_children(self, parent_obj): + pass + #end _floating_ip_create_default_children + + def _floating_ip_delete_default_children(self, parent_dict): + pass + #end _floating_ip_delete_default_children + + def floating_ip_pool_http_get(self, id): + try: + self._extension_mgrs['resourceApi'].map_method('pre_floating_ip_pool_read', id) + except Exception as e: + pass + + # TODO get vals from request out of the global ASAP + etag = request.headers.get('If-None-Match') + try: + obj_type = self._db_conn.uuid_to_obj_type(id) + except NoIdError: + obj_type = None + if obj_type != 'floating_ip_pool': + abort(404, 'No floating-ip-pool object found for id %s' %(id)) + # common handling for all resource get + (ok, result) = self._get_common(request, id) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'floating_ip_pool', 'http_get', msg) + abort(code, msg) + + # type-specific hook + r_class = self.get_resource_class('floating-ip-pool') + if r_class: + r_class.http_get(id) + + db_conn = self._db_conn + if etag: + obj_ids = {'uuid': id} + (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', '')) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'floating_ip_pool', 'http_get', result) + abort(404, result) + + is_latest = result + if is_latest: + # send Not-Modified, caches use this for read optimization + response.status = 304 + return + #end if etag + + obj_ids = {'uuid': id} + + # Generate field list for db 
layer + properties = [u'floating_ip_pool_prefixes', u'id_perms', u'display_name'] + references = [] + back_references = [u'virtual_network_back_refs', u'project_back_refs'] + children = [u'floating_ips'] + if 'fields' in request.query: + obj_fields = request.query.fields.split(',') + else: # default props + children + refs + backrefs + obj_fields = properties + references + if 'exclude_back_refs' not in request.query: + obj_fields = obj_fields + back_references + if 'exclude_children' not in request.query: + obj_fields = obj_fields + children + + (ok, result) = db_conn.dbe_read('floating-ip-pool', obj_ids, obj_fields) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'floating_ip_pool', 'http_get', result) + abort(404, result) + + # check visibility + if (not result['id_perms'].get('user_visible', True) and + not self.is_admin_request()): + result = 'This object is not visible by users: %s' % id + self.config_object_error(id, None, 'floating_ip_pool', 'http_get', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('floating-ip-pool', id) + rsp_body['name'] = result['fq_name'][-1] + rsp_body.update(result) + id_perms = result['id_perms'] + response.set_header('ETag', '"' + id_perms['last_modified'] + '"') + try: + self._extension_mgrs['resourceApi'].map_method('post_floating_ip_pool_read', id, rsp_body) + except Exception as e: + pass + + return {'floating-ip-pool': rsp_body} + #end floating_ip_pool_http_get + + def floating_ip_pool_http_put(self, id): + key = 'floating-ip-pool' + obj_dict = request.json[key] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_floating_ip_pool_update', id, obj_dict) + except Exception as e: + pass + + db_conn = self._db_conn + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'floating_ip_pool': + abort(404, 'No floating-ip-pool object found for id %s' %(id)) + fq_name = db_conn.uuid_to_fq_name(id) + except NoIdError as e: + 
abort(404, str(e)) + prop_dict = obj_dict.get('floating_ip_pool_prefixes') + if prop_dict: + buf = cStringIO.StringIO() + xx_floating_ip_pool_prefixes = FloatingIpPoolType(**prop_dict) + xx_floating_ip_pool_prefixes.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_floating_ip_pool_prefixes = FloatingIpPoolType() + try: + xx_floating_ip_pool_prefixes.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource put + (ok, result) = self._put_common(request, 'floating_ip_pool', id, fq_name, obj_dict) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'floating_ip_pool', 'http_put', msg) + abort(code, msg) + + # Validate perms + objtype_list = [u'floating_ip'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + try: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + except NoIdError as e: + abort(404, str(e)) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. 
Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # type-specific hook + r_class = self.get_resource_class('floating-ip-pool') + if r_class: + (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn) + if not ok: + (code, msg) = put_result + self.config_object_error(id, None, 'floating_ip_pool', 'http_put', msg) + abort(code, msg) + callable = getattr(r_class, 'http_put_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn])) + + obj_ids = {'uuid': id} + try: + (ok, result) = db_conn.dbe_update('floating-ip-pool', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'floating_ip_pool', 'http_put', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('floating-ip-pool', id) + + try: + self._extension_mgrs['resourceApi'].map_method('post_floating_ip_pool_update', id, obj_dict) + except Exception as e: + pass + + return {'floating-ip-pool': rsp_body} + #end floating_ip_pool_http_put + + def floating_ip_pool_http_delete(self, id): + db_conn = self._db_conn + # if obj doesn't exist return early + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'floating_ip_pool': + abort(404, 'No floating-ip-pool object found for id %s' %(id)) + _ = db_conn.uuid_to_fq_name(id) + except NoIdError: + abort(404, 'ID %s does not exist' %(id)) + + try: + self._extension_mgrs['resourceApi'].map_method('pre_floating_ip_pool_delete', id) + except Exception as e: + pass + + # read in obj from db (accepting error) to get details of it + obj_ids = {'uuid': id} + back_references = [u'virtual_network_back_refs', u'project_back_refs'] + children = [u'floating_ips'] + obj_fields = children + back_references + (read_ok, read_result) = 
db_conn.dbe_read('floating-ip-pool', obj_ids, obj_fields) + if not read_ok: + if read_result.startswith('Unknown id:'): + abort(404, 'ID %s does not exist' %(id)) + else: + self.config_object_error(id, None, 'floating_ip_pool', 'http_delete', read_result) + # proceed down to delete the resource + + # common handling for all resource delete + parent_type = read_result.get('parent_type', None) + (ok, del_result) = self._delete_common(request, 'floating_ip_pool', id, parent_type) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'floating_ip_pool', 'http_delete', msg) + abort(code, msg) + + fq_name = read_result['fq_name'] + ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('floating-ip-pool', fq_name) + obj_ids['imid'] = ifmap_id + if parent_type: + parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1]) + obj_ids['parent_imid'] = parent_imid + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + + # type-specific hook + r_class = self.get_resource_class('floating-ip-pool') + if r_class: + if read_ok: + # fail if non-default children or backrefs exist + floating_ips = read_result.get('floating_ips', None) + if floating_ips: + has_infos = read_result['floating_ips'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-floating-ip')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'floating_ip_pool', 'http_delete', err_msg) + abort(409, err_msg) + + project_back_refs = read_result.get('project_back_refs', None) + if project_back_refs: + back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['project_back_refs']] + back_ref_str = ', '.join(back_ref_urls) + err_msg = 'Back-References from ' + back_ref_str + ' still exist' + self.config_object_error(id, None, 
'floating_ip_pool', 'http_delete', err_msg) + abort(409, err_msg) + + + # Delete default children first + self._floating_ip_pool_delete_default_children(read_result) + + (ok, del_result) = r_class.http_delete(id, read_result, db_conn) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'floating_ip_pool', 'http_delete', msg) + abort(code, msg) + callable = getattr(r_class, 'http_delete_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, read_result, db_conn])) + #end if read_ok + + try: + (ok, del_result) = db_conn.dbe_delete('floating-ip-pool', obj_ids, read_result) + except Exception as e: + ok = False + del_result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'floating_ip_pool', 'http_delete', del_result) + abort(409, del_result) + + try: + self._extension_mgrs['resourceApi'].map_method('post_floating_ip_pool_delete', id, read_result) + except Exception as e: + pass + + #end floating_ip_pool_http_delete + + def floating_ip_pools_http_post(self): + key = 'floating-ip-pool' + obj_dict = request.json[key] + self._post_validate(key, obj_dict=obj_dict) + fq_name = obj_dict['fq_name'] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_floating_ip_pool_create', obj_dict) + except Exception as e: + pass + + prop_dict = obj_dict.get('floating_ip_pool_prefixes') + if prop_dict: + buf = cStringIO.StringIO() + xx_floating_ip_pool_prefixes = FloatingIpPoolType(**prop_dict) + xx_floating_ip_pool_prefixes.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_floating_ip_pool_prefixes = FloatingIpPoolType() + try: + xx_floating_ip_pool_prefixes.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + 
xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource create + (ok, result) = self._post_common(request, 'floating-ip-pool', obj_dict) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict.get('fq_name', [])) + self.config_object_error(None, fq_name_str, 'floating_ip_pool', 'http_post', msg) + abort(code, msg) + + name = obj_dict['fq_name'][-1] + fq_name = obj_dict['fq_name'] + + db_conn = self._db_conn + + # if client gave parent_type of config-root, ignore and remove + if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root': + del obj_dict['parent_type'] + + if 'parent_type' in obj_dict: + # non config-root child, verify parent exists + parent_type = obj_dict['parent_type'] + parent_fq_name = obj_dict['fq_name'][:-1] + try: + parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name) + (ok, status) = self._permissions.check_perms_write(request, parent_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + self._permissions.set_user_role(request, obj_dict) + except NoIdError: + err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist' + fq_name_str = ':'.join(parent_fq_name) + self.config_object_error(None, fq_name_str, 'floating_ip_pool', 'http_post', err_msg) + abort(400, err_msg) + + # Validate perms + objtype_list = [u'floating_ip'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # Alloc and Store id-mappings before creating entry on pubsub store. 
+ # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('floating-ip-pool', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'floating_ip_pool', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['floating_ip_pool', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('floating-ip-pool') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'floating_ip_pool', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('floating-ip-pool', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'floating_ip_pool', 'http_post', result) + abort(404, result) + + rsp_body = {} + rsp_body['name'] = name + rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = self.generate_url('floating-ip-pool', obj_ids['uuid']) + if 'parent_type' in obj_dict: + # non config-root child, send back parent uuid/href + rsp_body['parent_uuid'] = parent_uuid + rsp_body['parent_href'] = 
                self.generate_url(parent_type, parent_uuid)

        # Best-effort post-create extension hook; failures are deliberately swallowed.
        try:
            self._extension_mgrs['resourceApi'].map_method('post_floating_ip_pool_create', obj_dict)
        except Exception as e:
            pass

        return {'floating-ip-pool': rsp_body}
    #end floating_ip_pools_http_post

    def floating_ip_pools_http_get(self):
        """REST handler: collection GET for floating-ip-pool objects.

        Anchors the listing via query params (parent_fq_name_str+parent_type,
        parent_id, back_ref_id, obj_uuids) and optional 'filters'.  Returns
        {'floating-ip-pools': ...} holding a count, summary entries
        (uuid/href/fq_name), or full detail dicts depending on
        'count'/'detail'/'fields' query params.
        """
        # gather list of uuids using 1. any specified anchors
        #                            2. any specified filters
        # if not 'detail' return list with any specified 'fields'
        # if 'detail' return list with props+refs + any specified 'fields'

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')
        parent_uuids = None
        back_ref_uuids = None
        obj_uuids = None
        if (('parent_fq_name_str' in request.query) and
            ('parent_type' in request.query)):
            parent_fq_name = request.query.parent_fq_name_str.split(':')
            parent_type = request.query.parent_type
            parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
        elif 'parent_id' in request.query:
            # parent_id wins only when no parent_fq_name_str/parent_type pair given
            parent_ids = request.query.parent_id.split(',')
            parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
        if 'back_ref_id' in request.query:
            back_ref_ids = request.query.back_ref_id.split(',')
            back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
        if 'obj_uuids' in request.query:
            obj_uuids = request.query.obj_uuids.split(',')

        # common handling for all resource get
        (ok, result) = self._get_common(request, parent_uuids)
        if not ok:
            (code, msg) = result
            self.config_object_error(None, None, 'floating_ip_pools', 'http_get_collection', msg)
            abort(code, msg)

        if 'count' in request.query:
            count = 'true' in request.query.count.lower()
        else:
            count = False

        # filters arrive as comma-separated 'name==value' pairs
        filter_params = request.query.filters
        if filter_params:
            try:
                ff_key_vals = filter_params.split(',')
                ff_names = [ff.split('==')[0] for ff in ff_key_vals]
                ff_values = [ff.split('==')[1] for ff in ff_key_vals]
                filters = {'field_names': ff_names, 'field_values': ff_values}
            except Exception as e:
                abort(400, 'Invalid filter ' + filter_params)
        else:
            filters = None
        db_conn = self._db_conn
        (ok, result) = \
            db_conn.dbe_list('floating-ip-pool', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
        if not ok:
            self.config_object_error(None, None, 'floating_ip_pools', 'http_get_collection', result)
            abort(404, result)

        # If only counting, return early
        if count:
            return {'floating-ip-pools': {'count': result}}

        if 'detail' in request.query:
            detail = 'true' in request.query.detail.lower()
        else:
            detail = False

        fq_names_uuids = result
        obj_dicts = []
        if not detail:
            if not self.is_admin_request():
                # non-admin: re-read id_perms to drop user_visible=False objects
                obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
                obj_fields = [u'id_perms']
                (ok, result) = db_conn.dbe_read_multi('floating-ip-pool', obj_ids_list, obj_fields)
                if not ok:
                    result = []
                for obj_result in result:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    if obj_result['id_perms'].get('user_visible', True):
                        obj_dict = {}
                        obj_dict['uuid'] = obj_result['uuid']
                        obj_dict['href'] = self.generate_url('floating-ip-pool', obj_result['uuid'])
                        obj_dict['fq_name'] = obj_result['fq_name']
                        obj_dicts.append(obj_dict)
            else:
                for fq_name, obj_uuid in fq_names_uuids:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    obj_dict = {}
                    obj_dict['uuid'] = obj_uuid
                    obj_dict['href'] = self.generate_url('floating-ip-pool', obj_uuid)
                    obj_dict['fq_name'] = fq_name
                    obj_dicts.append(obj_dict)
        else: #detail
            obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
            obj_fields = [u'floating_ip_pool_prefixes', u'id_perms', u'display_name'] + []
            if 'fields' in request.query:
                obj_fields.extend(request.query.fields.split(','))
            (ok, result) = db_conn.dbe_read_multi('floating-ip-pool', obj_ids_list, obj_fields)

            if not ok:
                result = []

            for obj_result in result:
                # give chance for zk heartbeat/ping
                gevent.sleep(0)
                obj_dict = {}
                obj_dict['name'] = obj_result['fq_name'][-1]
                obj_dict['href'] = self.generate_url('floating-ip-pool', obj_result['uuid'])
                obj_dict.update(obj_result)
                if (obj_dict['id_perms'].get('user_visible', True) or
                    self.is_admin_request()):
                    obj_dicts.append({'floating-ip-pool': obj_dict})

        return {'floating-ip-pools': obj_dicts}
    #end floating_ip_pools_http_get

    def _floating_ip_pool_create_default_children(self, parent_obj):
        """Create the default floating-ip child under *parent_obj*, if the
        floating-ip resource class is provisioned to generate one."""
        # Create a default child only if provisioned for
        r_class = self.get_resource_class('floating-ip')
        if r_class and r_class.generate_default_instance:
            child_obj = FloatingIp(parent_obj = parent_obj)
            child_dict = child_obj.__dict__
            fq_name = child_dict['fq_name']
            child_dict['id_perms'] = self._get_default_id_perms('floating-ip')

            db_conn = self._db_conn
            (ok, result) = db_conn.dbe_alloc('floating-ip', child_dict)
            if not ok:
                return (ok, result)

            obj_ids = result
            db_conn.dbe_create('floating-ip', obj_ids, child_dict)
            # recurse for the new child's own default children
            self._floating_ip_create_default_children(child_obj)

        pass
    #end _floating_ip_pool_create_default_children

    def _floating_ip_pool_delete_default_children(self, parent_dict):
        """Delete the default floating-ip child referenced from *parent_dict*
        (by its 'default-floating-ip' fq_name tail), if provisioned."""
        # Delete a default child only if provisioned for
        r_class = self.get_resource_class('floating-ip')
        if r_class and r_class.generate_default_instance:
            # first locate default child then delete it
            has_infos = parent_dict.get('floating_ips')
            if has_infos:
                for has_info in has_infos:
                    if has_info['to'][-1] == 'default-floating-ip':
                        default_child_id = has_info['href'].split('/')[-1]
                        self.floating_ip_http_delete(default_child_id)
                        break

        pass
    #end _floating_ip_pool_delete_default_children

    def physical_router_http_get(self, id):
        """REST handler: GET a single physical-router by uuid *id*.

        Supports If-None-Match/ETag (304), 'fields', 'exclude_back_refs' and
        'exclude_children' query params; 404 when missing or not visible.
        """
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_physical_router_read', id)
        except Exception as e:
            pass

        # TODO get vals from request out of the global ASAP
        etag = request.headers.get('If-None-Match')
        try:
            obj_type = self._db_conn.uuid_to_obj_type(id)
        except NoIdError:
            obj_type = None
        if obj_type != 'physical_router':
            abort(404, 'No physical-router object found for id %s' %(id))
        # common handling for all resource get
        (ok, result) = self._get_common(request, id)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'physical_router', 'http_get', msg)
            abort(code, msg)

        # type-specific hook
        r_class = self.get_resource_class('physical-router')
        if r_class:
            r_class.http_get(id)

        db_conn = self._db_conn
        if etag:
            obj_ids = {'uuid': id}
            # strip the quotes the ETag was sent back with
            (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
            if not ok:
                # Not present in DB
                self.config_object_error(id, None, 'physical_router', 'http_get', result)
                abort(404, result)

            is_latest = result
            if is_latest:
                # send Not-Modified, caches use this for read optimization
                response.status = 304
                return
        #end if etag

        obj_ids = {'uuid': id}

        # Generate field list for db layer
        properties = [u'physical_router_management_ip', u'physical_router_dataplane_ip', u'physical_router_vendor_name', u'physical_router_product_name', u'physical_router_vnc_managed', u'physical_router_user_credentials', u'physical_router_snmp_credentials', u'physical_router_junos_service_ports', u'id_perms', u'display_name']
        references = ['virtual_router_refs', 'bgp_router_refs', u'virtual_network_refs']
        back_references = [u'global_system_config_back_refs']
        children = [u'physical_interfaces', u'logical_interfaces']
        if 'fields' in request.query:
            obj_fields = request.query.fields.split(',')
        else: # default props + children + refs + backrefs
            obj_fields = properties + references
            if 'exclude_back_refs' not in request.query:
                obj_fields = obj_fields + back_references
            if 'exclude_children' not in request.query:
                obj_fields = obj_fields + children

        (ok, result) = db_conn.dbe_read('physical-router', obj_ids, obj_fields)
        if not ok:
            # Not present in DB
            self.config_object_error(id, None, 'physical_router', 'http_get', result)
            abort(404, result)

        # check visibility
        if (not result['id_perms'].get('user_visible', True) and
            not self.is_admin_request()):
            result = 'This object is not visible by users: %s' % id
            self.config_object_error(id, None, 'physical_router', 'http_get', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('physical-router', id)
        rsp_body['name'] = result['fq_name'][-1]
        rsp_body.update(result)
        id_perms = result['id_perms']
        response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
        try:
            self._extension_mgrs['resourceApi'].map_method('post_physical_router_read', id, rsp_body)
        except Exception as e:
            pass

        return {'physical-router': rsp_body}
    #end physical_router_http_get

    def physical_router_http_put(self, id):
        """REST handler: PUT (update) a physical-router by uuid *id*.

        Validates complex properties by a round-trip through their XML
        schema types, checks link permissions on refs, invokes the
        type-specific http_put hook, then writes via dbe_update.
        Returns {'physical-router': {uuid, href}}.
        """
        key = 'physical-router'
        obj_dict = request.json[key]

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_physical_router_update', id, obj_dict)
        except Exception as e:
            pass

        db_conn = self._db_conn
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'physical_router':
                abort(404, 'No physical-router object found for id %s' %(id))
            fq_name = db_conn.uuid_to_fq_name(id)
        except NoIdError as e:
            abort(404, str(e))
        # Validate each complex property by exporting it to XML and
        # rebuilding it with the generated schema type (raises on bad input).
        prop_dict = obj_dict.get('physical_router_user_credentials')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_physical_router_user_credentials = UserCredentials(**prop_dict)
            xx_physical_router_user_credentials.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_physical_router_user_credentials = UserCredentials()
            try:
                xx_physical_router_user_credentials.build(node)
            except Exception as e:
                abort(400, str(e))
        prop_dict = obj_dict.get('physical_router_snmp_credentials')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_physical_router_snmp_credentials = SNMPCredentials(**prop_dict)
            xx_physical_router_snmp_credentials.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_physical_router_snmp_credentials = SNMPCredentials()
            try:
                xx_physical_router_snmp_credentials.build(node)
            except Exception as e:
                abort(400, str(e))
        prop_dict = obj_dict.get('physical_router_junos_service_ports')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_physical_router_junos_service_ports = JunosServicePorts(**prop_dict)
            xx_physical_router_junos_service_ports.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_physical_router_junos_service_ports = JunosServicePorts()
            try:
                xx_physical_router_junos_service_ports.build(node)
            except Exception as e:
                abort(400, str(e))
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource put
        (ok, result) = self._put_common(request, 'physical_router', id, fq_name, obj_dict)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'physical_router', 'http_put', msg)
            abort(code, msg)

        # Validate perms
        objtype_list = ['virtual_router', 'bgp_router', u'virtual_network', u'physical_interface', u'logical_interface']
        for obj_type in objtype_list:
            refs = obj_dict.get('%s_refs'%(obj_type), None)
            if refs:
                for ref in refs:
                    try:
                        ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
                    except NoIdError as e:
                        abort(404, str(e))
                    (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
                    if not ok:
                        (code, err_msg) = status
                        abort(code, err_msg)

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # type-specific hook
        r_class = self.get_resource_class('physical-router')
        if r_class:
            (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
            if not ok:
                (code, msg) = put_result
                self.config_object_error(id, None, 'physical_router', 'http_put', msg)
                abort(code, msg)
            # NOTE(review): 'callable' shadows the builtin of the same name
            # (generator artifact); harmless within this scope.
            callable = getattr(r_class, 'http_put_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))

        obj_ids = {'uuid': id}
        try:
            (ok, result) = db_conn.dbe_update('physical-router', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)
        if not ok:
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'physical_router', 'http_put', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('physical-router', id)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_physical_router_update', id, obj_dict)
        except Exception as e:
            pass

        return {'physical-router': rsp_body}
    #end physical_router_http_put

    def physical_router_http_delete(self, id):
        """REST handler: DELETE a physical-router by uuid *id*.

        Refuses (409) when non-default children still exist; deletes the
        default children first, then the object itself via dbe_delete.
        """
        db_conn = self._db_conn
        # if obj doesn't exist return early
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'physical_router':
                abort(404, 'No physical-router object found for id %s' %(id))
            _ = db_conn.uuid_to_fq_name(id)
        except NoIdError:
            abort(404, 'ID %s does not exist' %(id))

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_physical_router_delete', id)
        except Exception as e:
            pass

        # read in obj from db (accepting error) to get details of it
        obj_ids = {'uuid': id}
        back_references = [u'global_system_config_back_refs']
        children = [u'physical_interfaces', u'logical_interfaces']
        obj_fields = children + back_references
        (read_ok, read_result) = \
                db_conn.dbe_read('physical-router', obj_ids, obj_fields)
        if not read_ok:
            if read_result.startswith('Unknown id:'):
                abort(404, 'ID %s does not exist' %(id))
            else:
                self.config_object_error(id, None, 'physical_router', 'http_delete', read_result)
                # proceed down to delete the resource

        # common handling for all resource delete
        parent_type = read_result.get('parent_type', None)
        (ok, del_result) = self._delete_common(request, 'physical_router', id, parent_type)
        if not ok:
            (code, msg) = del_result
            self.config_object_error(id, None, 'physical_router', 'http_delete', msg)
            abort(code, msg)

        fq_name = read_result['fq_name']
        ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('physical-router', fq_name)
        obj_ids['imid'] = ifmap_id
        if parent_type:
            parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
            obj_ids['parent_imid'] = parent_imid

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []

        # type-specific hook
        r_class = self.get_resource_class('physical-router')
        if r_class:
            if read_ok:
                # fail if non-default children or backrefs exist
                physical_interfaces = read_result.get('physical_interfaces', None)
                if physical_interfaces:
                    has_infos = read_result['physical_interfaces']
                    if ((len(has_infos) > 1) or
                        (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-physical-interface')):
                        has_urls = [has_info['href'] for has_info in has_infos]
                        has_str = ', '.join(has_urls)
                        err_msg = 'Children ' + has_str + ' still exist'
                        self.config_object_error(id, None, 'physical_router', 'http_delete', err_msg)
                        abort(409, err_msg)

                logical_interfaces = read_result.get('logical_interfaces', None)
                if logical_interfaces:
                    has_infos = read_result['logical_interfaces']
                    if ((len(has_infos) > 1) or
                        (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-logical-interface')):
                        has_urls = [has_info['href'] for has_info in has_infos]
                        has_str = ', '.join(has_urls)
                        err_msg = 'Children ' + has_str + ' still exist'
                        self.config_object_error(id, None, 'physical_router', 'http_delete', err_msg)
                        abort(409, err_msg)


                # Delete default children first
                self._physical_router_delete_default_children(read_result)

                (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
                if not ok:
                    (code, msg) = del_result
                    self.config_object_error(id, None, 'physical_router', 'http_delete', msg)
                    abort(code, msg)
                callable = getattr(r_class, 'http_delete_fail', None)
                if callable:
                    cleanup_on_failure.append((callable, [id, read_result, db_conn]))
            #end if read_ok

        try:
            (ok, del_result) = db_conn.dbe_delete('physical-router', obj_ids, read_result)
        except Exception as e:
            ok = False
            del_result = str(e)
        if not ok:
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'physical_router', 'http_delete', del_result)
            abort(409, del_result)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_physical_router_delete', id, read_result)
        except Exception as e:
            pass

    #end physical_router_http_delete

    def physical_routers_http_post(self):
        """REST handler: collection POST (create) for physical-router.

        Validates complex properties through their XML schema types,
        verifies parent existence and write/link permissions, allocates
        ids before the pubsub-visible create, runs the type-specific
        http_post_collection hook, then persists via dbe_create.
        Returns {'physical-router': {name, fq_name, uuid, href, ...}}.
        """
        key = 'physical-router'
        obj_dict = request.json[key]
        self._post_validate(key, obj_dict=obj_dict)
        fq_name = obj_dict['fq_name']

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_physical_router_create', obj_dict)
        except Exception as e:
            pass

        # Validate each complex property via an XML export/build round-trip.
        prop_dict = obj_dict.get('physical_router_user_credentials')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_physical_router_user_credentials = UserCredentials(**prop_dict)
            xx_physical_router_user_credentials.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_physical_router_user_credentials = UserCredentials()
            try:
                xx_physical_router_user_credentials.build(node)
            except Exception as e:
                abort(400, str(e))
        prop_dict = obj_dict.get('physical_router_snmp_credentials')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_physical_router_snmp_credentials = SNMPCredentials(**prop_dict)
            xx_physical_router_snmp_credentials.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_physical_router_snmp_credentials = SNMPCredentials()
            try:
                xx_physical_router_snmp_credentials.build(node)
            except Exception as e:
                abort(400, str(e))
        prop_dict = obj_dict.get('physical_router_junos_service_ports')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_physical_router_junos_service_ports = JunosServicePorts(**prop_dict)
            xx_physical_router_junos_service_ports.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_physical_router_junos_service_ports = JunosServicePorts()
            try:
                xx_physical_router_junos_service_ports.build(node)
            except Exception as e:
                abort(400, str(e))
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource create
        (ok, result) = self._post_common(request, 'physical-router', obj_dict)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict.get('fq_name', []))
            self.config_object_error(None, fq_name_str, 'physical_router', 'http_post', msg)
            abort(code, msg)

        name = obj_dict['fq_name'][-1]
        fq_name = obj_dict['fq_name']

        db_conn = self._db_conn

        # if client gave parent_type of config-root, ignore and remove
        if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
            del obj_dict['parent_type']

        if 'parent_type' in obj_dict:
            # non config-root child, verify parent exists
            parent_type = obj_dict['parent_type']
            parent_fq_name = obj_dict['fq_name'][:-1]
            try:
                parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
                (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
                if not ok:
                    (code, err_msg) = status
                    abort(code, err_msg)
                self._permissions.set_user_role(request, obj_dict)
            except NoIdError:
                err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
                fq_name_str = ':'.join(parent_fq_name)
                self.config_object_error(None, fq_name_str, 'physical_router', 'http_post', err_msg)
                abort(400, err_msg)

        # Validate perms
        objtype_list = ['virtual_router', 'bgp_router', u'virtual_network', u'physical_interface', u'logical_interface']
        for obj_type in objtype_list:
            refs = obj_dict.get('%s_refs'%(obj_type), None)
            if refs:
                for ref in refs:
                    ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
                    (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
                    if not ok:
                        (code, err_msg) = status
                        abort(code, err_msg)

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # Alloc and Store id-mappings before creating entry on pubsub store.
        # Else a subscriber can ask for an id mapping before we have stored it
        # (result here is the value returned by _post_common above; it is
        # passed through to dbe_alloc as the requested uuid)
        uuid_requested = result
        (ok, result) = db_conn.dbe_alloc('physical-router', obj_dict, uuid_requested)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict['fq_name'])
            self.config_object_error(None, fq_name_str, 'physical_router', 'http_post', result)
            abort(code, msg)
        cleanup_on_failure.append((db_conn.dbe_release, ['physical_router', fq_name]))

        obj_ids = result

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')


        # type-specific hook
        r_class = self.get_resource_class('physical-router')
        if r_class:
            try:
                (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
            except Exception as e:
                ok = False
                result = (500, str(e))
            if not ok:
                for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                    fail_cleanup_callable(*cleanup_args)
                (code, msg) = result
                fq_name_str = ':'.join(fq_name)
                self.config_object_error(None, fq_name_str, 'physical_router', 'http_post', msg)
                abort(code, msg)
            callable = getattr(r_class, 'http_post_collection_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))

        try:
            (ok, result) = \
                db_conn.dbe_create('physical-router', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)

        if not ok:
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            fq_name_str = ':'.join(fq_name)
            self.config_object_error(None, fq_name_str, 'physical_router', 'http_post', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['name'] = name
        rsp_body['fq_name'] = fq_name
        rsp_body['uuid'] = obj_ids['uuid']
        rsp_body['href'] = self.generate_url('physical-router', obj_ids['uuid'])
        if 'parent_type' in obj_dict:
            # non config-root child, send back parent uuid/href
            rsp_body['parent_uuid'] = parent_uuid
            rsp_body['parent_href'] = \
                self.generate_url(parent_type, parent_uuid)

        # Best-effort post-create extension hook; failures are deliberately swallowed.
        try:
            self._extension_mgrs['resourceApi'].map_method('post_physical_router_create', obj_dict)
        except Exception as e:
            pass

        return {'physical-router': rsp_body}
    #end physical_routers_http_post

    def physical_routers_http_get(self):
        """REST handler: collection GET for physical-router objects.

        Honors parent/back-ref/uuid anchors and 'count'/'detail'/'fields'/
        'filters' query params; returns {'physical-routers': ...}.
        """
        # gather list of uuids using 1. any specified anchors
        #                            2. any specified filters
        # if not 'detail' return list with any specified 'fields'
        # if 'detail' return list with props+refs + any specified 'fields'

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')
        parent_uuids = None
        back_ref_uuids = None
        obj_uuids = None
        if (('parent_fq_name_str' in request.query) and
            ('parent_type' in request.query)):
            parent_fq_name = request.query.parent_fq_name_str.split(':')
            parent_type = request.query.parent_type
            parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
        elif 'parent_id' in request.query:
            parent_ids = request.query.parent_id.split(',')
            parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
        if 'back_ref_id' in request.query:
            back_ref_ids = request.query.back_ref_id.split(',')
            back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
        if 'obj_uuids' in request.query:
            obj_uuids = request.query.obj_uuids.split(',')

        # common handling for all resource get
        (ok, result) = self._get_common(request, parent_uuids)
        if not ok:
            (code, msg) = result
            self.config_object_error(None, None, 'physical_routers', 'http_get_collection', msg)
            abort(code, msg)

        if 'count' in request.query:
            count = 'true' in request.query.count.lower()
        else:
            count = False

        # filters arrive as comma-separated 'name==value' pairs
        filter_params = request.query.filters
        if filter_params:
            try:
                ff_key_vals = filter_params.split(',')
                ff_names = [ff.split('==')[0] for ff in ff_key_vals]
                ff_values = [ff.split('==')[1] for ff in ff_key_vals]
                filters = {'field_names': ff_names, 'field_values': ff_values}
            except Exception as e:
                abort(400, 'Invalid filter ' + filter_params)
        else:
            filters = None
        db_conn = self._db_conn
        (ok, result) = \
            db_conn.dbe_list('physical-router', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
        if not ok:
            self.config_object_error(None, None, 'physical_routers', 'http_get_collection', result)
            abort(404, result)

        # If only counting, return early
        if count:
            return {'physical-routers': {'count': result}}

        if 'detail' in request.query:
            detail = 'true' in request.query.detail.lower()
        else:
            detail = False

        fq_names_uuids = result
        obj_dicts = []
        if not detail:
            if not self.is_admin_request():
                # non-admin: re-read id_perms to drop user_visible=False objects
                obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
                obj_fields = [u'id_perms']
                (ok, result) = db_conn.dbe_read_multi('physical-router', obj_ids_list, obj_fields)
                if not ok:
                    result = []
                for obj_result in result:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    if obj_result['id_perms'].get('user_visible', True):
                        obj_dict = {}
                        obj_dict['uuid'] = obj_result['uuid']
                        obj_dict['href'] = self.generate_url('physical-router', obj_result['uuid'])
                        obj_dict['fq_name'] = obj_result['fq_name']
                        obj_dicts.append(obj_dict)
            else:
                for fq_name, obj_uuid in fq_names_uuids:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    obj_dict = {}
                    obj_dict['uuid'] = obj_uuid
                    obj_dict['href'] = self.generate_url('physical-router', obj_uuid)
                    obj_dict['fq_name'] = fq_name
                    obj_dicts.append(obj_dict)
        else: #detail
            obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
            obj_fields = [u'physical_router_management_ip', u'physical_router_dataplane_ip', u'physical_router_vendor_name', u'physical_router_product_name', u'physical_router_vnc_managed', u'physical_router_user_credentials', u'physical_router_snmp_credentials', u'physical_router_junos_service_ports', u'id_perms', u'display_name'] + ['virtual_router_refs', 'bgp_router_refs', u'virtual_network_refs']
            if 'fields' in request.query:
                obj_fields.extend(request.query.fields.split(','))
            (ok, result) = db_conn.dbe_read_multi('physical-router', obj_ids_list, obj_fields)

            if not ok:
                result = []

            for obj_result in result:
                # give chance for zk heartbeat/ping
                gevent.sleep(0)
                obj_dict = {}
                obj_dict['name'] = obj_result['fq_name'][-1]
                obj_dict['href'] = self.generate_url('physical-router', obj_result['uuid'])
                obj_dict.update(obj_result)
                if (obj_dict['id_perms'].get('user_visible', True) or
                    self.is_admin_request()):
                    obj_dicts.append({'physical-router': obj_dict})

        return {'physical-routers': obj_dicts}
    #end physical_routers_http_get

    def _physical_router_create_default_children(self, parent_obj):
        """Create default physical-interface and logical-interface children
        under *parent_obj*, for each class provisioned to generate one."""
        # Create a default child only if provisioned for
        r_class = self.get_resource_class('physical-interface')
        if r_class and r_class.generate_default_instance:
            child_obj = PhysicalInterface(parent_obj = parent_obj)
            child_dict = child_obj.__dict__
            fq_name = child_dict['fq_name']
            child_dict['id_perms'] = self._get_default_id_perms('physical-interface')

            db_conn = self._db_conn
            (ok, result) = db_conn.dbe_alloc('physical-interface', child_dict)
            if not ok:
                return (ok, result)

            obj_ids = result
            db_conn.dbe_create('physical-interface', obj_ids, child_dict)
            self._physical_interface_create_default_children(child_obj)

        # Create a default child only if provisioned for
        r_class = self.get_resource_class('logical-interface')
        if r_class and r_class.generate_default_instance:
            child_obj = LogicalInterface(parent_obj = parent_obj)
            child_dict = child_obj.__dict__
            fq_name = child_dict['fq_name']
            child_dict['id_perms'] = self._get_default_id_perms('logical-interface')

            db_conn = self._db_conn
            (ok, result) = db_conn.dbe_alloc('logical-interface', child_dict)
            if not ok:
                return (ok, result)

            obj_ids = result
            db_conn.dbe_create('logical-interface', obj_ids, child_dict)
            self._logical_interface_create_default_children(child_obj)

        pass
    #end _physical_router_create_default_children

    def _physical_router_delete_default_children(self, parent_dict):
        """Delete the default physical-interface and logical-interface
        children referenced from *parent_dict*, if provisioned."""
        # Delete a default child only if provisioned for
        # NOTE(review): this first branch looks up the 'logical-interface'
        # resource class but locates/deletes the default *physical*-interface
        # child — looks like a generator bug; confirm against the generator.
        r_class = self.get_resource_class('logical-interface')
        if r_class and r_class.generate_default_instance:
            # first locate default child then delete it
            has_infos = parent_dict.get('physical_interfaces')
            if has_infos:
                for has_info in has_infos:
                    if has_info['to'][-1] == 'default-physical-interface':
                        default_child_id = has_info['href'].split('/')[-1]
                        self.physical_interface_http_delete(default_child_id)
                        break

        # Delete a default child only if provisioned for
        r_class = self.get_resource_class('logical-interface')
        if r_class and r_class.generate_default_instance:
            # first locate default child then delete it
            has_infos = parent_dict.get('logical_interfaces')
            if has_infos:
                for has_info in has_infos:
                    if has_info['to'][-1] == 'default-logical-interface':
                        default_child_id = has_info['href'].split('/')[-1]
                        self.logical_interface_http_delete(default_child_id)
                        break

        pass
    #end _physical_router_delete_default_children

    def bgp_router_http_get(self, id):
        """REST handler: GET a single bgp-router by uuid *id*.

        Supports If-None-Match/ETag (304), 'fields', 'exclude_back_refs' and
        'exclude_children' query params; 404 when missing or not visible.
        """
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_bgp_router_read', id)
        except Exception as e:
            pass

        # TODO get vals from request out of the global ASAP
        etag = request.headers.get('If-None-Match')
        try:
            obj_type = self._db_conn.uuid_to_obj_type(id)
        except NoIdError:
            obj_type = None
        if obj_type != 'bgp_router':
            abort(404, 'No bgp-router object found for id %s' %(id))
        # common handling for all resource get
        (ok, result) = self._get_common(request, id)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'bgp_router', 'http_get', msg)
            abort(code, msg)

        # type-specific hook
        r_class = self.get_resource_class('bgp-router')
        if r_class:
            r_class.http_get(id)

        db_conn = self._db_conn
        if etag:
            obj_ids = {'uuid': id}
            (ok, result) = db_conn.dbe_is_latest(obj_ids,
                                                 etag.replace('"', ''))
            if not ok:
                # Not present in DB
                self.config_object_error(id, None, 'bgp_router', 'http_get', result)
                abort(404, result)

            is_latest = result
            if is_latest:
                # send Not-Modified, caches use this for read optimization
                response.status = 304
                return
        #end if etag

        obj_ids = {'uuid': id}

        # Generate field list for db layer
        properties = [u'bgp_router_parameters', u'id_perms', u'display_name']
        references = ['bgp_router_refs']
        back_references = [u'global_system_config_back_refs', u'physical_router_back_refs', 'virtual_router_back_refs', 'routing_instance_back_refs', 'bgp_router_back_refs']
        children = []
        if 'fields' in request.query:
            obj_fields = request.query.fields.split(',')
        else: # default props + children + refs + backrefs
            obj_fields = properties + references
            if 'exclude_back_refs' not in request.query:
                obj_fields = obj_fields + back_references
            if 'exclude_children' not in request.query:
                obj_fields = obj_fields + children

        (ok, result) = db_conn.dbe_read('bgp-router', obj_ids, obj_fields)
        if not ok:
            # Not present in DB
            self.config_object_error(id, None, 'bgp_router', 'http_get', result)
            abort(404, result)

        # check visibility
        if (not result['id_perms'].get('user_visible', True) and
            not self.is_admin_request()):
            result = 'This object is not visible by users: %s' % id
            self.config_object_error(id, None, 'bgp_router', 'http_get', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('bgp-router', id)
        rsp_body['name'] = result['fq_name'][-1]
        rsp_body.update(result)
        id_perms = result['id_perms']
        response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
        try:
            self._extension_mgrs['resourceApi'].map_method('post_bgp_router_read', id, rsp_body)
        except Exception as e:
            pass

        return {'bgp-router': rsp_body}
    #end bgp_router_http_get

    def bgp_router_http_put(self, id):
        """REST handler: PUT (update) a bgp-router by uuid *id*.

        Validates bgp_router_parameters/id_perms and each peering attr via
        an XML export/build round-trip, rejects self-references, checks
        link permissions, runs the type-specific http_put hook, then
        writes via dbe_update.  Returns {'bgp-router': {uuid, href}}.
        """
        key = 'bgp-router'
        obj_dict = request.json[key]

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_bgp_router_update', id, obj_dict)
        except Exception as e:
            pass

        db_conn = self._db_conn
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'bgp_router':
                abort(404, 'No bgp-router object found for id %s' %(id))
            fq_name = db_conn.uuid_to_fq_name(id)
        except NoIdError as e:
            abort(404, str(e))
        prop_dict = obj_dict.get('bgp_router_parameters')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_bgp_router_parameters = BgpRouterParams(**prop_dict)
            xx_bgp_router_parameters.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_bgp_router_parameters = BgpRouterParams()
            try:
                xx_bgp_router_parameters.build(node)
            except Exception as e:
                abort(400, str(e))
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # validate peering attributes on each bgp-router reference
        for ref_dict in obj_dict.get('bgp_router_refs') or []:
            if fq_name == ref_dict['to']:
                abort(404, 'Cannot add reference to self')
            buf = cStringIO.StringIO()
            xx_bgp_router = BgpPeeringAttributes(**ref_dict['attr'])
            xx_bgp_router.export(buf)
            node = etree.fromstring(buf.getvalue())
            try:
                xx_bgp_router.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource put
        (ok, result) = self._put_common(request, 'bgp_router', id, fq_name, obj_dict)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'bgp_router', 'http_put', msg)
            abort(code, msg)

        # Validate perms
        objtype_list = ['bgp_router']
        for obj_type in objtype_list:
            refs = obj_dict.get('%s_refs'%(obj_type), None)
            if refs:
                for ref in refs:
                    try:
                        ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
                    except NoIdError as e:
                        abort(404, str(e))
                    (ok, status) = \
                        self._permissions.check_perms_link(request, ref_uuid)
                    if not ok:
                        (code, err_msg) = status
                        abort(code, err_msg)

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # type-specific hook
        r_class = self.get_resource_class('bgp-router')
        if r_class:
            (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
            if not ok:
                (code, msg) = put_result
                self.config_object_error(id, None, 'bgp_router', 'http_put', msg)
                abort(code, msg)
            callable = getattr(r_class, 'http_put_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))

        obj_ids = {'uuid': id}
        try:
            (ok, result) = db_conn.dbe_update('bgp-router', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)
        if not ok:
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'bgp_router', 'http_put', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('bgp-router', id)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_bgp_router_update', id, obj_dict)
        except Exception as e:
            pass

        return {'bgp-router': rsp_body}
    #end bgp_router_http_put

    def bgp_router_http_delete(self, id):
        """REST handler: DELETE a bgp-router by uuid *id* (body continues
        beyond this view)."""
        db_conn = self._db_conn
        # if obj doesn't exist return early
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'bgp_router':
                abort(404, 'No bgp-router object found for id %s' %(id))
            _ = db_conn.uuid_to_fq_name(id)
        except NoIdError:
            abort(404, 'ID %s does not exist' %(id))

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_bgp_router_delete', id)
        except Exception as e:
            pass

        # read in obj from db (accepting error) to get details of it
        obj_ids = {'uuid': id}
        back_references = [u'global_system_config_back_refs', u'physical_router_back_refs', 'virtual_router_back_refs',
                           'routing_instance_back_refs', 'bgp_router_back_refs']
        children = []
        obj_fields = children + back_references
        (read_ok, read_result) = db_conn.dbe_read('bgp-router', obj_ids, obj_fields)
        if not read_ok:
            if read_result.startswith('Unknown id:'):
                abort(404, 'ID %s does not exist' %(id))
            else:
                self.config_object_error(id, None, 'bgp_router', 'http_delete', read_result)
                # proceed down to delete the resource

        # common handling for all resource delete
        parent_type = read_result.get('parent_type', None)
        (ok, del_result) = self._delete_common(request, 'bgp_router', id, parent_type)
        if not ok:
            (code, msg) = del_result
            self.config_object_error(id, None, 'bgp_router', 'http_delete', msg)
            abort(code, msg)

        fq_name = read_result['fq_name']
        ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('bgp-router', fq_name)
        obj_ids['imid'] = ifmap_id
        if parent_type:
            parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
            obj_ids['parent_imid'] = parent_imid

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []

        # type-specific hook
        r_class = self.get_resource_class('bgp-router')
        if r_class:
            if read_ok:
                # fail if non-default children or backrefs exist
                global_system_config_back_refs = read_result.get('global_system_config_back_refs', None)
                if global_system_config_back_refs:
                    back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['global_system_config_back_refs']]
                    back_ref_str = ', '.join(back_ref_urls)
                    err_msg = 'Back-References from ' + back_ref_str + ' still exist'
                    self.config_object_error(id, None, 'bgp_router', 'http_delete', err_msg)
                    abort(409, err_msg)

                physical_router_back_refs = read_result.get('physical_router_back_refs', None)
                if physical_router_back_refs:
                    back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['physical_router_back_refs']]
                    back_ref_str = ', '.join(back_ref_urls)
                    err_msg = 'Back-References from ' + back_ref_str + ' still exist'
                    self.config_object_error(id, None, 'bgp_router', 'http_delete', err_msg)
                    abort(409, err_msg)

                virtual_router_back_refs = read_result.get('virtual_router_back_refs', None)
                if virtual_router_back_refs:
                    back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_router_back_refs']]
                    back_ref_str = ', '.join(back_ref_urls)
                    err_msg = 'Back-References from ' + back_ref_str + ' still exist'
                    self.config_object_error(id, None, 'bgp_router', 'http_delete', err_msg)
                    abort(409, err_msg)

                bgp_router_back_refs = read_result.get('bgp_router_back_refs', None)
                if bgp_router_back_refs:
                    back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['bgp_router_back_refs']]
                    back_ref_str = ', '.join(back_ref_urls)
                    err_msg = 'Back-References from ' + back_ref_str + ' still exist'
                    self.config_object_error(id, None, 'bgp_router', 'http_delete', err_msg)
                    abort(409, err_msg)


                # Delete default children first
                self._bgp_router_delete_default_children(read_result)

                (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
                if not ok:
                    (code, msg) = del_result
                    self.config_object_error(id, None, 'bgp_router', 'http_delete', msg)
                    abort(code, msg)
                callable = getattr(r_class, 'http_delete_fail', None)
                if callable:
                    cleanup_on_failure.append((callable, [id, read_result, db_conn]))
            #end if read_ok

        try:
            (ok, del_result) = db_conn.dbe_delete('bgp-router', obj_ids, read_result)
        except Exception as e:
            ok = False
            del_result = str(e)
        if not ok:
            # undo the type-specific hook's work before reporting failure
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'bgp_router', 'http_delete', del_result)
            abort(409, del_result)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_bgp_router_delete', id, read_result)
        except Exception as e:
            # extension hook failures are ignored (best-effort)
            pass

    #end bgp_router_http_delete

    def bgp_routers_http_post(self):
        """POST /bgp-routers: create a bgp-router object.

        Validates the payload (schema round-trip of properties and ref
        attrs), verifies the parent exists and write perms on it, checks
        link perms on refs, allocates ids (dbe_alloc), runs the
        type-specific http_post_collection hook with cleanup-on-failure,
        then dbe_create, and returns name/fq_name/uuid/href.
        """
        key = 'bgp-router'
        obj_dict = request.json[key]
        self._post_validate(key, obj_dict=obj_dict)
        fq_name = obj_dict['fq_name']

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_bgp_router_create', obj_dict)
        except Exception as e:
            # extension hook failures are ignored (best-effort)
            pass

        # validate properties by round-tripping through the generated
        # XML schema types
        prop_dict = obj_dict.get('bgp_router_parameters')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_bgp_router_parameters = BgpRouterParams(**prop_dict)
            xx_bgp_router_parameters.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_bgp_router_parameters = BgpRouterParams()
            try:
                xx_bgp_router_parameters.build(node)
            except Exception as e:
                abort(400, str(e))
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # validate the attr payload of each bgp-router peering ref
        for ref_dict in obj_dict.get('bgp_router_refs') or []:
            if fq_name == ref_dict['to']:
                abort(404, 'Cannot add reference to self')
            buf = cStringIO.StringIO()
            xx_bgp_router = BgpPeeringAttributes(**ref_dict['attr'])
            xx_bgp_router.export(buf)
            node = etree.fromstring(buf.getvalue())
            try:
                xx_bgp_router.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource create
        (ok, result) = self._post_common(request, 'bgp-router', obj_dict)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict.get('fq_name', []))
            self.config_object_error(None, fq_name_str, 'bgp_router', 'http_post', msg)
            abort(code, msg)

        name = obj_dict['fq_name'][-1]
        fq_name = obj_dict['fq_name']

        db_conn = self._db_conn

        # if client gave parent_type of config-root, ignore and remove
        if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
            del obj_dict['parent_type']

        if 'parent_type' in obj_dict:
            # non config-root child, verify parent exists
            parent_type = obj_dict['parent_type']
            parent_fq_name = obj_dict['fq_name'][:-1]
            try:
                parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
                (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
                if not ok:
                    (code, err_msg) = status
                    abort(code, err_msg)
                self._permissions.set_user_role(request, obj_dict)
            except NoIdError:
                err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
                fq_name_str = ':'.join(parent_fq_name)
                self.config_object_error(None, fq_name_str, 'bgp_router', 'http_post', err_msg)
                abort(400, err_msg)

        # Validate perms
        objtype_list = ['bgp_router']
        for obj_type in objtype_list:
            refs = obj_dict.get('%s_refs'%(obj_type), None)
            if refs:
                for ref in refs:
                    ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
                    (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
                    if not ok:
                        (code, err_msg) = status
                        abort(code, err_msg)

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # Alloc and Store id-mappings before creating entry on pubsub store.
        # Else a subscriber can ask for an id mapping before we have stored it
        uuid_requested = result
        (ok, result) = db_conn.dbe_alloc('bgp-router', obj_dict, uuid_requested)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict['fq_name'])
            self.config_object_error(None, fq_name_str, 'bgp_router', 'http_post', result)
            abort(code, msg)
        cleanup_on_failure.append((db_conn.dbe_release, ['bgp_router', fq_name]))

        obj_ids = result

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')


        # type-specific hook
        r_class = self.get_resource_class('bgp-router')
        if r_class:
            try:
                (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
            except Exception as e:
                ok = False
                result = (500, str(e))
            if not ok:
                for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                    fail_cleanup_callable(*cleanup_args)
                (code, msg) = result
                fq_name_str = ':'.join(fq_name)
                self.config_object_error(None, fq_name_str, 'bgp_router', 'http_post', msg)
                abort(code, msg)
            callable = getattr(r_class, 'http_post_collection_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))

        try:
            (ok, result) = \
                 db_conn.dbe_create('bgp-router', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)

        if not ok:
            # run registered cleanups (id release, hook undo) on failure
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            fq_name_str = ':'.join(fq_name)
            self.config_object_error(None, fq_name_str, 'bgp_router', 'http_post', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['name'] = name
        rsp_body['fq_name'] = fq_name
        rsp_body['uuid'] = obj_ids['uuid']
        rsp_body['href'] = self.generate_url('bgp-router', obj_ids['uuid'])
        if 'parent_type' in obj_dict:
            # non config-root child, send back parent uuid/href
            rsp_body['parent_uuid'] = parent_uuid
            rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_bgp_router_create', obj_dict)
        except Exception as e:
            # extension hook failures are ignored (best-effort)
            pass

        return {'bgp-router': rsp_body}
    #end bgp_routers_http_post

    def bgp_routers_http_get(self):
        """GET /bgp-routers: list bgp-router objects.

        Anchors the listing by parent fq-name/ids, back_ref_id or
        obj_uuids query params, supports count, '=='-separated filters,
        'detail' and extra 'fields'; hides non-user_visible objects from
        non-admin requests.
        """
        # gather list of uuids using 1. any specified anchors
        #                            2. any specified filters
        # if not 'detail' return list with any specified 'fields'
        # if 'detail' return list with props+refs + any specified 'fields'

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')
        parent_uuids = None
        back_ref_uuids = None
        obj_uuids = None
        if (('parent_fq_name_str' in request.query) and
            ('parent_type' in request.query)):
            parent_fq_name = request.query.parent_fq_name_str.split(':')
            parent_type = request.query.parent_type
            parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
        elif 'parent_id' in request.query:
            parent_ids = request.query.parent_id.split(',')
            parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
        if 'back_ref_id' in request.query:
            back_ref_ids = request.query.back_ref_id.split(',')
            back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
        if 'obj_uuids' in request.query:
            obj_uuids = request.query.obj_uuids.split(',')

        # common handling for all resource get
        (ok, result) = self._get_common(request, parent_uuids)
        if not ok:
            (code, msg) = result
            self.config_object_error(None, None, 'bgp_routers', 'http_get_collection', msg)
            abort(code, msg)

        if 'count' in request.query:
            count = 'true' in request.query.count.lower()
        else:
            count = False

        filter_params = request.query.filters
        if filter_params:
            try:
                # filters look like: field1==value1,field2==value2
                ff_key_vals = filter_params.split(',')
                ff_names = [ff.split('==')[0] for ff in ff_key_vals]
                ff_values = [ff.split('==')[1] for ff in ff_key_vals]
                filters = \
                    {'field_names': ff_names, 'field_values': ff_values}
            except Exception as e:
                abort(400, 'Invalid filter ' + filter_params)
        else:
            filters = None
        db_conn = self._db_conn
        (ok, result) = \
            db_conn.dbe_list('bgp-router', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
        if not ok:
            self.config_object_error(None, None, 'bgp_routers', 'http_get_collection', result)
            abort(404, result)

        # If only counting, return early
        if count:
            return {'bgp-routers': {'count': result}}

        if 'detail' in request.query:
            detail = 'true' in request.query.detail.lower()
        else:
            detail = False

        fq_names_uuids = result
        obj_dicts = []
        if not detail:
            if not self.is_admin_request():
                # non-admin: read id_perms so non-user_visible objects
                # can be filtered out of the listing
                obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
                obj_fields = [u'id_perms']
                (ok, result) = db_conn.dbe_read_multi('bgp-router', obj_ids_list, obj_fields)
                if not ok:
                    result = []
                for obj_result in result:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    if obj_result['id_perms'].get('user_visible', True):
                        obj_dict = {}
                        obj_dict['uuid'] = obj_result['uuid']
                        obj_dict['href'] = self.generate_url('bgp-router', obj_result['uuid'])
                        obj_dict['fq_name'] = obj_result['fq_name']
                        obj_dicts.append(obj_dict)
            else:
                for fq_name, obj_uuid in fq_names_uuids:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    obj_dict = {}
                    obj_dict['uuid'] = obj_uuid
                    obj_dict['href'] = self.generate_url('bgp-router', obj_uuid)
                    obj_dict['fq_name'] = fq_name
                    obj_dicts.append(obj_dict)
        else: #detail
            obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
            obj_fields = [u'bgp_router_parameters', u'id_perms', u'display_name'] + ['bgp_router_refs']
            if 'fields' in request.query:
                obj_fields.extend(request.query.fields.split(','))
            (ok, result) = db_conn.dbe_read_multi('bgp-router', obj_ids_list, obj_fields)

            if not ok:
                result = []

            for obj_result in result:
                # give chance for zk heartbeat/ping
                gevent.sleep(0)
                obj_dict = {}
                obj_dict['name'] = obj_result['fq_name'][-1]
                obj_dict['href'] = self.generate_url('bgp-router', obj_result['uuid'])
                obj_dict.update(obj_result)
                if (obj_dict['id_perms'].get('user_visible', True) or
                    self.is_admin_request()):
                    obj_dicts.append({'bgp-router': obj_dict})

        return {'bgp-routers': obj_dicts}
    #end bgp_routers_http_get

    def _bgp_router_create_default_children(self, parent_obj):
        # bgp-router has no default children to create
        pass
    #end _bgp_router_create_default_children

    def _bgp_router_delete_default_children(self, parent_dict):
        # bgp-router has no default children to delete
        pass
    #end _bgp_router_delete_default_children

    def virtual_router_http_get(self, id):
        """GET /virtual-router/<id>: read one virtual-router object.

        Same generated pattern as bgp_router_http_get: pre/post read
        hooks, 404 on unknown/mismatched id, If-None-Match/ETag support,
        field selection via query params, visibility check.
        """
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_virtual_router_read', id)
        except Exception as e:
            # extension hook failures are ignored (best-effort)
            pass

        # TODO get vals from request out of the global ASAP
        etag = request.headers.get('If-None-Match')
        try:
            obj_type = self._db_conn.uuid_to_obj_type(id)
        except NoIdError:
            obj_type = None
        if obj_type != 'virtual_router':
            abort(404, 'No virtual-router object found for id %s' %(id))
        # common handling for all resource get
        (ok, result) = self._get_common(request, id)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'virtual_router', 'http_get', msg)
            abort(code, msg)

        # type-specific hook
        r_class = self.get_resource_class('virtual-router')
        if r_class:
            r_class.http_get(id)

        db_conn = self._db_conn
        if etag:
            obj_ids = {'uuid': id}
            (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
            if not ok:
                # Not present in DB
                self.config_object_error(id, None, 'virtual_router', 'http_get', result)
                abort(404, result)

            is_latest = result
            if is_latest:
                # send Not-Modified, caches use this for read optimization
                response.status = 304
                return
        #end if etag

        obj_ids = {'uuid': id}

        # Generate field list for db layer
        properties = [u'virtual_router_type', u'virtual_router_ip_address', u'id_perms', u'display_name']
        references = ['bgp_router_refs', u'virtual_machine_refs']
        back_references = [u'physical_router_back_refs', u'global_system_config_back_refs', 'provider_attachment_back_refs']
        children = []
        if 'fields' in request.query:
            obj_fields = request.query.fields.split(',')
        else: # default props + children + refs
            obj_fields = properties + references
            if 'exclude_back_refs' not in request.query:
                obj_fields = obj_fields + back_references
            if 'exclude_children' not in request.query:
                obj_fields = obj_fields + children

        (ok, result) = db_conn.dbe_read('virtual-router', obj_ids, obj_fields)
        if not ok:
            # Not present in DB
            self.config_object_error(id, None, 'virtual_router', 'http_get', result)
            abort(404, result)

        # check visibility
        if (not result['id_perms'].get('user_visible', True) and
            not self.is_admin_request()):
            result = 'This object is not visible by users: %s' % id
            self.config_object_error(id, None, 'virtual_router', 'http_get', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('virtual-router', id)
        rsp_body['name'] = result['fq_name'][-1]
        rsp_body.update(result)
        id_perms = result['id_perms']
        response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
        try:
            self._extension_mgrs['resourceApi'].map_method('post_virtual_router_read', id, rsp_body)
        except Exception as e:
            # extension hook failures are ignored (best-effort)
            pass

        return {'virtual-router': rsp_body}
    #end virtual_router_http_get

    def virtual_router_http_put(self, id):
        """PUT /virtual-router/<id>: update a virtual-router object.

        Validates id_perms by schema round-trip, checks link perms on
        bgp-router / virtual-machine refs, invokes the type-specific
        http_put hook with failure cleanup, then dbe_update.
        """
        key = 'virtual-router'
        obj_dict = request.json[key]

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_virtual_router_update', id, obj_dict)
        except Exception as e:
            # extension hook failures are ignored (best-effort)
            pass

        db_conn = self._db_conn
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'virtual_router':
                abort(404, 'No virtual-router object found for id %s' %(id))
            fq_name = db_conn.uuid_to_fq_name(id)
        except NoIdError as e:
            abort(404, str(e))
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource put
        (ok, result) = self._put_common(request, 'virtual_router', id, fq_name, obj_dict)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'virtual_router', 'http_put', msg)
            abort(code, msg)

        # Validate perms
        objtype_list = ['bgp_router', u'virtual_machine']
        for obj_type in objtype_list:
            refs = obj_dict.get('%s_refs'%(obj_type), None)
            if refs:
                for ref in refs:
                    try:
                        ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
                    except NoIdError as e:
                        abort(404, str(e))
                    (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
                    if not ok:
                        (code, err_msg) = status
                        abort(code, err_msg)

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # type-specific hook
        r_class = self.get_resource_class('virtual-router')
        if r_class:
            (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
            if not ok:
                (code, msg) = put_result
                self.config_object_error(id, None, 'virtual_router', 'http_put', msg)
                abort(code, msg)
            callable = getattr(r_class, 'http_put_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))

        obj_ids = {'uuid': id}
        try:
            (ok, result) = db_conn.dbe_update('virtual-router', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)
        if not ok:
            # undo the type-specific hook's work before reporting failure
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'virtual_router', 'http_put', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('virtual-router', id)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_virtual_router_update', id, obj_dict)
        except Exception as e:
            # extension hook failures are ignored (best-effort)
            pass

        return {'virtual-router': rsp_body}
    #end virtual_router_http_put

    def virtual_router_http_delete(self, id):
        """DELETE /virtual-router/<id>: delete a virtual-router object.

        404s early on unknown id, runs pre/post delete hooks, refuses
        (409) while physical-router / provider-attachment back-references
        remain, deletes default children first, then dbe_delete with
        failure cleanup.
        """
        db_conn = self._db_conn
        # if obj doesn't exist return early
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'virtual_router':
                abort(404, 'No virtual-router object found for id %s' %(id))
            _ = db_conn.uuid_to_fq_name(id)
        except NoIdError:
            abort(404, 'ID %s does not exist' %(id))

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_virtual_router_delete', id)
        except Exception as e:
            # extension hook failures are ignored (best-effort)
            pass

        # read in obj from db (accepting error) to get details of it
        obj_ids = {'uuid': id}
        back_references = [u'physical_router_back_refs', u'global_system_config_back_refs', 'provider_attachment_back_refs']
        children = []
        obj_fields = children + back_references
        (read_ok, read_result) = db_conn.dbe_read('virtual-router', obj_ids, obj_fields)
        if not read_ok:
            if read_result.startswith('Unknown id:'):
                abort(404, 'ID %s does not exist' %(id))
            else:
                self.config_object_error(id, None, 'virtual_router', 'http_delete', read_result)
                # proceed down to delete the resource

        # common handling for all resource delete
        parent_type = read_result.get('parent_type', None)
        (ok, del_result) = self._delete_common(request, 'virtual_router', id, parent_type)
        if not ok:
            (code, msg) = del_result
            self.config_object_error(id, None, 'virtual_router', 'http_delete', msg)
            abort(code, msg)

        fq_name = read_result['fq_name']
        ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('virtual-router', fq_name)
        obj_ids['imid'] = ifmap_id
        if parent_type:
            parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
            obj_ids['parent_imid'] = parent_imid

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []

        # type-specific hook
        r_class = self.get_resource_class('virtual-router')
        if r_class:
            if read_ok:
                # fail if non-default children or backrefs exist
                physical_router_back_refs = read_result.get('physical_router_back_refs', None)
                if physical_router_back_refs:
                    back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['physical_router_back_refs']]
                    back_ref_str = ', '.join(back_ref_urls)
                    err_msg = 'Back-References from ' + back_ref_str + ' still exist'
                    self.config_object_error(id, None, 'virtual_router', 'http_delete', err_msg)
                    abort(409, err_msg)

                provider_attachment_back_refs = read_result.get('provider_attachment_back_refs', None)
                if provider_attachment_back_refs:
                    back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['provider_attachment_back_refs']]
                    back_ref_str = ', '.join(back_ref_urls)
                    err_msg = 'Back-References from ' + back_ref_str + ' still exist'
                    self.config_object_error(id, None, 'virtual_router', 'http_delete', err_msg)
                    abort(409, err_msg)


                # Delete default children first
                self._virtual_router_delete_default_children(read_result)

                (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
                if not ok:
                    (code, msg) = del_result
                    self.config_object_error(id, None, 'virtual_router', 'http_delete', msg)
                    abort(code, msg)
                callable = getattr(r_class, 'http_delete_fail', None)
                if callable:
                    cleanup_on_failure.append((callable, [id, read_result, db_conn]))
            #end if read_ok

        try:
            (ok, del_result) = db_conn.dbe_delete('virtual-router', obj_ids, read_result)
        except Exception as e:
            ok = False
            del_result = str(e)
        if not ok:
            # undo the type-specific hook's work before reporting failure
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'virtual_router', 'http_delete', del_result)
            abort(409, del_result)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_virtual_router_delete', id, read_result)
        except Exception as e:
            # extension hook failures are ignored (best-effort)
            pass

    #end virtual_router_http_delete

    def virtual_routers_http_post(self):
        """POST /virtual-routers: create a virtual-router object.

        Validates id_perms by schema round-trip, verifies parent and
        write perms, checks link perms on refs, allocates ids
        (dbe_alloc), runs the type-specific http_post_collection hook
        with cleanup-on-failure, then dbe_create.
        """
        key = 'virtual-router'
        obj_dict = request.json[key]
        self._post_validate(key, obj_dict=obj_dict)
        fq_name = obj_dict['fq_name']

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_virtual_router_create', obj_dict)
        except Exception as e:
            # extension hook failures are ignored (best-effort)
            pass

        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource create
        (ok, result) = self._post_common(request, 'virtual-router', obj_dict)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict.get('fq_name', []))
            self.config_object_error(None, fq_name_str, 'virtual_router', 'http_post', msg)
            abort(code, msg)

        name = obj_dict['fq_name'][-1]
        fq_name = obj_dict['fq_name']

        db_conn = self._db_conn

        # if client gave parent_type of config-root, ignore and remove
        if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
            del obj_dict['parent_type']

        if 'parent_type' in obj_dict:
            # non config-root child, verify parent exists
            parent_type = obj_dict['parent_type']
            parent_fq_name = obj_dict['fq_name'][:-1]
            try:
                parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
                (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
                if not ok:
                    (code, err_msg) = status
                    abort(code, err_msg)
                self._permissions.set_user_role(request, obj_dict)
            except NoIdError:
                err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
                fq_name_str = ':'.join(parent_fq_name)
                self.config_object_error(None, fq_name_str,
'virtual_router', 'http_post', err_msg) + abort(400, err_msg) + + # Validate perms + objtype_list = ['bgp_router', u'virtual_machine'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # Alloc and Store id-mappings before creating entry on pubsub store. + # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('virtual-router', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'virtual_router', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['virtual_router', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('virtual-router') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'virtual_router', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('virtual-router', obj_ids, obj_dict) + except Exception as e: + ok = False + result = 
str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'virtual_router', 'http_post', result) + abort(404, result) + + rsp_body = {} + rsp_body['name'] = name + rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = self.generate_url('virtual-router', obj_ids['uuid']) + if 'parent_type' in obj_dict: + # non config-root child, send back parent uuid/href + rsp_body['parent_uuid'] = parent_uuid + rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid) + + try: + self._extension_mgrs['resourceApi'].map_method('post_virtual_router_create', obj_dict) + except Exception as e: + pass + + return {'virtual-router': rsp_body} + #end virtual_routers_http_post + + def virtual_routers_http_get(self): + # gather list of uuids using 1. any specified anchors + # 2. any specified filters + # if not 'detail' return list with any specified 'fields' + # if 'detail' return list with props+refs + any specified 'fields' + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + parent_uuids = None + back_ref_uuids = None + obj_uuids = None + if (('parent_fq_name_str' in request.query) and + ('parent_type' in request.query)): + parent_fq_name = request.query.parent_fq_name_str.split(':') + parent_type = request.query.parent_type + parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)] + elif 'parent_id' in request.query: + parent_ids = request.query.parent_id.split(',') + parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids] + if 'back_ref_id' in request.query: + back_ref_ids = request.query.back_ref_id.split(',') + back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids] + if 'obj_uuids' in request.query: + obj_uuids = request.query.obj_uuids.split(',') + + # common handling for all resource get + (ok, result) 
= self._get_common(request, parent_uuids) + if not ok: + (code, msg) = result + self.config_object_error(None, None, 'virtual_routers', 'http_get_collection', msg) + abort(code, msg) + + if 'count' in request.query: + count = 'true' in request.query.count.lower() + else: + count = False + + filter_params = request.query.filters + if filter_params: + try: + ff_key_vals = filter_params.split(',') + ff_names = [ff.split('==')[0] for ff in ff_key_vals] + ff_values = [ff.split('==')[1] for ff in ff_key_vals] + filters = {'field_names': ff_names, 'field_values': ff_values} + except Exception as e: + abort(400, 'Invalid filter ' + filter_params) + else: + filters = None + db_conn = self._db_conn + (ok, result) = \ + db_conn.dbe_list('virtual-router', parent_uuids, back_ref_uuids, obj_uuids, count, filters) + if not ok: + self.config_object_error(None, None, 'virtual_routers', 'http_get_collection', result) + abort(404, result) + + # If only counting, return early + if count: + return {'virtual-routers': {'count': result}} + + if 'detail' in request.query: + detail = 'true' in request.query.detail.lower() + else: + detail = False + + fq_names_uuids = result + obj_dicts = [] + if not detail: + if not self.is_admin_request(): + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms'] + (ok, result) = db_conn.dbe_read_multi('virtual-router', obj_ids_list, obj_fields) + if not ok: + result = [] + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + if obj_result['id_perms'].get('user_visible', True): + obj_dict = {} + obj_dict['uuid'] = obj_result['uuid'] + obj_dict['href'] = self.generate_url('virtual-router', obj_result['uuid']) + obj_dict['fq_name'] = obj_result['fq_name'] + obj_dicts.append(obj_dict) + else: + for fq_name, obj_uuid in fq_names_uuids: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['uuid'] = obj_uuid + obj_dict['href'] = 
self.generate_url('virtual-router', obj_uuid)
                obj_dict['fq_name'] = fq_name
                obj_dicts.append(obj_dict)
        else: #detail
            # Detail listing: bulk-read properties + refs for all uuids in one db call
            obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
            obj_fields = [u'virtual_router_type', u'virtual_router_ip_address', u'id_perms', u'display_name'] + ['bgp_router_refs', u'virtual_machine_refs']
            if 'fields' in request.query:
                obj_fields.extend(request.query.fields.split(','))
            (ok, result) = db_conn.dbe_read_multi('virtual-router', obj_ids_list, obj_fields)

            # on read failure fall through with an empty result set
            if not ok:
                result = []

            for obj_result in result:
                # give chance for zk heartbeat/ping
                gevent.sleep(0)
                obj_dict = {}
                obj_dict['name'] = obj_result['fq_name'][-1]
                obj_dict['href'] = self.generate_url('virtual-router', obj_result['uuid'])
                obj_dict.update(obj_result)
                # non-admin callers only see objects marked user_visible
                if (obj_dict['id_perms'].get('user_visible', True) or
                    self.is_admin_request()):
                    obj_dicts.append({'virtual-router': obj_dict})

        return {'virtual-routers': obj_dicts}
    #end virtual_routers_http_get

    def _virtual_router_create_default_children(self, parent_obj):
        # nothing to do: the generator emitted no default children for virtual-router
        pass
    #end _virtual_router_create_default_children

    def _virtual_router_delete_default_children(self, parent_dict):
        # nothing to do: the generator emitted no default children for virtual-router
        pass
    #end _virtual_router_delete_default_children

    def config_root_http_get(self, id):
        # REST GET handler for a single config-root object identified by uuid.
        # Extension pre-read hook is best-effort: failures are deliberately ignored.
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_config_root_read', id)
        except Exception as e:
            pass

        # TODO get vals from request out of the global ASAP
        etag = request.headers.get('If-None-Match')
        # verify the uuid actually names a config-root before doing any work
        try:
            obj_type = self._db_conn.uuid_to_obj_type(id)
        except NoIdError:
            obj_type = None
        if obj_type != 'config_root':
            abort(404, 'No config-root object found for id %s' %(id))
        # common handling for all resource get
        (ok, result) = self._get_common(request, id)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'config_root', 'http_get', msg)
            abort(code, msg)

        # type-specific hook
        r_class = self.get_resource_class('config-root')
        if r_class:
r_class.http_get(id) + + db_conn = self._db_conn + if etag: + obj_ids = {'uuid': id} + (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', '')) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'config_root', 'http_get', result) + abort(404, result) + + is_latest = result + if is_latest: + # send Not-Modified, caches use this for read optimization + response.status = 304 + return + #end if etag + + obj_ids = {'uuid': id} + + # Generate field list for db layer + properties = [u'id_perms', u'display_name'] + references = [] + back_references = [] + children = [u'global_system_configs', u'domains'] + if 'fields' in request.query: + obj_fields = request.query.fields.split(',') + else: # default props + children + refs + backrefs + obj_fields = properties + references + if 'exclude_back_refs' not in request.query: + obj_fields = obj_fields + back_references + if 'exclude_children' not in request.query: + obj_fields = obj_fields + children + + (ok, result) = db_conn.dbe_read('config-root', obj_ids, obj_fields) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'config_root', 'http_get', result) + abort(404, result) + + # check visibility + if (not result['id_perms'].get('user_visible', True) and + not self.is_admin_request()): + result = 'This object is not visible by users: %s' % id + self.config_object_error(id, None, 'config_root', 'http_get', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('config-root', id) + rsp_body['name'] = result['fq_name'][-1] + rsp_body.update(result) + id_perms = result['id_perms'] + response.set_header('ETag', '"' + id_perms['last_modified'] + '"') + try: + self._extension_mgrs['resourceApi'].map_method('post_config_root_read', id, rsp_body) + except Exception as e: + pass + + return {'config-root': rsp_body} + #end config_root_http_get + + def config_root_http_put(self, id): + key = 'config-root' + obj_dict = 
request.json[key] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_config_root_update', id, obj_dict) + except Exception as e: + pass + + db_conn = self._db_conn + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'config_root': + abort(404, 'No config-root object found for id %s' %(id)) + fq_name = db_conn.uuid_to_fq_name(id) + except NoIdError as e: + abort(404, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource put + (ok, result) = self._put_common(request, 'config_root', id, fq_name, obj_dict) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'config_root', 'http_put', msg) + abort(code, msg) + + # Validate perms + objtype_list = [u'global_system_config', u'domain'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + try: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + except NoIdError as e: + abort(404, str(e)) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. 
Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # type-specific hook + r_class = self.get_resource_class('config-root') + if r_class: + (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn) + if not ok: + (code, msg) = put_result + self.config_object_error(id, None, 'config_root', 'http_put', msg) + abort(code, msg) + callable = getattr(r_class, 'http_put_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn])) + + obj_ids = {'uuid': id} + try: + (ok, result) = db_conn.dbe_update('config-root', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'config_root', 'http_put', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('config-root', id) + + try: + self._extension_mgrs['resourceApi'].map_method('post_config_root_update', id, obj_dict) + except Exception as e: + pass + + return {'config-root': rsp_body} + #end config_root_http_put + + def config_root_http_delete(self, id): + db_conn = self._db_conn + # if obj doesn't exist return early + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'config_root': + abort(404, 'No config-root object found for id %s' %(id)) + _ = db_conn.uuid_to_fq_name(id) + except NoIdError: + abort(404, 'ID %s does not exist' %(id)) + + try: + self._extension_mgrs['resourceApi'].map_method('pre_config_root_delete', id) + except Exception as e: + pass + + # read in obj from db (accepting error) to get details of it + obj_ids = {'uuid': id} + back_references = [] + children = [u'global_system_configs', u'domains'] + obj_fields = children + back_references + (read_ok, read_result) = db_conn.dbe_read('config-root', obj_ids, obj_fields) + if not read_ok: + if read_result.startswith('Unknown id:'): + 
abort(404, 'ID %s does not exist' %(id)) + else: + self.config_object_error(id, None, 'config_root', 'http_delete', read_result) + # proceed down to delete the resource + + # common handling for all resource delete + parent_type = read_result.get('parent_type', None) + (ok, del_result) = self._delete_common(request, 'config_root', id, parent_type) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'config_root', 'http_delete', msg) + abort(code, msg) + + fq_name = read_result['fq_name'] + ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('config-root', fq_name) + obj_ids['imid'] = ifmap_id + if parent_type: + parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1]) + obj_ids['parent_imid'] = parent_imid + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + + # type-specific hook + r_class = self.get_resource_class('config-root') + if r_class: + if read_ok: + # fail if non-default children or backrefs exist + global_system_configs = read_result.get('global_system_configs', None) + if global_system_configs: + has_infos = read_result['global_system_configs'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-global-system-config')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'config_root', 'http_delete', err_msg) + abort(409, err_msg) + + domains = read_result.get('domains', None) + if domains: + has_infos = read_result['domains'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-domain')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'config_root', 'http_delete', err_msg) + abort(409, err_msg) + + + # 
Delete default children first + self._config_root_delete_default_children(read_result) + + (ok, del_result) = r_class.http_delete(id, read_result, db_conn) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'config_root', 'http_delete', msg) + abort(code, msg) + callable = getattr(r_class, 'http_delete_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, read_result, db_conn])) + #end if read_ok + + try: + (ok, del_result) = db_conn.dbe_delete('config-root', obj_ids, read_result) + except Exception as e: + ok = False + del_result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'config_root', 'http_delete', del_result) + abort(409, del_result) + + try: + self._extension_mgrs['resourceApi'].map_method('post_config_root_delete', id, read_result) + except Exception as e: + pass + + #end config_root_http_delete + + def config_roots_http_post(self): + key = 'config-root' + obj_dict = request.json[key] + self._post_validate(key, obj_dict=obj_dict) + fq_name = obj_dict['fq_name'] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_config_root_create', obj_dict) + except Exception as e: + pass + + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource create + (ok, result) = self._post_common(request, 'config-root', obj_dict) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict.get('fq_name', [])) + self.config_object_error(None, fq_name_str, 'config_root', 'http_post', msg) + abort(code, msg) + + name = obj_dict['fq_name'][-1] + fq_name = obj_dict['fq_name'] + + db_conn = self._db_conn + + # Validate perms + 
objtype_list = [u'global_system_config', u'domain'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # Alloc and Store id-mappings before creating entry on pubsub store. + # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('config-root', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'config_root', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['config_root', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('config-root') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'config_root', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('config-root', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + 
fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'config_root', 'http_post', result) + abort(404, result) + + rsp_body = {} + rsp_body['name'] = name + rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = self.generate_url('config-root', obj_ids['uuid']) + + try: + self._extension_mgrs['resourceApi'].map_method('post_config_root_create', obj_dict) + except Exception as e: + pass + + return {'config-root': rsp_body} + #end config_roots_http_post + + def config_roots_http_get(self): + # gather list of uuids using 1. any specified anchors + # 2. any specified filters + # if not 'detail' return list with any specified 'fields' + # if 'detail' return list with props+refs + any specified 'fields' + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + parent_uuids = None + back_ref_uuids = None + obj_uuids = None + if (('parent_fq_name_str' in request.query) and + ('parent_type' in request.query)): + parent_fq_name = request.query.parent_fq_name_str.split(':') + parent_type = request.query.parent_type + parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)] + elif 'parent_id' in request.query: + parent_ids = request.query.parent_id.split(',') + parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids] + if 'back_ref_id' in request.query: + back_ref_ids = request.query.back_ref_id.split(',') + back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids] + if 'obj_uuids' in request.query: + obj_uuids = request.query.obj_uuids.split(',') + + # common handling for all resource get + (ok, result) = self._get_common(request, parent_uuids) + if not ok: + (code, msg) = result + self.config_object_error(None, None, 'config_roots', 'http_get_collection', msg) + abort(code, msg) + + if 'count' in request.query: + count = 'true' in request.query.count.lower() + else: + count = False + + filter_params = 
request.query.filters + if filter_params: + try: + ff_key_vals = filter_params.split(',') + ff_names = [ff.split('==')[0] for ff in ff_key_vals] + ff_values = [ff.split('==')[1] for ff in ff_key_vals] + filters = {'field_names': ff_names, 'field_values': ff_values} + except Exception as e: + abort(400, 'Invalid filter ' + filter_params) + else: + filters = None + db_conn = self._db_conn + (ok, result) = \ + db_conn.dbe_list('config-root', parent_uuids, back_ref_uuids, obj_uuids, count, filters) + if not ok: + self.config_object_error(None, None, 'config_roots', 'http_get_collection', result) + abort(404, result) + + # If only counting, return early + if count: + return {'config-roots': {'count': result}} + + if 'detail' in request.query: + detail = 'true' in request.query.detail.lower() + else: + detail = False + + fq_names_uuids = result + obj_dicts = [] + if not detail: + if not self.is_admin_request(): + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms'] + (ok, result) = db_conn.dbe_read_multi('config-root', obj_ids_list, obj_fields) + if not ok: + result = [] + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + if obj_result['id_perms'].get('user_visible', True): + obj_dict = {} + obj_dict['uuid'] = obj_result['uuid'] + obj_dict['href'] = self.generate_url('config-root', obj_result['uuid']) + obj_dict['fq_name'] = obj_result['fq_name'] + obj_dicts.append(obj_dict) + else: + for fq_name, obj_uuid in fq_names_uuids: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['uuid'] = obj_uuid + obj_dict['href'] = self.generate_url('config-root', obj_uuid) + obj_dict['fq_name'] = fq_name + obj_dicts.append(obj_dict) + else: #detail + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms', u'display_name'] + [] + if 'fields' in request.query: + obj_fields.extend(request.query.fields.split(',')) + (ok, result) = 
db_conn.dbe_read_multi('config-root', obj_ids_list, obj_fields) + + if not ok: + result = [] + + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['name'] = obj_result['fq_name'][-1] + obj_dict['href'] = self.generate_url('config-root', obj_result['uuid']) + obj_dict.update(obj_result) + if (obj_dict['id_perms'].get('user_visible', True) or + self.is_admin_request()): + obj_dicts.append({'config-root': obj_dict}) + + return {'config-roots': obj_dicts} + #end config_roots_http_get + + def _config_root_create_default_children(self, parent_obj): + # Create a default child only if provisioned for + r_class = self.get_resource_class('global-system-config') + if r_class and r_class.generate_default_instance: + child_obj = GlobalSystemConfig(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('global-system-config') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('global-system-config', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('global-system-config', obj_ids, child_dict) + self._global_system_config_create_default_children(child_obj) + + # Create a default child only if provisioned for + r_class = self.get_resource_class('domain') + if r_class and r_class.generate_default_instance: + child_obj = Domain(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('domain') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('domain', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('domain', obj_ids, child_dict) + self._domain_create_default_children(child_obj) + + pass + #end _config_root_create_default_children + + def _config_root_delete_default_children(self, parent_dict): + # Delete a default child only if provisioned 
for + r_class = self.get_resource_class('domain') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('global_system_configs') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-global-system-config': + default_child_id = has_info['href'].split('/')[-1] + self.global_system_config_http_delete(default_child_id) + break + + # Delete a default child only if provisioned for + r_class = self.get_resource_class('domain') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('domains') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-domain': + default_child_id = has_info['href'].split('/')[-1] + self.domain_http_delete(default_child_id) + break + + pass + #end _config_root_delete_default_children + + def subnet_http_get(self, id): + try: + self._extension_mgrs['resourceApi'].map_method('pre_subnet_read', id) + except Exception as e: + pass + + # TODO get vals from request out of the global ASAP + etag = request.headers.get('If-None-Match') + try: + obj_type = self._db_conn.uuid_to_obj_type(id) + except NoIdError: + obj_type = None + if obj_type != 'subnet': + abort(404, 'No subnet object found for id %s' %(id)) + # common handling for all resource get + (ok, result) = self._get_common(request, id) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'subnet', 'http_get', msg) + abort(code, msg) + + # type-specific hook + r_class = self.get_resource_class('subnet') + if r_class: + r_class.http_get(id) + + db_conn = self._db_conn + if etag: + obj_ids = {'uuid': id} + (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', '')) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'subnet', 'http_get', result) + abort(404, result) + + is_latest = result + if is_latest: + # send Not-Modified, caches use this for read 
optimization + response.status = 304 + return + #end if etag + + obj_ids = {'uuid': id} + + # Generate field list for db layer + properties = [u'subnet_ip_prefix', u'id_perms', u'display_name'] + references = ['virtual_machine_interface_refs'] + back_references = [] + children = [] + if 'fields' in request.query: + obj_fields = request.query.fields.split(',') + else: # default props + children + refs + backrefs + obj_fields = properties + references + if 'exclude_back_refs' not in request.query: + obj_fields = obj_fields + back_references + if 'exclude_children' not in request.query: + obj_fields = obj_fields + children + + (ok, result) = db_conn.dbe_read('subnet', obj_ids, obj_fields) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'subnet', 'http_get', result) + abort(404, result) + + # check visibility + if (not result['id_perms'].get('user_visible', True) and + not self.is_admin_request()): + result = 'This object is not visible by users: %s' % id + self.config_object_error(id, None, 'subnet', 'http_get', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('subnet', id) + rsp_body['name'] = result['fq_name'][-1] + rsp_body.update(result) + id_perms = result['id_perms'] + response.set_header('ETag', '"' + id_perms['last_modified'] + '"') + try: + self._extension_mgrs['resourceApi'].map_method('post_subnet_read', id, rsp_body) + except Exception as e: + pass + + return {'subnet': rsp_body} + #end subnet_http_get + + def subnet_http_put(self, id): + key = 'subnet' + obj_dict = request.json[key] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_subnet_update', id, obj_dict) + except Exception as e: + pass + + db_conn = self._db_conn + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'subnet': + abort(404, 'No subnet object found for id %s' %(id)) + fq_name = db_conn.uuid_to_fq_name(id) + except NoIdError as e: + abort(404, str(e)) + prop_dict = 
obj_dict.get('subnet_ip_prefix') + if prop_dict: + buf = cStringIO.StringIO() + xx_subnet_ip_prefix = SubnetType(**prop_dict) + xx_subnet_ip_prefix.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_subnet_ip_prefix = SubnetType() + try: + xx_subnet_ip_prefix.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource put + (ok, result) = self._put_common(request, 'subnet', id, fq_name, obj_dict) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'subnet', 'http_put', msg) + abort(code, msg) + + # Validate perms + objtype_list = ['virtual_machine_interface'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + try: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + except NoIdError as e: + abort(404, str(e)) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. 
Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # type-specific hook + r_class = self.get_resource_class('subnet') + if r_class: + (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn) + if not ok: + (code, msg) = put_result + self.config_object_error(id, None, 'subnet', 'http_put', msg) + abort(code, msg) + callable = getattr(r_class, 'http_put_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn])) + + obj_ids = {'uuid': id} + try: + (ok, result) = db_conn.dbe_update('subnet', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'subnet', 'http_put', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('subnet', id) + + try: + self._extension_mgrs['resourceApi'].map_method('post_subnet_update', id, obj_dict) + except Exception as e: + pass + + return {'subnet': rsp_body} + #end subnet_http_put + + def subnet_http_delete(self, id): + db_conn = self._db_conn + # if obj doesn't exist return early + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'subnet': + abort(404, 'No subnet object found for id %s' %(id)) + _ = db_conn.uuid_to_fq_name(id) + except NoIdError: + abort(404, 'ID %s does not exist' %(id)) + + try: + self._extension_mgrs['resourceApi'].map_method('pre_subnet_delete', id) + except Exception as e: + pass + + # read in obj from db (accepting error) to get details of it + obj_ids = {'uuid': id} + back_references = [] + children = [] + obj_fields = children + back_references + (read_ok, read_result) = db_conn.dbe_read('subnet', obj_ids, obj_fields) + if not read_ok: + if read_result.startswith('Unknown id:'): + abort(404, 'ID %s does not exist' %(id)) + else: + self.config_object_error(id, None, 'subnet', 
'http_delete', read_result) + # proceed down to delete the resource + + # common handling for all resource delete + parent_type = read_result.get('parent_type', None) + (ok, del_result) = self._delete_common(request, 'subnet', id, parent_type) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'subnet', 'http_delete', msg) + abort(code, msg) + + fq_name = read_result['fq_name'] + ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('subnet', fq_name) + obj_ids['imid'] = ifmap_id + if parent_type: + parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1]) + obj_ids['parent_imid'] = parent_imid + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + + # type-specific hook + r_class = self.get_resource_class('subnet') + if r_class: + if read_ok: + # fail if non-default children or backrefs exist + + # Delete default children first + self._subnet_delete_default_children(read_result) + + (ok, del_result) = r_class.http_delete(id, read_result, db_conn) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'subnet', 'http_delete', msg) + abort(code, msg) + callable = getattr(r_class, 'http_delete_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, read_result, db_conn])) + #end if read_ok + + try: + (ok, del_result) = db_conn.dbe_delete('subnet', obj_ids, read_result) + except Exception as e: + ok = False + del_result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'subnet', 'http_delete', del_result) + abort(409, del_result) + + try: + self._extension_mgrs['resourceApi'].map_method('post_subnet_delete', id, read_result) + except Exception as e: + pass + + #end subnet_http_delete + + def subnets_http_post(self): + key = 'subnet' + obj_dict = request.json[key] + self._post_validate(key, 
obj_dict=obj_dict) + fq_name = obj_dict['fq_name'] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_subnet_create', obj_dict) + except Exception as e: + pass + + prop_dict = obj_dict.get('subnet_ip_prefix') + if prop_dict: + buf = cStringIO.StringIO() + xx_subnet_ip_prefix = SubnetType(**prop_dict) + xx_subnet_ip_prefix.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_subnet_ip_prefix = SubnetType() + try: + xx_subnet_ip_prefix.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource create + (ok, result) = self._post_common(request, 'subnet', obj_dict) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict.get('fq_name', [])) + self.config_object_error(None, fq_name_str, 'subnet', 'http_post', msg) + abort(code, msg) + + name = obj_dict['fq_name'][-1] + fq_name = obj_dict['fq_name'] + + db_conn = self._db_conn + + # Validate perms + objtype_list = ['virtual_machine_interface'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # Alloc and Store id-mappings before creating entry on pubsub store. 
+ # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('subnet', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'subnet', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['subnet', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('subnet') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'subnet', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('subnet', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'subnet', 'http_post', result) + abort(404, result) + + rsp_body = {} + rsp_body['name'] = name + rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = self.generate_url('subnet', obj_ids['uuid']) + + try: + self._extension_mgrs['resourceApi'].map_method('post_subnet_create', obj_dict) + except Exception as e: + pass + + return {'subnet': rsp_body} + #end subnets_http_post + + def subnets_http_get(self): + # gather list of uuids 
        # using 1. any specified anchors
        # 2. any specified filters
        # if not 'detail' return list with any specified 'fields'
        # if 'detail' return list with props+refs + any specified 'fields'

        env = request.headers.environ
        # Tenant comes from the multi-tenancy HTTP header; fall back to the
        # default project when the header is absent.
        tenant_name = env.get(hdr_server_tenant(), 'default-project')
        # Optional anchors narrowing the listing: a parent (given either as
        # fq_name+type or as uuid list), back-ref uuid(s), or explicit uuids.
        parent_uuids = None
        back_ref_uuids = None
        obj_uuids = None
        if (('parent_fq_name_str' in request.query) and
            ('parent_type' in request.query)):
            # parent identified by colon-separated fq_name plus its type
            parent_fq_name = request.query.parent_fq_name_str.split(':')
            parent_type = request.query.parent_type
            parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
        elif 'parent_id' in request.query:
            # parent identified directly by comma-separated uuid(s);
            # uuid.UUID() round-trip validates and canonicalizes the format
            parent_ids = request.query.parent_id.split(',')
            parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
        if 'back_ref_id' in request.query:
            back_ref_ids = request.query.back_ref_id.split(',')
            back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
        if 'obj_uuids' in request.query:
            obj_uuids = request.query.obj_uuids.split(',')

        # common handling for all resource get
        (ok, result) = self._get_common(request, parent_uuids)
        if not ok:
            (code, msg) = result
            self.config_object_error(None, None, 'subnets', 'http_get_collection', msg)
            abort(code, msg)

        # count=true asks only for the number of matches, not the objects
        if 'count' in request.query:
            count = 'true' in request.query.count.lower()
        else:
            count = False

        # filters arrive as comma-separated 'name==value' pairs
        filter_params = request.query.filters
        if filter_params:
            try:
                ff_key_vals = filter_params.split(',')
                ff_names = [ff.split('==')[0] for ff in ff_key_vals]
                ff_values = [ff.split('==')[1] for ff in ff_key_vals]
                filters = {'field_names': ff_names, 'field_values': ff_values}
            except Exception as e:
                abort(400, 'Invalid filter ' + filter_params)
        else:
            filters = None
        db_conn = self._db_conn
        (ok, result) = \
            db_conn.dbe_list('subnet', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
        if not ok:
            self.config_object_error(None, None, 'subnets', 'http_get_collection', result)
            abort(404, result)

        # If
        # only counting, return early
        if count:
            return {'subnets': {'count': result}}

        # detail=true returns full objects (props + refs); otherwise only
        # uuid/href/fq_name summaries are built
        if 'detail' in request.query:
            detail = 'true' in request.query.detail.lower()
        else:
            detail = False

        fq_names_uuids = result
        obj_dicts = []
        if not detail:
            if not self.is_admin_request():
                # non-admin: id_perms.user_visible must be honoured, so the
                # id_perms property is read for every matched object
                obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
                obj_fields = [u'id_perms']
                (ok, result) = db_conn.dbe_read_multi('subnet', obj_ids_list, obj_fields)
                if not ok:
                    result = []
                for obj_result in result:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    if obj_result['id_perms'].get('user_visible', True):
                        obj_dict = {}
                        obj_dict['uuid'] = obj_result['uuid']
                        obj_dict['href'] = self.generate_url('subnet', obj_result['uuid'])
                        obj_dict['fq_name'] = obj_result['fq_name']
                        obj_dicts.append(obj_dict)
            else:
                # admin: no visibility filtering, so summaries can be built
                # straight from the (fq_name, uuid) listing without DB reads
                for fq_name, obj_uuid in fq_names_uuids:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    obj_dict = {}
                    obj_dict['uuid'] = obj_uuid
                    obj_dict['href'] = self.generate_url('subnet', obj_uuid)
                    obj_dict['fq_name'] = fq_name
                    obj_dicts.append(obj_dict)
        else: #detail
            obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
            # all properties + references, plus any caller-requested fields
            obj_fields = [u'subnet_ip_prefix', u'id_perms', u'display_name'] + ['virtual_machine_interface_refs']
            if 'fields' in request.query:
                obj_fields.extend(request.query.fields.split(','))
            (ok, result) = db_conn.dbe_read_multi('subnet', obj_ids_list, obj_fields)

            if not ok:
                result = []

            for obj_result in result:
                # give chance for zk heartbeat/ping
                gevent.sleep(0)
                obj_dict = {}
                obj_dict['name'] = obj_result['fq_name'][-1]
                obj_dict['href'] = self.generate_url('subnet', obj_result['uuid'])
                obj_dict.update(obj_result)
                # non-admins only see user-visible objects
                if (obj_dict['id_perms'].get('user_visible', True) or
                    self.is_admin_request()):
                    obj_dicts.append({'subnet': obj_dict})

        return {'subnets': obj_dicts}
    #end subnets_http_get

    def _subnet_create_default_children(self, parent_obj):
        # subnet has no child types that carry a default instance
        pass
    #end
_subnet_create_default_children + + def _subnet_delete_default_children(self, parent_dict): + pass + #end _subnet_delete_default_children + + def global_system_config_http_get(self, id): + try: + self._extension_mgrs['resourceApi'].map_method('pre_global_system_config_read', id) + except Exception as e: + pass + + # TODO get vals from request out of the global ASAP + etag = request.headers.get('If-None-Match') + try: + obj_type = self._db_conn.uuid_to_obj_type(id) + except NoIdError: + obj_type = None + if obj_type != 'global_system_config': + abort(404, 'No global-system-config object found for id %s' %(id)) + # common handling for all resource get + (ok, result) = self._get_common(request, id) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'global_system_config', 'http_get', msg) + abort(code, msg) + + # type-specific hook + r_class = self.get_resource_class('global-system-config') + if r_class: + r_class.http_get(id) + + db_conn = self._db_conn + if etag: + obj_ids = {'uuid': id} + (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', '')) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'global_system_config', 'http_get', result) + abort(404, result) + + is_latest = result + if is_latest: + # send Not-Modified, caches use this for read optimization + response.status = 304 + return + #end if etag + + obj_ids = {'uuid': id} + + # Generate field list for db layer + properties = [u'autonomous_system', u'config_version', u'plugin_tuning', u'ibgp_auto_mesh', u'ip_fabric_subnets', u'id_perms', u'display_name'] + references = ['bgp_router_refs'] + back_references = [u'config_root_back_refs'] + children = [u'global_vrouter_configs', u'physical_routers', 'virtual_routers', u'config_nodes', u'analytics_nodes', u'database_nodes', u'service_appliance_sets'] + if 'fields' in request.query: + obj_fields = request.query.fields.split(',') + else: # default props + children + refs + backrefs + obj_fields = properties + 
references + if 'exclude_back_refs' not in request.query: + obj_fields = obj_fields + back_references + if 'exclude_children' not in request.query: + obj_fields = obj_fields + children + + (ok, result) = db_conn.dbe_read('global-system-config', obj_ids, obj_fields) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'global_system_config', 'http_get', result) + abort(404, result) + + # check visibility + if (not result['id_perms'].get('user_visible', True) and + not self.is_admin_request()): + result = 'This object is not visible by users: %s' % id + self.config_object_error(id, None, 'global_system_config', 'http_get', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('global-system-config', id) + rsp_body['name'] = result['fq_name'][-1] + rsp_body.update(result) + id_perms = result['id_perms'] + response.set_header('ETag', '"' + id_perms['last_modified'] + '"') + try: + self._extension_mgrs['resourceApi'].map_method('post_global_system_config_read', id, rsp_body) + except Exception as e: + pass + + return {'global-system-config': rsp_body} + #end global_system_config_http_get + + def global_system_config_http_put(self, id): + key = 'global-system-config' + obj_dict = request.json[key] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_global_system_config_update', id, obj_dict) + except Exception as e: + pass + + db_conn = self._db_conn + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'global_system_config': + abort(404, 'No global-system-config object found for id %s' %(id)) + fq_name = db_conn.uuid_to_fq_name(id) + except NoIdError as e: + abort(404, str(e)) + prop_dict = obj_dict.get('plugin_tuning') + if prop_dict: + buf = cStringIO.StringIO() + xx_plugin_tuning = PluginProperties(**prop_dict) + xx_plugin_tuning.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_plugin_tuning = PluginProperties() + try: + xx_plugin_tuning.build(node) + except 
Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('ip_fabric_subnets') + if prop_dict: + buf = cStringIO.StringIO() + xx_ip_fabric_subnets = SubnetListType(**prop_dict) + xx_ip_fabric_subnets.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_ip_fabric_subnets = SubnetListType() + try: + xx_ip_fabric_subnets.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource put + (ok, result) = self._put_common(request, 'global_system_config', id, fq_name, obj_dict) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'global_system_config', 'http_put', msg) + abort(code, msg) + + # Validate perms + objtype_list = ['bgp_router', u'global_vrouter_config', u'physical_router', 'virtual_router', u'config_node', u'analytics_node', u'database_node', u'service_appliance_set'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + try: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + except NoIdError as e: + abort(404, str(e)) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. 
Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # type-specific hook + r_class = self.get_resource_class('global-system-config') + if r_class: + (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn) + if not ok: + (code, msg) = put_result + self.config_object_error(id, None, 'global_system_config', 'http_put', msg) + abort(code, msg) + callable = getattr(r_class, 'http_put_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn])) + + obj_ids = {'uuid': id} + try: + (ok, result) = db_conn.dbe_update('global-system-config', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'global_system_config', 'http_put', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('global-system-config', id) + + try: + self._extension_mgrs['resourceApi'].map_method('post_global_system_config_update', id, obj_dict) + except Exception as e: + pass + + return {'global-system-config': rsp_body} + #end global_system_config_http_put + + def global_system_config_http_delete(self, id): + db_conn = self._db_conn + # if obj doesn't exist return early + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'global_system_config': + abort(404, 'No global-system-config object found for id %s' %(id)) + _ = db_conn.uuid_to_fq_name(id) + except NoIdError: + abort(404, 'ID %s does not exist' %(id)) + + try: + self._extension_mgrs['resourceApi'].map_method('pre_global_system_config_delete', id) + except Exception as e: + pass + + # read in obj from db (accepting error) to get details of it + obj_ids = {'uuid': id} + back_references = [u'config_root_back_refs'] + children = [u'global_vrouter_configs', u'physical_routers', 'virtual_routers', u'config_nodes', 
u'analytics_nodes', u'database_nodes', u'service_appliance_sets'] + obj_fields = children + back_references + (read_ok, read_result) = db_conn.dbe_read('global-system-config', obj_ids, obj_fields) + if not read_ok: + if read_result.startswith('Unknown id:'): + abort(404, 'ID %s does not exist' %(id)) + else: + self.config_object_error(id, None, 'global_system_config', 'http_delete', read_result) + # proceed down to delete the resource + + # common handling for all resource delete + parent_type = read_result.get('parent_type', None) + (ok, del_result) = self._delete_common(request, 'global_system_config', id, parent_type) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'global_system_config', 'http_delete', msg) + abort(code, msg) + + fq_name = read_result['fq_name'] + ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('global-system-config', fq_name) + obj_ids['imid'] = ifmap_id + if parent_type: + parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1]) + obj_ids['parent_imid'] = parent_imid + + # State modification starts from here. 
Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + + # type-specific hook + r_class = self.get_resource_class('global-system-config') + if r_class: + if read_ok: + # fail if non-default children or backrefs exist + global_vrouter_configs = read_result.get('global_vrouter_configs', None) + if global_vrouter_configs: + has_infos = read_result['global_vrouter_configs'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-global-vrouter-config')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'global_system_config', 'http_delete', err_msg) + abort(409, err_msg) + + physical_routers = read_result.get('physical_routers', None) + if physical_routers: + has_infos = read_result['physical_routers'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-physical-router')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'global_system_config', 'http_delete', err_msg) + abort(409, err_msg) + + virtual_routers = read_result.get('virtual_routers', None) + if virtual_routers: + has_infos = read_result['virtual_routers'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-virtual-router')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'global_system_config', 'http_delete', err_msg) + abort(409, err_msg) + + config_nodes = read_result.get('config_nodes', None) + if config_nodes: + has_infos = read_result['config_nodes'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-config-node')): + has_urls = 
[has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'global_system_config', 'http_delete', err_msg) + abort(409, err_msg) + + analytics_nodes = read_result.get('analytics_nodes', None) + if analytics_nodes: + has_infos = read_result['analytics_nodes'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-analytics-node')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'global_system_config', 'http_delete', err_msg) + abort(409, err_msg) + + database_nodes = read_result.get('database_nodes', None) + if database_nodes: + has_infos = read_result['database_nodes'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-database-node')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'global_system_config', 'http_delete', err_msg) + abort(409, err_msg) + + service_appliance_sets = read_result.get('service_appliance_sets', None) + if service_appliance_sets: + has_infos = read_result['service_appliance_sets'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-service-appliance-set')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'global_system_config', 'http_delete', err_msg) + abort(409, err_msg) + + + # Delete default children first + self._global_system_config_delete_default_children(read_result) + + (ok, del_result) = r_class.http_delete(id, read_result, db_conn) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 
'global_system_config', 'http_delete', msg) + abort(code, msg) + callable = getattr(r_class, 'http_delete_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, read_result, db_conn])) + #end if read_ok + + try: + (ok, del_result) = db_conn.dbe_delete('global-system-config', obj_ids, read_result) + except Exception as e: + ok = False + del_result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'global_system_config', 'http_delete', del_result) + abort(409, del_result) + + try: + self._extension_mgrs['resourceApi'].map_method('post_global_system_config_delete', id, read_result) + except Exception as e: + pass + + #end global_system_config_http_delete + + def global_system_configs_http_post(self): + key = 'global-system-config' + obj_dict = request.json[key] + self._post_validate(key, obj_dict=obj_dict) + fq_name = obj_dict['fq_name'] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_global_system_config_create', obj_dict) + except Exception as e: + pass + + prop_dict = obj_dict.get('plugin_tuning') + if prop_dict: + buf = cStringIO.StringIO() + xx_plugin_tuning = PluginProperties(**prop_dict) + xx_plugin_tuning.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_plugin_tuning = PluginProperties() + try: + xx_plugin_tuning.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('ip_fabric_subnets') + if prop_dict: + buf = cStringIO.StringIO() + xx_ip_fabric_subnets = SubnetListType(**prop_dict) + xx_ip_fabric_subnets.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_ip_fabric_subnets = SubnetListType() + try: + xx_ip_fabric_subnets.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + 
xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource create + (ok, result) = self._post_common(request, 'global-system-config', obj_dict) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict.get('fq_name', [])) + self.config_object_error(None, fq_name_str, 'global_system_config', 'http_post', msg) + abort(code, msg) + + name = obj_dict['fq_name'][-1] + fq_name = obj_dict['fq_name'] + + db_conn = self._db_conn + + # if client gave parent_type of config-root, ignore and remove + if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root': + del obj_dict['parent_type'] + + if 'parent_type' in obj_dict: + # non config-root child, verify parent exists + parent_type = obj_dict['parent_type'] + parent_fq_name = obj_dict['fq_name'][:-1] + try: + parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name) + (ok, status) = self._permissions.check_perms_write(request, parent_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + self._permissions.set_user_role(request, obj_dict) + except NoIdError: + err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist' + fq_name_str = ':'.join(parent_fq_name) + self.config_object_error(None, fq_name_str, 'global_system_config', 'http_post', err_msg) + abort(400, err_msg) + + # Validate perms + objtype_list = ['bgp_router', u'global_vrouter_config', u'physical_router', 'virtual_router', u'config_node', u'analytics_node', u'database_node', u'service_appliance_set'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. 
Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # Alloc and Store id-mappings before creating entry on pubsub store. + # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('global-system-config', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'global_system_config', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['global_system_config', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('global-system-config') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'global_system_config', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('global-system-config', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'global_system_config', 'http_post', result) + abort(404, result) + + rsp_body = {} + rsp_body['name'] = name + rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = 
self.generate_url('global-system-config', obj_ids['uuid']) + if 'parent_type' in obj_dict: + # non config-root child, send back parent uuid/href + rsp_body['parent_uuid'] = parent_uuid + rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid) + + try: + self._extension_mgrs['resourceApi'].map_method('post_global_system_config_create', obj_dict) + except Exception as e: + pass + + return {'global-system-config': rsp_body} + #end global_system_configs_http_post + + def global_system_configs_http_get(self): + # gather list of uuids using 1. any specified anchors + # 2. any specified filters + # if not 'detail' return list with any specified 'fields' + # if 'detail' return list with props+refs + any specified 'fields' + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + parent_uuids = None + back_ref_uuids = None + obj_uuids = None + if (('parent_fq_name_str' in request.query) and + ('parent_type' in request.query)): + parent_fq_name = request.query.parent_fq_name_str.split(':') + parent_type = request.query.parent_type + parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)] + elif 'parent_id' in request.query: + parent_ids = request.query.parent_id.split(',') + parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids] + if 'back_ref_id' in request.query: + back_ref_ids = request.query.back_ref_id.split(',') + back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids] + if 'obj_uuids' in request.query: + obj_uuids = request.query.obj_uuids.split(',') + + # common handling for all resource get + (ok, result) = self._get_common(request, parent_uuids) + if not ok: + (code, msg) = result + self.config_object_error(None, None, 'global_system_configs', 'http_get_collection', msg) + abort(code, msg) + + if 'count' in request.query: + count = 'true' in request.query.count.lower() + else: + count = False + + filter_params = request.query.filters + if filter_params: + try: + 
ff_key_vals = filter_params.split(',') + ff_names = [ff.split('==')[0] for ff in ff_key_vals] + ff_values = [ff.split('==')[1] for ff in ff_key_vals] + filters = {'field_names': ff_names, 'field_values': ff_values} + except Exception as e: + abort(400, 'Invalid filter ' + filter_params) + else: + filters = None + db_conn = self._db_conn + (ok, result) = \ + db_conn.dbe_list('global-system-config', parent_uuids, back_ref_uuids, obj_uuids, count, filters) + if not ok: + self.config_object_error(None, None, 'global_system_configs', 'http_get_collection', result) + abort(404, result) + + # If only counting, return early + if count: + return {'global-system-configs': {'count': result}} + + if 'detail' in request.query: + detail = 'true' in request.query.detail.lower() + else: + detail = False + + fq_names_uuids = result + obj_dicts = [] + if not detail: + if not self.is_admin_request(): + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms'] + (ok, result) = db_conn.dbe_read_multi('global-system-config', obj_ids_list, obj_fields) + if not ok: + result = [] + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + if obj_result['id_perms'].get('user_visible', True): + obj_dict = {} + obj_dict['uuid'] = obj_result['uuid'] + obj_dict['href'] = self.generate_url('global-system-config', obj_result['uuid']) + obj_dict['fq_name'] = obj_result['fq_name'] + obj_dicts.append(obj_dict) + else: + for fq_name, obj_uuid in fq_names_uuids: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['uuid'] = obj_uuid + obj_dict['href'] = self.generate_url('global-system-config', obj_uuid) + obj_dict['fq_name'] = fq_name + obj_dicts.append(obj_dict) + else: #detail + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'autonomous_system', u'config_version', u'plugin_tuning', u'ibgp_auto_mesh', u'ip_fabric_subnets', u'id_perms', u'display_name'] + 
['bgp_router_refs'] + if 'fields' in request.query: + obj_fields.extend(request.query.fields.split(',')) + (ok, result) = db_conn.dbe_read_multi('global-system-config', obj_ids_list, obj_fields) + + if not ok: + result = [] + + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['name'] = obj_result['fq_name'][-1] + obj_dict['href'] = self.generate_url('global-system-config', obj_result['uuid']) + obj_dict.update(obj_result) + if (obj_dict['id_perms'].get('user_visible', True) or + self.is_admin_request()): + obj_dicts.append({'global-system-config': obj_dict}) + + return {'global-system-configs': obj_dicts} + #end global_system_configs_http_get + + def _global_system_config_create_default_children(self, parent_obj): + # Create a default child only if provisioned for + r_class = self.get_resource_class('global-vrouter-config') + if r_class and r_class.generate_default_instance: + child_obj = GlobalVrouterConfig(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('global-vrouter-config') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('global-vrouter-config', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('global-vrouter-config', obj_ids, child_dict) + self._global_vrouter_config_create_default_children(child_obj) + + # Create a default child only if provisioned for + r_class = self.get_resource_class('physical-router') + if r_class and r_class.generate_default_instance: + child_obj = PhysicalRouter(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('physical-router') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('physical-router', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('physical-router', obj_ids, 
child_dict) + self._physical_router_create_default_children(child_obj) + + # Create a default child only if provisioned for + r_class = self.get_resource_class('virtual-router') + if r_class and r_class.generate_default_instance: + child_obj = VirtualRouter(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('virtual-router') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('virtual-router', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('virtual-router', obj_ids, child_dict) + self._virtual_router_create_default_children(child_obj) + + # Create a default child only if provisioned for + r_class = self.get_resource_class('config-node') + if r_class and r_class.generate_default_instance: + child_obj = ConfigNode(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('config-node') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('config-node', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('config-node', obj_ids, child_dict) + self._config_node_create_default_children(child_obj) + + # Create a default child only if provisioned for + r_class = self.get_resource_class('analytics-node') + if r_class and r_class.generate_default_instance: + child_obj = AnalyticsNode(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('analytics-node') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('analytics-node', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('analytics-node', obj_ids, child_dict) + self._analytics_node_create_default_children(child_obj) + + # Create a default child only if provisioned for + r_class = 
self.get_resource_class('database-node') + if r_class and r_class.generate_default_instance: + child_obj = DatabaseNode(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('database-node') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('database-node', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('database-node', obj_ids, child_dict) + self._database_node_create_default_children(child_obj) + + # Create a default child only if provisioned for + r_class = self.get_resource_class('service-appliance-set') + if r_class and r_class.generate_default_instance: + child_obj = ServiceApplianceSet(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('service-appliance-set') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('service-appliance-set', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('service-appliance-set', obj_ids, child_dict) + self._service_appliance_set_create_default_children(child_obj) + + pass + #end _global_system_config_create_default_children + + def _global_system_config_delete_default_children(self, parent_dict): + # Delete a default child only if provisioned for + r_class = self.get_resource_class('service-appliance-set') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('global_vrouter_configs') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-global-vrouter-config': + default_child_id = has_info['href'].split('/')[-1] + self.global_vrouter_config_http_delete(default_child_id) + break + + # Delete a default child only if provisioned for + r_class = self.get_resource_class('service-appliance-set') + if r_class and 
r_class.generate_default_instance:
            # first locate default child then delete it
            has_infos = parent_dict.get('physical_routers')
            if has_infos:
                for has_info in has_infos:
                    if has_info['to'][-1] == 'default-physical-router':
                        default_child_id = has_info['href'].split('/')[-1]
                        self.physical_router_http_delete(default_child_id)
                        break

        # Delete a default child only if provisioned for
        # NOTE(review): every guard in this method looks up
        # 'service-appliance-set' instead of the child's own resource type
        # ('virtual-router' here), unlike the matching
        # _global_system_config_create_default_children which checks
        # per-type. Presumably a generator bug -- fix in IFMapApiGenerator,
        # not in this auto-generated file.
        r_class = self.get_resource_class('service-appliance-set')
        if r_class and r_class.generate_default_instance:
            # first locate default child then delete it
            has_infos = parent_dict.get('virtual_routers')
            if has_infos:
                for has_info in has_infos:
                    if has_info['to'][-1] == 'default-virtual-router':
                        default_child_id = has_info['href'].split('/')[-1]
                        self.virtual_router_http_delete(default_child_id)
                        break

        # Delete a default child only if provisioned for
        r_class = self.get_resource_class('service-appliance-set')
        if r_class and r_class.generate_default_instance:
            # first locate default child then delete it
            has_infos = parent_dict.get('config_nodes')
            if has_infos:
                for has_info in has_infos:
                    if has_info['to'][-1] == 'default-config-node':
                        default_child_id = has_info['href'].split('/')[-1]
                        self.config_node_http_delete(default_child_id)
                        break

        # Delete a default child only if provisioned for
        r_class = self.get_resource_class('service-appliance-set')
        if r_class and r_class.generate_default_instance:
            # first locate default child then delete it
            has_infos = parent_dict.get('analytics_nodes')
            if has_infos:
                for has_info in has_infos:
                    if has_info['to'][-1] == 'default-analytics-node':
                        default_child_id = has_info['href'].split('/')[-1]
                        self.analytics_node_http_delete(default_child_id)
                        break

        # Delete a default child only if provisioned for
        r_class = self.get_resource_class('service-appliance-set')
        if r_class and r_class.generate_default_instance:
            # first locate default child then delete it
            has_infos = 
parent_dict.get('database_nodes') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-database-node': + default_child_id = has_info['href'].split('/')[-1] + self.database_node_http_delete(default_child_id) + break + + # Delete a default child only if provisioned for + r_class = self.get_resource_class('service-appliance-set') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('service_appliance_sets') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-service-appliance-set': + default_child_id = has_info['href'].split('/')[-1] + self.service_appliance_set_http_delete(default_child_id) + break + + pass + #end _global_system_config_delete_default_children + + def service_appliance_http_get(self, id): + try: + self._extension_mgrs['resourceApi'].map_method('pre_service_appliance_read', id) + except Exception as e: + pass + + # TODO get vals from request out of the global ASAP + etag = request.headers.get('If-None-Match') + try: + obj_type = self._db_conn.uuid_to_obj_type(id) + except NoIdError: + obj_type = None + if obj_type != 'service_appliance': + abort(404, 'No service-appliance object found for id %s' %(id)) + # common handling for all resource get + (ok, result) = self._get_common(request, id) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'service_appliance', 'http_get', msg) + abort(code, msg) + + # type-specific hook + r_class = self.get_resource_class('service-appliance') + if r_class: + r_class.http_get(id) + + db_conn = self._db_conn + if etag: + obj_ids = {'uuid': id} + (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', '')) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'service_appliance', 'http_get', result) + abort(404, result) + + is_latest = result + if is_latest: + # send Not-Modified, caches use this for read optimization + response.status = 304 + 
return + #end if etag + + obj_ids = {'uuid': id} + + # Generate field list for db layer + properties = [u'service_appliance_user_credentials', u'service_appliance_ip_address', u'service_appliance_properties', u'id_perms', u'display_name'] + references = [] + back_references = [u'service_appliance_set_back_refs'] + children = [] + if 'fields' in request.query: + obj_fields = request.query.fields.split(',') + else: # default props + children + refs + backrefs + obj_fields = properties + references + if 'exclude_back_refs' not in request.query: + obj_fields = obj_fields + back_references + if 'exclude_children' not in request.query: + obj_fields = obj_fields + children + + (ok, result) = db_conn.dbe_read('service-appliance', obj_ids, obj_fields) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'service_appliance', 'http_get', result) + abort(404, result) + + # check visibility + if (not result['id_perms'].get('user_visible', True) and + not self.is_admin_request()): + result = 'This object is not visible by users: %s' % id + self.config_object_error(id, None, 'service_appliance', 'http_get', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('service-appliance', id) + rsp_body['name'] = result['fq_name'][-1] + rsp_body.update(result) + id_perms = result['id_perms'] + response.set_header('ETag', '"' + id_perms['last_modified'] + '"') + try: + self._extension_mgrs['resourceApi'].map_method('post_service_appliance_read', id, rsp_body) + except Exception as e: + pass + + return {'service-appliance': rsp_body} + #end service_appliance_http_get + + def service_appliance_http_put(self, id): + key = 'service-appliance' + obj_dict = request.json[key] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_service_appliance_update', id, obj_dict) + except Exception as e: + pass + + db_conn = self._db_conn + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'service_appliance': 
abort(404, 'No service-appliance object found for id %s' %(id))
            fq_name = db_conn.uuid_to_fq_name(id)
        except NoIdError as e:
            abort(404, str(e))
        # Validate each complex property by round-tripping it through its
        # generated XSD type (dict -> object -> XML -> object); a build()
        # failure rejects the request with 400. The built xx_* objects are
        # not used afterwards -- this is validation only.
        prop_dict = obj_dict.get('service_appliance_user_credentials')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_service_appliance_user_credentials = UserCredentials(**prop_dict)
            xx_service_appliance_user_credentials.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_service_appliance_user_credentials = UserCredentials()
            try:
                xx_service_appliance_user_credentials.build(node)
            except Exception as e:
                abort(400, str(e))
        prop_dict = obj_dict.get('service_appliance_properties')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_service_appliance_properties = KeyValuePairs(**prop_dict)
            xx_service_appliance_properties.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_service_appliance_properties = KeyValuePairs()
            try:
                xx_service_appliance_properties.build(node)
            except Exception as e:
                abort(400, str(e))
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource put
        (ok, result) = self._put_common(request, 'service_appliance', id, fq_name, obj_dict)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'service_appliance', 'http_put', msg)
            abort(code, msg)


        # State modification starts from here. 
Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # type-specific hook + r_class = self.get_resource_class('service-appliance') + if r_class: + (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn) + if not ok: + (code, msg) = put_result + self.config_object_error(id, None, 'service_appliance', 'http_put', msg) + abort(code, msg) + callable = getattr(r_class, 'http_put_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn])) + + obj_ids = {'uuid': id} + try: + (ok, result) = db_conn.dbe_update('service-appliance', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'service_appliance', 'http_put', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('service-appliance', id) + + try: + self._extension_mgrs['resourceApi'].map_method('post_service_appliance_update', id, obj_dict) + except Exception as e: + pass + + return {'service-appliance': rsp_body} + #end service_appliance_http_put + + def service_appliance_http_delete(self, id): + db_conn = self._db_conn + # if obj doesn't exist return early + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'service_appliance': + abort(404, 'No service-appliance object found for id %s' %(id)) + _ = db_conn.uuid_to_fq_name(id) + except NoIdError: + abort(404, 'ID %s does not exist' %(id)) + + try: + self._extension_mgrs['resourceApi'].map_method('pre_service_appliance_delete', id) + except Exception as e: + pass + + # read in obj from db (accepting error) to get details of it + obj_ids = {'uuid': id} + back_references = [u'service_appliance_set_back_refs'] + children = [] + obj_fields = children + back_references + (read_ok, read_result) = db_conn.dbe_read('service-appliance', obj_ids, 
obj_fields)
        if not read_ok:
            if read_result.startswith('Unknown id:'):
                abort(404, 'ID %s does not exist' %(id))
            else:
                # Read errors other than unknown-id are logged but do not
                # stop the request; the delete is still attempted below.
                self.config_object_error(id, None, 'service_appliance', 'http_delete', read_result)
                # proceed down to delete the resource

        # common handling for all resource delete
        parent_type = read_result.get('parent_type', None)
        (ok, del_result) = self._delete_common(request, 'service_appliance', id, parent_type)
        if not ok:
            (code, msg) = del_result
            self.config_object_error(id, None, 'service_appliance', 'http_delete', msg)
            abort(code, msg)

        # Derive IF-MAP identifiers for the object (and its parent, if any)
        # so the db layer can remove the corresponding ifmap entries.
        fq_name = read_result['fq_name']
        ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('service-appliance', fq_name)
        obj_ids['imid'] = ifmap_id
        if parent_type:
            parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
            obj_ids['parent_imid'] = parent_imid

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []

        # type-specific hook
        r_class = self.get_resource_class('service-appliance')
        if r_class:
            if read_ok:
                # fail if non-default children or backrefs exist

                # Delete default children first
                self._service_appliance_delete_default_children(read_result)

                (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
                if not ok:
                    (code, msg) = del_result
                    self.config_object_error(id, None, 'service_appliance', 'http_delete', msg)
                    abort(code, msg)
                # Register the type-specific undo hook, if provided, so the
                # r_class.http_delete side effects can be rolled back on
                # db delete failure.
                callable = getattr(r_class, 'http_delete_fail', None)
                if callable:
                    cleanup_on_failure.append((callable, [id, read_result, db_conn]))
            #end if read_ok

        try:
            (ok, del_result) = db_conn.dbe_delete('service-appliance', obj_ids, read_result)
        except Exception as e:
            ok = False
            del_result = str(e)
        if not ok:
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'service_appliance', 'http_delete', del_result)
            abort(409, del_result)

        try:
+ self._extension_mgrs['resourceApi'].map_method('post_service_appliance_delete', id, read_result) + except Exception as e: + pass + + #end service_appliance_http_delete + + def service_appliances_http_post(self): + key = 'service-appliance' + obj_dict = request.json[key] + self._post_validate(key, obj_dict=obj_dict) + fq_name = obj_dict['fq_name'] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_service_appliance_create', obj_dict) + except Exception as e: + pass + + prop_dict = obj_dict.get('service_appliance_user_credentials') + if prop_dict: + buf = cStringIO.StringIO() + xx_service_appliance_user_credentials = UserCredentials(**prop_dict) + xx_service_appliance_user_credentials.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_service_appliance_user_credentials = UserCredentials() + try: + xx_service_appliance_user_credentials.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('service_appliance_properties') + if prop_dict: + buf = cStringIO.StringIO() + xx_service_appliance_properties = KeyValuePairs(**prop_dict) + xx_service_appliance_properties.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_service_appliance_properties = KeyValuePairs() + try: + xx_service_appliance_properties.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource create + (ok, result) = self._post_common(request, 'service-appliance', obj_dict) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict.get('fq_name', [])) + self.config_object_error(None, fq_name_str, 'service_appliance', 'http_post', msg) + abort(code, msg) + + name = obj_dict['fq_name'][-1] + fq_name = 
obj_dict['fq_name']

        db_conn = self._db_conn

        # if client gave parent_type of config-root, ignore and remove
        if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
            del obj_dict['parent_type']

        if 'parent_type' in obj_dict:
            # non config-root child, verify parent exists
            # Parent fq_name is this object's fq_name minus the last (own)
            # component; also enforce write permission on the parent.
            parent_type = obj_dict['parent_type']
            parent_fq_name = obj_dict['fq_name'][:-1]
            try:
                parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
                (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
                if not ok:
                    (code, err_msg) = status
                    abort(code, err_msg)
                self._permissions.set_user_role(request, obj_dict)
            except NoIdError:
                err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
                fq_name_str = ':'.join(parent_fq_name)
                self.config_object_error(None, fq_name_str, 'service_appliance', 'http_post', err_msg)
                abort(400, err_msg)


        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # Alloc and Store id-mappings before creating entry on pubsub store. 
+ # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('service-appliance', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'service_appliance', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['service_appliance', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('service-appliance') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'service_appliance', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('service-appliance', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'service_appliance', 'http_post', result) + abort(404, result) + + rsp_body = {} + rsp_body['name'] = name + rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = self.generate_url('service-appliance', obj_ids['uuid']) + if 'parent_type' in obj_dict: + # non config-root child, send back parent uuid/href + rsp_body['parent_uuid'] = parent_uuid + rsp_body['parent_href'] 
= self.generate_url(parent_type, parent_uuid) + + try: + self._extension_mgrs['resourceApi'].map_method('post_service_appliance_create', obj_dict) + except Exception as e: + pass + + return {'service-appliance': rsp_body} + #end service_appliances_http_post + + def service_appliances_http_get(self): + # gather list of uuids using 1. any specified anchors + # 2. any specified filters + # if not 'detail' return list with any specified 'fields' + # if 'detail' return list with props+refs + any specified 'fields' + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + parent_uuids = None + back_ref_uuids = None + obj_uuids = None + if (('parent_fq_name_str' in request.query) and + ('parent_type' in request.query)): + parent_fq_name = request.query.parent_fq_name_str.split(':') + parent_type = request.query.parent_type + parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)] + elif 'parent_id' in request.query: + parent_ids = request.query.parent_id.split(',') + parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids] + if 'back_ref_id' in request.query: + back_ref_ids = request.query.back_ref_id.split(',') + back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids] + if 'obj_uuids' in request.query: + obj_uuids = request.query.obj_uuids.split(',') + + # common handling for all resource get + (ok, result) = self._get_common(request, parent_uuids) + if not ok: + (code, msg) = result + self.config_object_error(None, None, 'service_appliances', 'http_get_collection', msg) + abort(code, msg) + + if 'count' in request.query: + count = 'true' in request.query.count.lower() + else: + count = False + + filter_params = request.query.filters + if filter_params: + try: + ff_key_vals = filter_params.split(',') + ff_names = [ff.split('==')[0] for ff in ff_key_vals] + ff_values = [ff.split('==')[1] for ff in ff_key_vals] + filters = {'field_names': ff_names, 'field_values': ff_values} + except 
Exception as e:
                abort(400, 'Invalid filter ' + filter_params)
        else:
            filters = None
        db_conn = self._db_conn
        (ok, result) = \
            db_conn.dbe_list('service-appliance', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
        if not ok:
            self.config_object_error(None, None, 'service_appliances', 'http_get_collection', result)
            abort(404, result)

        # If only counting, return early
        if count:
            return {'service-appliances': {'count': result}}

        if 'detail' in request.query:
            detail = 'true' in request.query.detail.lower()
        else:
            detail = False

        fq_names_uuids = result
        obj_dicts = []
        if not detail:
            if not self.is_admin_request():
                # Non-admin summary listing: read id_perms for each hit and
                # drop entries marked not user-visible.
                obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
                obj_fields = [u'id_perms']
                (ok, result) = db_conn.dbe_read_multi('service-appliance', obj_ids_list, obj_fields)
                if not ok:
                    result = []
                for obj_result in result:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    if obj_result['id_perms'].get('user_visible', True):
                        obj_dict = {}
                        obj_dict['uuid'] = obj_result['uuid']
                        obj_dict['href'] = self.generate_url('service-appliance', obj_result['uuid'])
                        obj_dict['fq_name'] = obj_result['fq_name']
                        obj_dicts.append(obj_dict)
            else:
                # Admin summary listing: no visibility filtering, no db read.
                for fq_name, obj_uuid in fq_names_uuids:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    obj_dict = {}
                    obj_dict['uuid'] = obj_uuid
                    obj_dict['href'] = self.generate_url('service-appliance', obj_uuid)
                    obj_dict['fq_name'] = fq_name
                    obj_dicts.append(obj_dict)
        else: #detail
            # Detail listing: read full property set (plus any caller-requested
            # 'fields') for every hit.
            obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
            obj_fields = [u'service_appliance_user_credentials', u'service_appliance_ip_address', u'service_appliance_properties', u'id_perms', u'display_name'] + []
            if 'fields' in request.query:
                obj_fields.extend(request.query.fields.split(','))
            (ok, result) = db_conn.dbe_read_multi('service-appliance', obj_ids_list, obj_fields)

            if not ok:
                result = []

            for obj_result in result:
                # give 
chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['name'] = obj_result['fq_name'][-1] + obj_dict['href'] = self.generate_url('service-appliance', obj_result['uuid']) + obj_dict.update(obj_result) + if (obj_dict['id_perms'].get('user_visible', True) or + self.is_admin_request()): + obj_dicts.append({'service-appliance': obj_dict}) + + return {'service-appliances': obj_dicts} + #end service_appliances_http_get + + def _service_appliance_create_default_children(self, parent_obj): + pass + #end _service_appliance_create_default_children + + def _service_appliance_delete_default_children(self, parent_dict): + pass + #end _service_appliance_delete_default_children + + def service_instance_http_get(self, id): + try: + self._extension_mgrs['resourceApi'].map_method('pre_service_instance_read', id) + except Exception as e: + pass + + # TODO get vals from request out of the global ASAP + etag = request.headers.get('If-None-Match') + try: + obj_type = self._db_conn.uuid_to_obj_type(id) + except NoIdError: + obj_type = None + if obj_type != 'service_instance': + abort(404, 'No service-instance object found for id %s' %(id)) + # common handling for all resource get + (ok, result) = self._get_common(request, id) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'service_instance', 'http_get', msg) + abort(code, msg) + + # type-specific hook + r_class = self.get_resource_class('service-instance') + if r_class: + r_class.http_get(id) + + db_conn = self._db_conn + if etag: + obj_ids = {'uuid': id} + (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', '')) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'service_instance', 'http_get', result) + abort(404, result) + + is_latest = result + if is_latest: + # send Not-Modified, caches use this for read optimization + response.status = 304 + return + #end if etag + + obj_ids = {'uuid': id} + + # Generate field list for db layer + properties = 
[u'service_instance_properties', u'id_perms', u'display_name'] + references = ['service_template_refs'] + back_references = [u'project_back_refs', u'virtual_machine_back_refs', u'logical_router_back_refs', u'loadbalancer_pool_back_refs'] + children = [] + if 'fields' in request.query: + obj_fields = request.query.fields.split(',') + else: # default props + children + refs + backrefs + obj_fields = properties + references + if 'exclude_back_refs' not in request.query: + obj_fields = obj_fields + back_references + if 'exclude_children' not in request.query: + obj_fields = obj_fields + children + + (ok, result) = db_conn.dbe_read('service-instance', obj_ids, obj_fields) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'service_instance', 'http_get', result) + abort(404, result) + + # check visibility + if (not result['id_perms'].get('user_visible', True) and + not self.is_admin_request()): + result = 'This object is not visible by users: %s' % id + self.config_object_error(id, None, 'service_instance', 'http_get', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('service-instance', id) + rsp_body['name'] = result['fq_name'][-1] + rsp_body.update(result) + id_perms = result['id_perms'] + response.set_header('ETag', '"' + id_perms['last_modified'] + '"') + try: + self._extension_mgrs['resourceApi'].map_method('post_service_instance_read', id, rsp_body) + except Exception as e: + pass + + return {'service-instance': rsp_body} + #end service_instance_http_get + + def service_instance_http_put(self, id): + key = 'service-instance' + obj_dict = request.json[key] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_service_instance_update', id, obj_dict) + except Exception as e: + pass + + db_conn = self._db_conn + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'service_instance': + abort(404, 'No service-instance object found for id %s' %(id)) + fq_name = 
db_conn.uuid_to_fq_name(id)
        except NoIdError as e:
            abort(404, str(e))
        # Validate each complex property by round-tripping it through its
        # generated XSD type (dict -> object -> XML -> object); a build()
        # failure rejects the request with 400.
        prop_dict = obj_dict.get('service_instance_properties')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_service_instance_properties = ServiceInstanceType(**prop_dict)
            xx_service_instance_properties.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_service_instance_properties = ServiceInstanceType()
            try:
                xx_service_instance_properties.build(node)
            except Exception as e:
                abort(400, str(e))
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource put
        (ok, result) = self._put_common(request, 'service_instance', id, fq_name, obj_dict)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'service_instance', 'http_put', msg)
            abort(code, msg)

        # Validate perms
        # Each referenced service-template must exist and be linkable by
        # the requesting user.
        objtype_list = ['service_template']
        for obj_type in objtype_list:
            refs = obj_dict.get('%s_refs'%(obj_type), None)
            if refs:
                for ref in refs:
                    try:
                        ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
                    except NoIdError as e:
                        abort(404, str(e))
                    (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
                    if not ok:
                        (code, err_msg) = status
                        abort(code, err_msg)

        # State modification starts from here. 
Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # type-specific hook + r_class = self.get_resource_class('service-instance') + if r_class: + (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn) + if not ok: + (code, msg) = put_result + self.config_object_error(id, None, 'service_instance', 'http_put', msg) + abort(code, msg) + callable = getattr(r_class, 'http_put_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn])) + + obj_ids = {'uuid': id} + try: + (ok, result) = db_conn.dbe_update('service-instance', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'service_instance', 'http_put', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('service-instance', id) + + try: + self._extension_mgrs['resourceApi'].map_method('post_service_instance_update', id, obj_dict) + except Exception as e: + pass + + return {'service-instance': rsp_body} + #end service_instance_http_put + + def service_instance_http_delete(self, id): + db_conn = self._db_conn + # if obj doesn't exist return early + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'service_instance': + abort(404, 'No service-instance object found for id %s' %(id)) + _ = db_conn.uuid_to_fq_name(id) + except NoIdError: + abort(404, 'ID %s does not exist' %(id)) + + try: + self._extension_mgrs['resourceApi'].map_method('pre_service_instance_delete', id) + except Exception as e: + pass + + # read in obj from db (accepting error) to get details of it + obj_ids = {'uuid': id} + back_references = [u'project_back_refs', u'virtual_machine_back_refs', u'logical_router_back_refs', u'loadbalancer_pool_back_refs'] + children = [] + obj_fields = children + back_references + 
(read_ok, read_result) = db_conn.dbe_read('service-instance', obj_ids, obj_fields)
        if not read_ok:
            if read_result.startswith('Unknown id:'):
                abort(404, 'ID %s does not exist' %(id))
            else:
                # Read errors other than unknown-id are logged but do not
                # stop the request; the delete is still attempted below.
                self.config_object_error(id, None, 'service_instance', 'http_delete', read_result)
                # proceed down to delete the resource

        # common handling for all resource delete
        parent_type = read_result.get('parent_type', None)
        (ok, del_result) = self._delete_common(request, 'service_instance', id, parent_type)
        if not ok:
            (code, msg) = del_result
            self.config_object_error(id, None, 'service_instance', 'http_delete', msg)
            abort(code, msg)

        # Derive IF-MAP identifiers for the object (and its parent, if any)
        # so the db layer can remove the corresponding ifmap entries.
        fq_name = read_result['fq_name']
        ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('service-instance', fq_name)
        obj_ids['imid'] = ifmap_id
        if parent_type:
            parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
            obj_ids['parent_imid'] = parent_imid

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []

        # type-specific hook
        r_class = self.get_resource_class('service-instance')
        if r_class:
            if read_ok:
                # fail if non-default children or backrefs exist
                # Refuse deletion (409) while logical-router or
                # loadbalancer-pool back-references still point here.
                logical_router_back_refs = read_result.get('logical_router_back_refs', None)
                if logical_router_back_refs:
                    back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['logical_router_back_refs']]
                    back_ref_str = ', '.join(back_ref_urls)
                    err_msg = 'Back-References from ' + back_ref_str + ' still exist'
                    self.config_object_error(id, None, 'service_instance', 'http_delete', err_msg)
                    abort(409, err_msg)

                loadbalancer_pool_back_refs = read_result.get('loadbalancer_pool_back_refs', None)
                if loadbalancer_pool_back_refs:
                    back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['loadbalancer_pool_back_refs']]
                    back_ref_str = ', '.join(back_ref_urls)
                    err_msg = 'Back-References from ' + back_ref_str + ' still exist'
                    self.config_object_error(id, None, 'service_instance', 'http_delete', err_msg)
                    abort(409, err_msg)


                # Delete default children first
                self._service_instance_delete_default_children(read_result)

                (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
                if not ok:
                    (code, msg) = del_result
                    self.config_object_error(id, None, 'service_instance', 'http_delete', msg)
                    abort(code, msg)
                # NOTE(review): 'callable' shadows the builtin; kept as-is (generated code)
                callable = getattr(r_class, 'http_delete_fail', None)
                if callable:
                    cleanup_on_failure.append((callable, [id, read_result, db_conn]))
            #end if read_ok

        try:
            (ok, del_result) = db_conn.dbe_delete('service-instance', obj_ids, read_result)
        except Exception as e:
            ok = False
            del_result = str(e)
        if not ok:
            # undo partial state changes before reporting the failure
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'service_instance', 'http_delete', del_result)
            abort(409, del_result)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_service_instance_delete', id, read_result)
        except Exception as e:
            # extension hooks are best-effort; failures deliberately ignored
            pass

    #end service_instance_http_delete

    def service_instances_http_post(self):
        """REST handler: POST /service-instances — create a service-instance object."""
        key = 'service-instance'
        obj_dict = request.json[key]
        self._post_validate(key, obj_dict=obj_dict)
        fq_name = obj_dict['fq_name']

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_service_instance_create', obj_dict)
        except Exception as e:
            # extension hooks are best-effort; failures deliberately ignored
            pass

        # Validate complex properties by round-tripping them through the
        # generated XSD types (dict -> XML -> build); abort 400 on bad input.
        prop_dict = obj_dict.get('service_instance_properties')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_service_instance_properties = ServiceInstanceType(**prop_dict)
            xx_service_instance_properties.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_service_instance_properties = ServiceInstanceType()
            try:
                xx_service_instance_properties.build(node)
            except Exception as e:
                abort(400, str(e))
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource create
        (ok, result) = self._post_common(request, 'service-instance', obj_dict)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict.get('fq_name', []))
            self.config_object_error(None, fq_name_str, 'service_instance', 'http_post', msg)
            abort(code, msg)

        name = obj_dict['fq_name'][-1]
        fq_name = obj_dict['fq_name']

        db_conn = self._db_conn

        # if client gave parent_type of config-root, ignore and remove
        if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
            del obj_dict['parent_type']

        if 'parent_type' in obj_dict:
            # non config-root child, verify parent exists
            parent_type = obj_dict['parent_type']
            parent_fq_name = obj_dict['fq_name'][:-1]
            try:
                parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
                (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
                if not ok:
                    (code, err_msg) = status
                    abort(code, err_msg)
                self._permissions.set_user_role(request, obj_dict)
            except NoIdError:
                err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
                fq_name_str = ':'.join(parent_fq_name)
                self.config_object_error(None, fq_name_str, 'service_instance', 'http_post', err_msg)
                abort(400, err_msg)

        # Validate perms
        objtype_list = ['service_template']
        for obj_type in objtype_list:
            refs = obj_dict.get('%s_refs'%(obj_type), None)
            if refs:
                for ref in refs:
                    ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
                    (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
                    if not ok:
                        (code, err_msg) = status
                        abort(code, err_msg)

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # Alloc and Store id-mappings before creating entry on pubsub store.
        # Else a subscriber can ask for an id mapping before we have stored it
        uuid_requested = result
        (ok, result) = db_conn.dbe_alloc('service-instance', obj_dict, uuid_requested)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict['fq_name'])
            self.config_object_error(None, fq_name_str, 'service_instance', 'http_post', result)
            abort(code, msg)
        cleanup_on_failure.append((db_conn.dbe_release, ['service_instance', fq_name]))

        obj_ids = result

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')


        # type-specific hook
        r_class = self.get_resource_class('service-instance')
        if r_class:
            try:
                (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
            except Exception as e:
                ok = False
                result = (500, str(e))
            if not ok:
                # undo id allocation (and any earlier registered cleanup)
                for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                    fail_cleanup_callable(*cleanup_args)
                (code, msg) = result
                fq_name_str = ':'.join(fq_name)
                self.config_object_error(None, fq_name_str, 'service_instance', 'http_post', msg)
                abort(code, msg)
            callable = getattr(r_class, 'http_post_collection_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))

        try:
            (ok, result) = \
                db_conn.dbe_create('service-instance', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)

        if not ok:
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            fq_name_str = ':'.join(fq_name)
            self.config_object_error(None, fq_name_str, 'service_instance', 'http_post', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['name'] = name
        rsp_body['fq_name'] = fq_name
        rsp_body['uuid'] = obj_ids['uuid']
        rsp_body['href'] = self.generate_url('service-instance', obj_ids['uuid'])
        if 'parent_type' in obj_dict:
            # non config-root child, send back parent uuid/href
            rsp_body['parent_uuid'] = parent_uuid
            rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_service_instance_create', obj_dict)
        except Exception as e:
            # extension hooks are best-effort; failures deliberately ignored
            pass

        return {'service-instance': rsp_body}
    #end service_instances_http_post

    def service_instances_http_get(self):
        """REST handler: GET /service-instances — list service-instance objects."""
        # gather list of uuids using 1. any specified anchors
        # 2. any specified filters
        # if not 'detail' return list with any specified 'fields'
        # if 'detail' return list with props+refs + any specified 'fields'

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')
        parent_uuids = None
        back_ref_uuids = None
        obj_uuids = None
        if (('parent_fq_name_str' in request.query) and
            ('parent_type' in request.query)):
            parent_fq_name = request.query.parent_fq_name_str.split(':')
            parent_type = request.query.parent_type
            parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
        elif 'parent_id' in request.query:
            parent_ids = request.query.parent_id.split(',')
            parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
        if 'back_ref_id' in request.query:
            back_ref_ids = request.query.back_ref_id.split(',')
            back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
        if 'obj_uuids' in request.query:
            obj_uuids = request.query.obj_uuids.split(',')

        # common handling for all resource get
        (ok, result) = self._get_common(request, parent_uuids)
        if not ok:
            (code, msg) = result
            self.config_object_error(None, None, 'service_instances', 'http_get_collection', msg)
            abort(code, msg)

        if 'count' in request.query:
            count = 'true' in request.query.count.lower()
        else:
            count = False

        # filters come as 'name1==val1,name2==val2'
        filter_params = request.query.filters
        if filter_params:
            try:
                ff_key_vals = filter_params.split(',')
                ff_names = [ff.split('==')[0] for ff in ff_key_vals]
                ff_values = [ff.split('==')[1] for ff in ff_key_vals]
                filters = {'field_names': ff_names, 'field_values': ff_values}
            except Exception as e:
                abort(400, 'Invalid filter ' + filter_params)
        else:
            filters = None
        db_conn = self._db_conn
        (ok, result) = \
            db_conn.dbe_list('service-instance', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
        if not ok:
            self.config_object_error(None, None, 'service_instances', 'http_get_collection', result)
            abort(404, result)

        # If only counting, return early
        if count:
            return {'service-instances': {'count': result}}

        if 'detail' in request.query:
            detail = 'true' in request.query.detail.lower()
        else:
            detail = False

        fq_names_uuids = result
        obj_dicts = []
        if not detail:
            if not self.is_admin_request():
                # non-admin: filter out user-invisible objects via id_perms
                obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
                obj_fields = [u'id_perms']
                (ok, result) = db_conn.dbe_read_multi('service-instance', obj_ids_list, obj_fields)
                if not ok:
                    result = []
                for obj_result in result:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    if obj_result['id_perms'].get('user_visible', True):
                        obj_dict = {}
                        obj_dict['uuid'] = obj_result['uuid']
                        obj_dict['href'] = self.generate_url('service-instance', obj_result['uuid'])
                        obj_dict['fq_name'] = obj_result['fq_name']
                        obj_dicts.append(obj_dict)
            else:
                for fq_name, obj_uuid in fq_names_uuids:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    obj_dict = {}
                    obj_dict['uuid'] = obj_uuid
                    obj_dict['href'] = self.generate_url('service-instance', obj_uuid)
                    obj_dict['fq_name'] = fq_name
                    obj_dicts.append(obj_dict)
        else: #detail
            obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
            obj_fields = [u'service_instance_properties', u'id_perms', u'display_name'] + ['service_template_refs']
            if 'fields' in request.query:
                obj_fields.extend(request.query.fields.split(','))
            (ok, result) = db_conn.dbe_read_multi('service-instance', obj_ids_list, obj_fields)

            if not ok:
                result = []

            for obj_result in result:
                # give chance for zk heartbeat/ping
                gevent.sleep(0)
                obj_dict = {}
obj_dict['name'] = obj_result['fq_name'][-1] + obj_dict['href'] = self.generate_url('service-instance', obj_result['uuid']) + obj_dict.update(obj_result) + if (obj_dict['id_perms'].get('user_visible', True) or + self.is_admin_request()): + obj_dicts.append({'service-instance': obj_dict}) + + return {'service-instances': obj_dicts} + #end service_instances_http_get + + def _service_instance_create_default_children(self, parent_obj): + pass + #end _service_instance_create_default_children + + def _service_instance_delete_default_children(self, parent_dict): + pass + #end _service_instance_delete_default_children + + def namespace_http_get(self, id): + try: + self._extension_mgrs['resourceApi'].map_method('pre_namespace_read', id) + except Exception as e: + pass + + # TODO get vals from request out of the global ASAP + etag = request.headers.get('If-None-Match') + try: + obj_type = self._db_conn.uuid_to_obj_type(id) + except NoIdError: + obj_type = None + if obj_type != 'namespace': + abort(404, 'No namespace object found for id %s' %(id)) + # common handling for all resource get + (ok, result) = self._get_common(request, id) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'namespace', 'http_get', msg) + abort(code, msg) + + # type-specific hook + r_class = self.get_resource_class('namespace') + if r_class: + r_class.http_get(id) + + db_conn = self._db_conn + if etag: + obj_ids = {'uuid': id} + (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', '')) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'namespace', 'http_get', result) + abort(404, result) + + is_latest = result + if is_latest: + # send Not-Modified, caches use this for read optimization + response.status = 304 + return + #end if etag + + obj_ids = {'uuid': id} + + # Generate field list for db layer + properties = [u'namespace_cidr', u'id_perms', u'display_name'] + references = [] + back_references = [u'domain_back_refs', u'project_back_refs'] + 
children = [] + if 'fields' in request.query: + obj_fields = request.query.fields.split(',') + else: # default props + children + refs + backrefs + obj_fields = properties + references + if 'exclude_back_refs' not in request.query: + obj_fields = obj_fields + back_references + if 'exclude_children' not in request.query: + obj_fields = obj_fields + children + + (ok, result) = db_conn.dbe_read('namespace', obj_ids, obj_fields) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'namespace', 'http_get', result) + abort(404, result) + + # check visibility + if (not result['id_perms'].get('user_visible', True) and + not self.is_admin_request()): + result = 'This object is not visible by users: %s' % id + self.config_object_error(id, None, 'namespace', 'http_get', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('namespace', id) + rsp_body['name'] = result['fq_name'][-1] + rsp_body.update(result) + id_perms = result['id_perms'] + response.set_header('ETag', '"' + id_perms['last_modified'] + '"') + try: + self._extension_mgrs['resourceApi'].map_method('post_namespace_read', id, rsp_body) + except Exception as e: + pass + + return {'namespace': rsp_body} + #end namespace_http_get + + def namespace_http_put(self, id): + key = 'namespace' + obj_dict = request.json[key] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_namespace_update', id, obj_dict) + except Exception as e: + pass + + db_conn = self._db_conn + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'namespace': + abort(404, 'No namespace object found for id %s' %(id)) + fq_name = db_conn.uuid_to_fq_name(id) + except NoIdError as e: + abort(404, str(e)) + prop_dict = obj_dict.get('namespace_cidr') + if prop_dict: + buf = cStringIO.StringIO() + xx_namespace_cidr = SubnetType(**prop_dict) + xx_namespace_cidr.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_namespace_cidr = SubnetType() + try: + 
                xx_namespace_cidr.build(node)
            except Exception as e:
                abort(400, str(e))
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource put
        (ok, result) = self._put_common(request, 'namespace', id, fq_name, obj_dict)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'namespace', 'http_put', msg)
            abort(code, msg)


        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # type-specific hook
        r_class = self.get_resource_class('namespace')
        if r_class:
            (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
            if not ok:
                (code, msg) = put_result
                self.config_object_error(id, None, 'namespace', 'http_put', msg)
                abort(code, msg)
            callable = getattr(r_class, 'http_put_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))

        obj_ids = {'uuid': id}
        try:
            (ok, result) = db_conn.dbe_update('namespace', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)
        if not ok:
            # undo partial state changes before reporting the failure
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'namespace', 'http_put', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('namespace', id)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_namespace_update', id, obj_dict)
        except Exception as e:
            # extension hooks are best-effort; failures deliberately ignored
            pass

        return {'namespace': rsp_body}
    #end namespace_http_put

    def namespace_http_delete(self, id):
        """REST handler: DELETE /namespace/<id> — delete a namespace object."""
        db_conn = self._db_conn
        # if obj doesn't exist return early
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'namespace':
                abort(404, 'No namespace object found for id %s' %(id))
            _ = db_conn.uuid_to_fq_name(id)
        except NoIdError:
            abort(404, 'ID %s does not exist' %(id))

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_namespace_delete', id)
        except Exception as e:
            # extension hooks are best-effort; failures deliberately ignored
            pass

        # read in obj from db (accepting error) to get details of it
        obj_ids = {'uuid': id}
        back_references = [u'domain_back_refs', u'project_back_refs']
        children = []
        obj_fields = children + back_references
        (read_ok, read_result) = db_conn.dbe_read('namespace', obj_ids, obj_fields)
        if not read_ok:
            if read_result.startswith('Unknown id:'):
                abort(404, 'ID %s does not exist' %(id))
            else:
                self.config_object_error(id, None, 'namespace', 'http_delete', read_result)
                # proceed down to delete the resource

        # common handling for all resource delete
        parent_type = read_result.get('parent_type', None)
        (ok, del_result) = self._delete_common(request, 'namespace', id, parent_type)
        if not ok:
            (code, msg) = del_result
            self.config_object_error(id, None, 'namespace', 'http_delete', msg)
            abort(code, msg)

        fq_name = read_result['fq_name']
        ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('namespace', fq_name)
        obj_ids['imid'] = ifmap_id
        if parent_type:
            parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
            obj_ids['parent_imid'] = parent_imid

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []

        # type-specific hook
        r_class = self.get_resource_class('namespace')
        if r_class:
            if read_ok:
                # fail if non-default children or backrefs exist
                project_back_refs = read_result.get('project_back_refs', None)
                if project_back_refs:
                    back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['project_back_refs']]
                    back_ref_str = ', '.join(back_ref_urls)
                    err_msg = 'Back-References from ' + back_ref_str + ' still exist'
                    self.config_object_error(id, None, 'namespace', 'http_delete', err_msg)
                    abort(409, err_msg)


                # Delete default children first
                self._namespace_delete_default_children(read_result)

                (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
                if not ok:
                    (code, msg) = del_result
                    self.config_object_error(id, None, 'namespace', 'http_delete', msg)
                    abort(code, msg)
                callable = getattr(r_class, 'http_delete_fail', None)
                if callable:
                    cleanup_on_failure.append((callable, [id, read_result, db_conn]))
            #end if read_ok

        try:
            (ok, del_result) = db_conn.dbe_delete('namespace', obj_ids, read_result)
        except Exception as e:
            ok = False
            del_result = str(e)
        if not ok:
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'namespace', 'http_delete', del_result)
            abort(409, del_result)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_namespace_delete', id, read_result)
        except Exception as e:
            # extension hooks are best-effort; failures deliberately ignored
            pass

    #end namespace_http_delete

    def namespaces_http_post(self):
        """REST handler: POST /namespaces — create a namespace object."""
        key = 'namespace'
        obj_dict = request.json[key]
        self._post_validate(key, obj_dict=obj_dict)
        fq_name = obj_dict['fq_name']

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_namespace_create', obj_dict)
        except Exception as e:
            # extension hooks are best-effort; failures deliberately ignored
            pass

        # Validate complex properties by round-tripping them through the
        # generated XSD types (dict -> XML -> build); abort 400 on bad input.
        prop_dict = obj_dict.get('namespace_cidr')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_namespace_cidr = SubnetType(**prop_dict)
            xx_namespace_cidr.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_namespace_cidr = SubnetType()
            try:
                xx_namespace_cidr.build(node)
            except Exception as e:
                abort(400, str(e))
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource create
        (ok, result) = self._post_common(request, 'namespace', obj_dict)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict.get('fq_name', []))
            self.config_object_error(None, fq_name_str, 'namespace', 'http_post', msg)
            abort(code, msg)

        name = obj_dict['fq_name'][-1]
        fq_name = obj_dict['fq_name']

        db_conn = self._db_conn

        # if client gave parent_type of config-root, ignore and remove
        if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
            del obj_dict['parent_type']

        if 'parent_type' in obj_dict:
            # non config-root child, verify parent exists
            parent_type = obj_dict['parent_type']
            parent_fq_name = obj_dict['fq_name'][:-1]
            try:
                parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
                (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
                if not ok:
                    (code, err_msg) = status
                    abort(code, err_msg)
                self._permissions.set_user_role(request, obj_dict)
            except NoIdError:
                err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
                fq_name_str = ':'.join(parent_fq_name)
                self.config_object_error(None, fq_name_str, 'namespace', 'http_post', err_msg)
                abort(400, err_msg)


        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # Alloc and Store id-mappings before creating entry on pubsub store.
+ # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('namespace', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'namespace', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['namespace', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('namespace') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'namespace', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('namespace', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'namespace', 'http_post', result) + abort(404, result) + + rsp_body = {} + rsp_body['name'] = name + rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = self.generate_url('namespace', obj_ids['uuid']) + if 'parent_type' in obj_dict: + # non config-root child, send back parent uuid/href + rsp_body['parent_uuid'] = parent_uuid + rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid) + + try: + 
            self._extension_mgrs['resourceApi'].map_method('post_namespace_create', obj_dict)
        except Exception as e:
            # extension hooks are best-effort; failures deliberately ignored
            pass

        return {'namespace': rsp_body}
    #end namespaces_http_post

    def namespaces_http_get(self):
        """REST handler: GET /namespaces — list namespace objects."""
        # gather list of uuids using 1. any specified anchors
        # 2. any specified filters
        # if not 'detail' return list with any specified 'fields'
        # if 'detail' return list with props+refs + any specified 'fields'

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')
        parent_uuids = None
        back_ref_uuids = None
        obj_uuids = None
        if (('parent_fq_name_str' in request.query) and
            ('parent_type' in request.query)):
            parent_fq_name = request.query.parent_fq_name_str.split(':')
            parent_type = request.query.parent_type
            parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
        elif 'parent_id' in request.query:
            parent_ids = request.query.parent_id.split(',')
            parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
        if 'back_ref_id' in request.query:
            back_ref_ids = request.query.back_ref_id.split(',')
            back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
        if 'obj_uuids' in request.query:
            obj_uuids = request.query.obj_uuids.split(',')

        # common handling for all resource get
        (ok, result) = self._get_common(request, parent_uuids)
        if not ok:
            (code, msg) = result
            self.config_object_error(None, None, 'namespaces', 'http_get_collection', msg)
            abort(code, msg)

        if 'count' in request.query:
            count = 'true' in request.query.count.lower()
        else:
            count = False

        # filters come as 'name1==val1,name2==val2'
        filter_params = request.query.filters
        if filter_params:
            try:
                ff_key_vals = filter_params.split(',')
                ff_names = [ff.split('==')[0] for ff in ff_key_vals]
                ff_values = [ff.split('==')[1] for ff in ff_key_vals]
                filters = {'field_names': ff_names, 'field_values': ff_values}
            except Exception as e:
                abort(400, 'Invalid filter ' + filter_params)
        else:
            filters = None
        db_conn = self._db_conn
        (ok, result) = \
            db_conn.dbe_list('namespace', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
        if not ok:
            self.config_object_error(None, None, 'namespaces', 'http_get_collection', result)
            abort(404, result)

        # If only counting, return early
        if count:
            return {'namespaces': {'count': result}}

        if 'detail' in request.query:
            detail = 'true' in request.query.detail.lower()
        else:
            detail = False

        fq_names_uuids = result
        obj_dicts = []
        if not detail:
            if not self.is_admin_request():
                # non-admin: filter out user-invisible objects via id_perms
                obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
                obj_fields = [u'id_perms']
                (ok, result) = db_conn.dbe_read_multi('namespace', obj_ids_list, obj_fields)
                if not ok:
                    result = []
                for obj_result in result:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    if obj_result['id_perms'].get('user_visible', True):
                        obj_dict = {}
                        obj_dict['uuid'] = obj_result['uuid']
                        obj_dict['href'] = self.generate_url('namespace', obj_result['uuid'])
                        obj_dict['fq_name'] = obj_result['fq_name']
                        obj_dicts.append(obj_dict)
            else:
                for fq_name, obj_uuid in fq_names_uuids:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    obj_dict = {}
                    obj_dict['uuid'] = obj_uuid
                    obj_dict['href'] = self.generate_url('namespace', obj_uuid)
                    obj_dict['fq_name'] = fq_name
                    obj_dicts.append(obj_dict)
        else: #detail
            obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
            obj_fields = [u'namespace_cidr', u'id_perms', u'display_name'] + []
            if 'fields' in request.query:
                obj_fields.extend(request.query.fields.split(','))
            (ok, result) = db_conn.dbe_read_multi('namespace', obj_ids_list, obj_fields)

            if not ok:
                result = []

            for obj_result in result:
                # give chance for zk heartbeat/ping
                gevent.sleep(0)
                obj_dict = {}
                obj_dict['name'] = obj_result['fq_name'][-1]
                obj_dict['href'] = self.generate_url('namespace', obj_result['uuid'])
                obj_dict.update(obj_result)
                if (obj_dict['id_perms'].get('user_visible', True) or
                    self.is_admin_request()):
                    obj_dicts.append({'namespace': obj_dict})

        return {'namespaces': obj_dicts}
    #end namespaces_http_get

    def _namespace_create_default_children(self, parent_obj):
        # namespace has no default children to create
        pass
    #end _namespace_create_default_children

    def _namespace_delete_default_children(self, parent_dict):
        # namespace has no default children to delete
        pass
    #end _namespace_delete_default_children

    def logical_interface_http_get(self, id):
        """REST handler: GET /logical-interface/<id> — read one logical-interface object."""
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_logical_interface_read', id)
        except Exception as e:
            # extension hooks are best-effort; failures deliberately ignored
            pass

        # TODO get vals from request out of the global ASAP
        etag = request.headers.get('If-None-Match')
        try:
            obj_type = self._db_conn.uuid_to_obj_type(id)
        except NoIdError:
            obj_type = None
        if obj_type != 'logical_interface':
            abort(404, 'No logical-interface object found for id %s' %(id))
        # common handling for all resource get
        (ok, result) = self._get_common(request, id)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'logical_interface', 'http_get', msg)
            abort(code, msg)

        # type-specific hook
        r_class = self.get_resource_class('logical-interface')
        if r_class:
            r_class.http_get(id)

        db_conn = self._db_conn
        if etag:
            obj_ids = {'uuid': id}
            (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
            if not ok:
                # Not present in DB
                self.config_object_error(id, None, 'logical_interface', 'http_get', result)
                abort(404, result)

            is_latest = result
            if is_latest:
                # send Not-Modified, caches use this for read optimization
                response.status = 304
                return
        #end if etag

        obj_ids = {'uuid': id}

        # Generate field list for db layer
        properties = [u'logical_interface_vlan_tag', u'logical_interface_type', u'id_perms', u'display_name']
        references = ['virtual_machine_interface_refs']
        back_references = [u'physical_router_back_refs', u'physical_interface_back_refs']
        children = []
        if 'fields' in request.query:
            obj_fields = request.query.fields.split(',')
        else: # default props + children + refs + backrefs
            obj_fields = properties + references
            if 'exclude_back_refs' not in request.query:
                obj_fields = obj_fields + back_references
            if 'exclude_children' not in request.query:
                obj_fields = obj_fields + children

        (ok, result) = db_conn.dbe_read('logical-interface', obj_ids, obj_fields)
        if not ok:
            # Not present in DB
            self.config_object_error(id, None, 'logical_interface', 'http_get', result)
            abort(404, result)

        # check visibility
        if (not result['id_perms'].get('user_visible', True) and
            not self.is_admin_request()):
            result = 'This object is not visible by users: %s' % id
            self.config_object_error(id, None, 'logical_interface', 'http_get', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('logical-interface', id)
        rsp_body['name'] = result['fq_name'][-1]
        rsp_body.update(result)
        id_perms = result['id_perms']
        response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
        try:
            self._extension_mgrs['resourceApi'].map_method('post_logical_interface_read', id, rsp_body)
        except Exception as e:
            # extension hooks are best-effort; failures deliberately ignored
            pass

        return {'logical-interface': rsp_body}
    #end logical_interface_http_get

    def logical_interface_http_put(self, id):
        """REST handler: PUT /logical-interface/<id> — update a logical-interface object."""
        key = 'logical-interface'
        obj_dict = request.json[key]

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_logical_interface_update', id, obj_dict)
        except Exception as e:
            # extension hooks are best-effort; failures deliberately ignored
            pass

        db_conn = self._db_conn
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'logical_interface':
                abort(404, 'No logical-interface object found for id %s' %(id))
            fq_name = db_conn.uuid_to_fq_name(id)
        except NoIdError as e:
            abort(404, str(e))
        # Validate complex properties by round-tripping them through the
        # generated XSD types (dict -> XML -> build); abort 400 on bad input.
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource put + (ok, result) = self._put_common(request, 'logical_interface', id, fq_name, obj_dict) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'logical_interface', 'http_put', msg) + abort(code, msg) + + # Validate perms + objtype_list = ['virtual_machine_interface'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + try: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + except NoIdError as e: + abort(404, str(e)) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # type-specific hook + r_class = self.get_resource_class('logical-interface') + if r_class: + (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn) + if not ok: + (code, msg) = put_result + self.config_object_error(id, None, 'logical_interface', 'http_put', msg) + abort(code, msg) + callable = getattr(r_class, 'http_put_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn])) + + obj_ids = {'uuid': id} + try: + (ok, result) = db_conn.dbe_update('logical-interface', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'logical_interface', 'http_put', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('logical-interface', id) + + try: + self._extension_mgrs['resourceApi'].map_method('post_logical_interface_update', id, obj_dict) + except Exception as e: + pass + + return {'logical-interface': rsp_body} 
    def logical_interface_http_delete(self, id):
        """Handle DELETE /logical-interface/<id> (generated REST handler).

        Verifies the uuid maps to a logical-interface, runs the optional
        pre/post delete extension hooks, deletes default children, invokes
        the type-specific resource-class hook, then removes the object from
        the backend DB. Aborts via bottle (404/409/...) on any failure;
        cleanup callables registered after state modification begins are
        run before aborting.
        """
        db_conn = self._db_conn
        # if obj doesn't exist return early
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'logical_interface':
                abort(404, 'No logical-interface object found for id %s' %(id))
            _ = db_conn.uuid_to_fq_name(id)
        except NoIdError:
            abort(404, 'ID %s does not exist' %(id))

        # best-effort extension hook: failures are deliberately ignored
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_logical_interface_delete', id)
        except Exception as e:
            pass

        # read in obj from db (accepting error) to get details of it
        obj_ids = {'uuid': id}
        back_references = [u'physical_router_back_refs', u'physical_interface_back_refs']
        children = []
        obj_fields = children + back_references
        (read_ok, read_result) = db_conn.dbe_read('logical-interface', obj_ids, obj_fields)
        if not read_ok:
            if read_result.startswith('Unknown id:'):
                abort(404, 'ID %s does not exist' %(id))
            else:
                self.config_object_error(id, None, 'logical_interface', 'http_delete', read_result)
                # proceed down to delete the resource
                # NOTE(review): on this path read_result is an error string,
                # so read_result.get()/['fq_name'] below would raise —
                # presumably unreachable in practice; confirm against db layer.

        # common handling for all resource delete
        parent_type = read_result.get('parent_type', None)
        (ok, del_result) = self._delete_common(request, 'logical_interface', id, parent_type)
        if not ok:
            (code, msg) = del_result
            self.config_object_error(id, None, 'logical_interface', 'http_delete', msg)
            abort(code, msg)

        fq_name = read_result['fq_name']
        ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('logical-interface', fq_name)
        obj_ids['imid'] = ifmap_id
        if parent_type:
            # parent ifmap id is derived from the fq_name minus the leaf
            parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
            obj_ids['parent_imid'] = parent_imid

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []

        # type-specific hook
        r_class = self.get_resource_class('logical-interface')
        if r_class:
            if read_ok:
                # fail if non-default children or backrefs exist

                # Delete default children first
                self._logical_interface_delete_default_children(read_result)

                (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
                if not ok:
                    (code, msg) = del_result
                    self.config_object_error(id, None, 'logical_interface', 'http_delete', msg)
                    abort(code, msg)
                callable = getattr(r_class, 'http_delete_fail', None)
                if callable:
                    cleanup_on_failure.append((callable, [id, read_result, db_conn]))
            #end if read_ok

        try:
            (ok, del_result) = db_conn.dbe_delete('logical-interface', obj_ids, read_result)
        except Exception as e:
            ok = False
            del_result = str(e)
        if not ok:
            # undo the type-specific hook's work before reporting conflict
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'logical_interface', 'http_delete', del_result)
            abort(409, del_result)

        # best-effort extension hook: failures are deliberately ignored
        try:
            self._extension_mgrs['resourceApi'].map_method('post_logical_interface_delete', id, read_result)
        except Exception as e:
            pass

    #end logical_interface_http_delete
not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict.get('fq_name', [])) + self.config_object_error(None, fq_name_str, 'logical_interface', 'http_post', msg) + abort(code, msg) + + name = obj_dict['fq_name'][-1] + fq_name = obj_dict['fq_name'] + + db_conn = self._db_conn + + # if client gave parent_type of config-root, ignore and remove + if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root': + del obj_dict['parent_type'] + + if 'parent_type' in obj_dict: + # non config-root child, verify parent exists + parent_type = obj_dict['parent_type'] + parent_fq_name = obj_dict['fq_name'][:-1] + try: + parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name) + (ok, status) = self._permissions.check_perms_write(request, parent_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + self._permissions.set_user_role(request, obj_dict) + except NoIdError: + err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist' + fq_name_str = ':'.join(parent_fq_name) + self.config_object_error(None, fq_name_str, 'logical_interface', 'http_post', err_msg) + abort(400, err_msg) + + # Validate perms + objtype_list = ['virtual_machine_interface'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # Alloc and Store id-mappings before creating entry on pubsub store. 
+ # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('logical-interface', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'logical_interface', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['logical_interface', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('logical-interface') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'logical_interface', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('logical-interface', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'logical_interface', 'http_post', result) + abort(404, result) + + rsp_body = {} + rsp_body['name'] = name + rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = self.generate_url('logical-interface', obj_ids['uuid']) + if 'parent_type' in obj_dict: + # non config-root child, send back parent uuid/href + rsp_body['parent_uuid'] = parent_uuid + rsp_body['parent_href'] 
    def logical_interfaces_http_get(self):
        """Handle GET /logical-interfaces: list logical-interface resources.

        Anchors the listing on parent fq_name/id or back_ref_id query
        params, supports 'count', '=='-separated 'filters', 'detail' and
        extra 'fields'; non-admin callers only see id_perms user_visible
        objects. Returns {'logical-interfaces': ...}.
        """
        # gather list of uuids using 1. any specified anchors
        #                            2. any specified filters
        # if not 'detail' return list with any specified 'fields'
        # if 'detail' return list with props+refs + any specified 'fields'

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')
        parent_uuids = None
        back_ref_uuids = None
        obj_uuids = None
        if (('parent_fq_name_str' in request.query) and
            ('parent_type' in request.query)):
            parent_fq_name = request.query.parent_fq_name_str.split(':')
            parent_type = request.query.parent_type
            parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
        elif 'parent_id' in request.query:
            # uuid.UUID round-trip normalizes the textual uuid form
            parent_ids = request.query.parent_id.split(',')
            parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
        if 'back_ref_id' in request.query:
            back_ref_ids = request.query.back_ref_id.split(',')
            back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
        if 'obj_uuids' in request.query:
            obj_uuids = request.query.obj_uuids.split(',')

        # common handling for all resource get
        (ok, result) = self._get_common(request, parent_uuids)
        if not ok:
            (code, msg) = result
            self.config_object_error(None, None, 'logical_interfaces', 'http_get_collection', msg)
            abort(code, msg)

        if 'count' in request.query:
            count = 'true' in request.query.count.lower()
        else:
            count = False

        # filters arrive as comma-separated 'name==value' pairs
        filter_params = request.query.filters
        if filter_params:
            try:
                ff_key_vals = filter_params.split(',')
                ff_names = [ff.split('==')[0] for ff in ff_key_vals]
                ff_values = [ff.split('==')[1] for ff in ff_key_vals]
                filters = {'field_names': ff_names, 'field_values': ff_values}
            except Exception as e:
                abort(400, 'Invalid filter ' + filter_params)
        else:
            filters = None
        db_conn = self._db_conn
        (ok, result) = \
            db_conn.dbe_list('logical-interface', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
        if not ok:
            self.config_object_error(None, None, 'logical_interfaces', 'http_get_collection', result)
            abort(404, result)

        # If only counting, return early
        if count:
            return {'logical-interfaces': {'count': result}}

        if 'detail' in request.query:
            detail = 'true' in request.query.detail.lower()
        else:
            detail = False

        fq_names_uuids = result
        obj_dicts = []
        if not detail:
            if not self.is_admin_request():
                # non-admin: re-read id_perms to filter out invisible objects
                obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
                obj_fields = [u'id_perms']
                (ok, result) = db_conn.dbe_read_multi('logical-interface', obj_ids_list, obj_fields)
                if not ok:
                    result = []
                for obj_result in result:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    if obj_result['id_perms'].get('user_visible', True):
                        obj_dict = {}
                        obj_dict['uuid'] = obj_result['uuid']
                        obj_dict['href'] = self.generate_url('logical-interface', obj_result['uuid'])
                        obj_dict['fq_name'] = obj_result['fq_name']
                        obj_dicts.append(obj_dict)
            else:
                for fq_name, obj_uuid in fq_names_uuids:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    obj_dict = {}
                    obj_dict['uuid'] = obj_uuid
                    obj_dict['href'] = self.generate_url('logical-interface', obj_uuid)
                    obj_dict['fq_name'] = fq_name
                    obj_dicts.append(obj_dict)
        else: #detail
            obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
            obj_fields = [u'logical_interface_vlan_tag', u'logical_interface_type', u'id_perms', u'display_name'] + ['virtual_machine_interface_refs']
            if 'fields' in request.query:
                obj_fields.extend(request.query.fields.split(','))
            (ok, result) = db_conn.dbe_read_multi('logical-interface', obj_ids_list, obj_fields)

            if not ok:
                result = []

            for obj_result in result:
                # give chance for zk heartbeat/ping
                gevent.sleep(0)
                obj_dict = {}
                obj_dict['name'] = obj_result['fq_name'][-1]
                obj_dict['href'] = self.generate_url('logical-interface', obj_result['uuid'])
                obj_dict.update(obj_result)
                if (obj_dict['id_perms'].get('user_visible', True) or
                    self.is_admin_request()):
                    obj_dicts.append({'logical-interface': obj_dict})

        return {'logical-interfaces': obj_dicts}
    #end logical_interfaces_http_get
    def _logical_interface_create_default_children(self, parent_obj):
        """Generated stub: logical-interface has no default children to create."""
        pass
    #end _logical_interface_create_default_children

    def _logical_interface_delete_default_children(self, parent_dict):
        """Generated stub: logical-interface has no default children to delete."""
        pass
    #end _logical_interface_delete_default_children
    def route_table_http_get(self, id):
        """Handle GET /route-table/<id> (generated REST handler).

        Validates the uuid maps to a route-table, honours If-None-Match
        (returns 304 when the stored object is unchanged), builds the
        field list from 'fields'/'exclude_*' query params, enforces
        id_perms visibility and returns {'route-table': body}.
        """
        # best-effort extension hook: failures are deliberately ignored
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_route_table_read', id)
        except Exception as e:
            pass

        # TODO get vals from request out of the global ASAP
        etag = request.headers.get('If-None-Match')
        try:
            obj_type = self._db_conn.uuid_to_obj_type(id)
        except NoIdError:
            obj_type = None
        if obj_type != 'route_table':
            abort(404, 'No route-table object found for id %s' %(id))
        # common handling for all resource get
        (ok, result) = self._get_common(request, id)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'route_table', 'http_get', msg)
            abort(code, msg)

        # type-specific hook
        r_class = self.get_resource_class('route-table')
        if r_class:
            r_class.http_get(id)

        db_conn = self._db_conn
        if etag:
            obj_ids = {'uuid': id}
            # strip the quotes HTTP puts around entity tags before comparing
            (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
            if not ok:
                # Not present in DB
                self.config_object_error(id, None, 'route_table', 'http_get', result)
                abort(404, result)

            is_latest = result
            if is_latest:
                # send Not-Modified, caches use this for read optimization
                response.status = 304
                return
        #end if etag

        obj_ids = {'uuid': id}

        # Generate field list for db layer
        properties = [u'routes', u'id_perms', u'display_name']
        references = []
        back_references = [u'project_back_refs', u'virtual_network_back_refs']
        children = []
        if 'fields' in request.query:
            obj_fields = request.query.fields.split(',')
        else: # default props + children + refs + backrefs
            obj_fields = properties + references
            if 'exclude_back_refs' not in request.query:
                obj_fields = obj_fields + back_references
            if 'exclude_children' not in request.query:
                obj_fields = obj_fields + children

        (ok, result) = db_conn.dbe_read('route-table', obj_ids, obj_fields)
        if not ok:
            # Not present in DB
            self.config_object_error(id, None, 'route_table', 'http_get', result)
            abort(404, result)

        # check visibility
        if (not result['id_perms'].get('user_visible', True) and
            not self.is_admin_request()):
            result = 'This object is not visible by users: %s' % id
            self.config_object_error(id, None, 'route_table', 'http_get', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('route-table', id)
        rsp_body['name'] = result['fq_name'][-1]
        rsp_body.update(result)
        id_perms = result['id_perms']
        response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
        # best-effort extension hook: failures are deliberately ignored
        try:
            self._extension_mgrs['resourceApi'].map_method('post_route_table_read', id, rsp_body)
        except Exception as e:
            pass

        return {'route-table': rsp_body}
    #end route_table_http_get
    def route_table_http_put(self, id):
        """Handle PUT /route-table/<id> (generated REST handler).

        Validates the 'routes' and 'id_perms' properties via XML
        export/build round-trips, runs common put handling and the
        type-specific hook, then updates the object in the backend DB.
        Returns {'route-table': {uuid, href}} on success.
        """
        key = 'route-table'
        obj_dict = request.json[key]

        # best-effort extension hook: failures are deliberately ignored
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_route_table_update', id, obj_dict)
        except Exception as e:
            pass

        db_conn = self._db_conn
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'route_table':
                abort(404, 'No route-table object found for id %s' %(id))
            fq_name = db_conn.uuid_to_fq_name(id)
        except NoIdError as e:
            abort(404, str(e))
        prop_dict = obj_dict.get('routes')
        if prop_dict:
            # round-trip through XML to validate the property's structure
            buf = cStringIO.StringIO()
            xx_routes = RouteTableType(**prop_dict)
            xx_routes.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_routes = RouteTableType()
            try:
                xx_routes.build(node)
            except Exception as e:
                abort(400, str(e))
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource put
        (ok, result) = self._put_common(request, 'route_table', id, fq_name, obj_dict)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'route_table', 'http_put', msg)
            abort(code, msg)


        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # type-specific hook
        r_class = self.get_resource_class('route-table')
        if r_class:
            (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
            if not ok:
                (code, msg) = put_result
                self.config_object_error(id, None, 'route_table', 'http_put', msg)
                abort(code, msg)
            callable = getattr(r_class, 'http_put_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))

        obj_ids = {'uuid': id}
        try:
            (ok, result) = db_conn.dbe_update('route-table', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)
        if not ok:
            # undo the type-specific hook's work before reporting failure
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'route_table', 'http_put', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('route-table', id)

        # best-effort extension hook: failures are deliberately ignored
        try:
            self._extension_mgrs['resourceApi'].map_method('post_route_table_update', id, obj_dict)
        except Exception as e:
            pass

        return {'route-table': rsp_body}
    #end route_table_http_put
    def route_table_http_delete(self, id):
        """Handle DELETE /route-table/<id> (generated REST handler).

        Refuses (409) when virtual-network back-references still exist;
        otherwise deletes default children, runs the type-specific hook
        and removes the object from the backend DB. Cleanup callables
        registered after state modification begins run before aborting.
        """
        db_conn = self._db_conn
        # if obj doesn't exist return early
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'route_table':
                abort(404, 'No route-table object found for id %s' %(id))
            _ = db_conn.uuid_to_fq_name(id)
        except NoIdError:
            abort(404, 'ID %s does not exist' %(id))

        # best-effort extension hook: failures are deliberately ignored
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_route_table_delete', id)
        except Exception as e:
            pass

        # read in obj from db (accepting error) to get details of it
        obj_ids = {'uuid': id}
        back_references = [u'project_back_refs', u'virtual_network_back_refs']
        children = []
        obj_fields = children + back_references
        (read_ok, read_result) = db_conn.dbe_read('route-table', obj_ids, obj_fields)
        if not read_ok:
            if read_result.startswith('Unknown id:'):
                abort(404, 'ID %s does not exist' %(id))
            else:
                self.config_object_error(id, None, 'route_table', 'http_delete', read_result)
                # proceed down to delete the resource
                # NOTE(review): on this path read_result is an error string,
                # so read_result.get()/['fq_name'] below would raise —
                # presumably unreachable in practice; confirm against db layer.

        # common handling for all resource delete
        parent_type = read_result.get('parent_type', None)
        (ok, del_result) = self._delete_common(request, 'route_table', id, parent_type)
        if not ok:
            (code, msg) = del_result
            self.config_object_error(id, None, 'route_table', 'http_delete', msg)
            abort(code, msg)

        fq_name = read_result['fq_name']
        ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('route-table', fq_name)
        obj_ids['imid'] = ifmap_id
        if parent_type:
            # parent ifmap id is derived from the fq_name minus the leaf
            parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
            obj_ids['parent_imid'] = parent_imid

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []

        # type-specific hook
        r_class = self.get_resource_class('route-table')
        if r_class:
            if read_ok:
                # fail if non-default children or backrefs exist
                virtual_network_back_refs = read_result.get('virtual_network_back_refs', None)
                if virtual_network_back_refs:
                    back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_network_back_refs']]
                    back_ref_str = ', '.join(back_ref_urls)
                    err_msg = 'Back-References from ' + back_ref_str + ' still exist'
                    self.config_object_error(id, None, 'route_table', 'http_delete', err_msg)
                    abort(409, err_msg)


                # Delete default children first
                self._route_table_delete_default_children(read_result)

                (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
                if not ok:
                    (code, msg) = del_result
                    self.config_object_error(id, None, 'route_table', 'http_delete', msg)
                    abort(code, msg)
                callable = getattr(r_class, 'http_delete_fail', None)
                if callable:
                    cleanup_on_failure.append((callable, [id, read_result, db_conn]))
            #end if read_ok

        try:
            (ok, del_result) = db_conn.dbe_delete('route-table', obj_ids, read_result)
        except Exception as e:
            ok = False
            del_result = str(e)
        if not ok:
            # undo the type-specific hook's work before reporting conflict
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'route_table', 'http_delete', del_result)
            abort(409, del_result)

        # best-effort extension hook: failures are deliberately ignored
        try:
            self._extension_mgrs['resourceApi'].map_method('post_route_table_delete', id, read_result)
        except Exception as e:
            pass

    #end route_table_http_delete
    def route_tables_http_post(self):
        """Handle POST /route-tables: create a route-table resource.

        Validates the 'routes' and 'id_perms' properties via XML
        export/build round-trips, verifies parent existence and write
        permission, then allocates ids and creates the object in the
        backend DB. Cleanup callables registered after state modification
        begins run before aborting on failure. Returns
        {'route-table': <summary dict>} on success.
        """
        key = 'route-table'
        obj_dict = request.json[key]
        self._post_validate(key, obj_dict=obj_dict)
        fq_name = obj_dict['fq_name']

        # best-effort extension hook: failures are deliberately ignored
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_route_table_create', obj_dict)
        except Exception as e:
            pass

        prop_dict = obj_dict.get('routes')
        if prop_dict:
            # round-trip through XML to validate the property's structure
            buf = cStringIO.StringIO()
            xx_routes = RouteTableType(**prop_dict)
            xx_routes.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_routes = RouteTableType()
            try:
                xx_routes.build(node)
            except Exception as e:
                abort(400, str(e))
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource create
        (ok, result) = self._post_common(request, 'route-table', obj_dict)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict.get('fq_name', []))
            self.config_object_error(None, fq_name_str, 'route_table', 'http_post', msg)
            abort(code, msg)

        name = obj_dict['fq_name'][-1]
        fq_name = obj_dict['fq_name']

        db_conn = self._db_conn

        # if client gave parent_type of config-root, ignore and remove
        if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
            del obj_dict['parent_type']

        if 'parent_type' in obj_dict:
            # non config-root child, verify parent exists
            parent_type = obj_dict['parent_type']
            parent_fq_name = obj_dict['fq_name'][:-1]
            try:
                parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
                (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
                if not ok:
                    (code, err_msg) = status
                    abort(code, err_msg)
                self._permissions.set_user_role(request, obj_dict)
            except NoIdError:
                err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
                fq_name_str = ':'.join(parent_fq_name)
                self.config_object_error(None, fq_name_str, 'route_table', 'http_post', err_msg)
                abort(400, err_msg)


        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # Alloc and Store id-mappings before creating entry on pubsub store.
        # Else a subscriber can ask for an id mapping before we have stored it
        uuid_requested = result
        (ok, result) = db_conn.dbe_alloc('route-table', obj_dict, uuid_requested)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict['fq_name'])
            self.config_object_error(None, fq_name_str, 'route_table', 'http_post', result)
            abort(code, msg)
        cleanup_on_failure.append((db_conn.dbe_release, ['route_table', fq_name]))

        obj_ids = result

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')


        # type-specific hook
        r_class = self.get_resource_class('route-table')
        if r_class:
            try:
                (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
            except Exception as e:
                ok = False
                result = (500, str(e))
            if not ok:
                for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                    fail_cleanup_callable(*cleanup_args)
                (code, msg) = result
                fq_name_str = ':'.join(fq_name)
                self.config_object_error(None, fq_name_str, 'route_table', 'http_post', msg)
                abort(code, msg)
            callable = getattr(r_class, 'http_post_collection_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))

        try:
            (ok, result) = \
                db_conn.dbe_create('route-table', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)

        if not ok:
            # undo id allocation and any hook work before reporting failure
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            fq_name_str = ':'.join(fq_name)
            self.config_object_error(None, fq_name_str, 'route_table', 'http_post', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['name'] = name
        rsp_body['fq_name'] = fq_name
        rsp_body['uuid'] = obj_ids['uuid']
        rsp_body['href'] = self.generate_url('route-table', obj_ids['uuid'])
        if 'parent_type' in obj_dict:
            # non config-root child, send back parent uuid/href
            rsp_body['parent_uuid'] = parent_uuid
            rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)

        # best-effort extension hook: failures are deliberately ignored
        try:
            self._extension_mgrs['resourceApi'].map_method('post_route_table_create', obj_dict)
        except Exception as e:
            pass

        return {'route-table': rsp_body}
    #end route_tables_http_post
    def route_tables_http_get(self):
        """Handle GET /route-tables: list route-table resources.

        Anchors the listing on parent fq_name/id or back_ref_id query
        params, supports 'count', '=='-separated 'filters', 'detail' and
        extra 'fields'; non-admin callers only see id_perms user_visible
        objects. Returns {'route-tables': ...}.
        """
        # gather list of uuids using 1. any specified anchors
        #                            2. any specified filters
        # if not 'detail' return list with any specified 'fields'
        # if 'detail' return list with props+refs + any specified 'fields'

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')
        parent_uuids = None
        back_ref_uuids = None
        obj_uuids = None
        if (('parent_fq_name_str' in request.query) and
            ('parent_type' in request.query)):
            parent_fq_name = request.query.parent_fq_name_str.split(':')
            parent_type = request.query.parent_type
            parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
        elif 'parent_id' in request.query:
            # uuid.UUID round-trip normalizes the textual uuid form
            parent_ids = request.query.parent_id.split(',')
            parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
        if 'back_ref_id' in request.query:
            back_ref_ids = request.query.back_ref_id.split(',')
            back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
        if 'obj_uuids' in request.query:
            obj_uuids = request.query.obj_uuids.split(',')

        # common handling for all resource get
        (ok, result) = self._get_common(request, parent_uuids)
        if not ok:
            (code, msg) = result
            self.config_object_error(None, None, 'route_tables', 'http_get_collection', msg)
            abort(code, msg)

        if 'count' in request.query:
            count = 'true' in request.query.count.lower()
        else:
            count = False

        # filters arrive as comma-separated 'name==value' pairs
        filter_params = request.query.filters
        if filter_params:
            try:
                ff_key_vals = filter_params.split(',')
                ff_names = [ff.split('==')[0] for ff in ff_key_vals]
                ff_values = [ff.split('==')[1] for ff in ff_key_vals]
                filters = {'field_names': ff_names, 'field_values': ff_values}
            except Exception as e:
                abort(400, 'Invalid filter ' + filter_params)
        else:
            filters = None
        db_conn = self._db_conn
        (ok, result) = \
            db_conn.dbe_list('route-table', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
        if not ok:
            self.config_object_error(None, None, 'route_tables', 'http_get_collection', result)
            abort(404, result)

        # If only counting, return early
        if count:
            return {'route-tables': {'count': result}}

        if 'detail' in request.query:
            detail = 'true' in request.query.detail.lower()
        else:
            detail = False

        fq_names_uuids = result
        obj_dicts = []
        if not detail:
            if not self.is_admin_request():
                # non-admin: re-read id_perms to filter out invisible objects
                obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
                obj_fields = [u'id_perms']
                (ok, result) = db_conn.dbe_read_multi('route-table', obj_ids_list, obj_fields)
                if not ok:
                    result = []
                for obj_result in result:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    if obj_result['id_perms'].get('user_visible', True):
                        obj_dict = {}
                        obj_dict['uuid'] = obj_result['uuid']
                        obj_dict['href'] = self.generate_url('route-table', obj_result['uuid'])
                        obj_dict['fq_name'] = obj_result['fq_name']
                        obj_dicts.append(obj_dict)
            else:
                for fq_name, obj_uuid in fq_names_uuids:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    obj_dict = {}
                    obj_dict['uuid'] = obj_uuid
                    obj_dict['href'] = self.generate_url('route-table', obj_uuid)
                    obj_dict['fq_name'] = fq_name
                    obj_dicts.append(obj_dict)
        else: #detail
            obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
            obj_fields = [u'routes', u'id_perms', u'display_name'] + []
            if 'fields' in request.query:
                obj_fields.extend(request.query.fields.split(','))
            (ok, result) = db_conn.dbe_read_multi('route-table', obj_ids_list, obj_fields)

            if not ok:
                result = []

            for obj_result in result:
                # give chance for zk heartbeat/ping
                gevent.sleep(0)
                obj_dict = {}
                obj_dict['name'] = obj_result['fq_name'][-1]
                obj_dict['href'] = self.generate_url('route-table', obj_result['uuid'])
                obj_dict.update(obj_result)
                if (obj_dict['id_perms'].get('user_visible', True) or
                    self.is_admin_request()):
                    obj_dicts.append({'route-table': obj_dict})

        return {'route-tables': obj_dicts}
    #end route_tables_http_get
    def _route_table_create_default_children(self, parent_obj):
        """Generated stub: route-table has no default children to create."""
        pass
    #end _route_table_create_default_children

    def _route_table_delete_default_children(self, parent_dict):
        """Generated stub: route-table has no default children to delete."""
        pass
    #end _route_table_delete_default_children
    def physical_interface_http_get(self, id):
        """Handle GET /physical-interface/<id> (generated REST handler).

        Validates the uuid maps to a physical-interface, honours
        If-None-Match (returns 304 when unchanged), builds the field list
        from 'fields'/'exclude_*' query params, enforces id_perms
        visibility and returns {'physical-interface': body}.
        """
        # best-effort extension hook: failures are deliberately ignored
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_physical_interface_read', id)
        except Exception as e:
            pass

        # TODO get vals from request out of the global ASAP
        etag = request.headers.get('If-None-Match')
        try:
            obj_type = self._db_conn.uuid_to_obj_type(id)
        except NoIdError:
            obj_type = None
        if obj_type != 'physical_interface':
            abort(404, 'No physical-interface object found for id %s' %(id))
        # common handling for all resource get
        (ok, result) = self._get_common(request, id)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'physical_interface', 'http_get', msg)
            abort(code, msg)

        # type-specific hook
        r_class = self.get_resource_class('physical-interface')
        if r_class:
            r_class.http_get(id)

        db_conn = self._db_conn
        if etag:
            obj_ids = {'uuid': id}
            # strip the quotes HTTP puts around entity tags before comparing
            (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
            if not ok:
                # Not present in DB
                self.config_object_error(id, None, 'physical_interface', 'http_get', result)
                abort(404, result)

            is_latest = result
            if is_latest:
                # send Not-Modified, caches use this for read optimization
                response.status = 304
                return
        #end if etag

        obj_ids = {'uuid': id}

        # Generate field list for db layer
        properties = [u'id_perms', u'display_name']
        references = []
        back_references = [u'physical_router_back_refs']
        children = [u'logical_interfaces']
        if 'fields' in request.query:
            obj_fields = request.query.fields.split(',')
        else: # default props + children + refs + backrefs
            obj_fields = properties + references
            if 'exclude_back_refs' not in request.query:
                obj_fields = obj_fields + back_references
            if 'exclude_children' not in request.query:
                obj_fields = obj_fields + children

        (ok, result) = db_conn.dbe_read('physical-interface', obj_ids, obj_fields)
        if not ok:
            # Not present in DB
            self.config_object_error(id, None, 'physical_interface', 'http_get', result)
            abort(404, result)

        # check visibility
        if (not result['id_perms'].get('user_visible', True) and
            not self.is_admin_request()):
            result = 'This object is not visible by users: %s' % id
            self.config_object_error(id, None, 'physical_interface', 'http_get', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('physical-interface', id)
        rsp_body['name'] = result['fq_name'][-1]
        rsp_body.update(result)
        id_perms = result['id_perms']
        response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
        # best-effort extension hook: failures are deliberately ignored
        try:
            self._extension_mgrs['resourceApi'].map_method('post_physical_interface_read', id, rsp_body)
        except Exception as e:
            pass

        return {'physical-interface': rsp_body}
    #end physical_interface_http_get
    def physical_interface_http_put(self, id):
        """Handle PUT /physical-interface/<id> (generated REST handler).

        Validates 'id_perms' via an XML export/build round-trip, checks
        link permission on each logical-interface reference, runs the
        type-specific hook and updates the object in the backend DB.
        Returns {'physical-interface': {uuid, href}} on success.
        """
        key = 'physical-interface'
        obj_dict = request.json[key]

        # best-effort extension hook: failures are deliberately ignored
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_physical_interface_update', id, obj_dict)
        except Exception as e:
            pass

        db_conn = self._db_conn
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'physical_interface':
                abort(404, 'No physical-interface object found for id %s' %(id))
            fq_name = db_conn.uuid_to_fq_name(id)
        except NoIdError as e:
            abort(404, str(e))
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            # round-trip through XML to validate the property's structure
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource put
        (ok, result) = self._put_common(request, 'physical_interface', id, fq_name, obj_dict)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'physical_interface', 'http_put', msg)
            abort(code, msg)

        # Validate perms
        objtype_list = [u'logical_interface']
        for obj_type in objtype_list:
            refs = obj_dict.get('%s_refs'%(obj_type), None)
            if refs:
                for ref in refs:
                    try:
                        ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
                    except NoIdError as e:
                        abort(404, str(e))
                    (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
                    if not ok:
                        (code, err_msg) = status
                        abort(code, err_msg)

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # type-specific hook
        r_class = self.get_resource_class('physical-interface')
        if r_class:
            (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
            if not ok:
                (code, msg) = put_result
                self.config_object_error(id, None, 'physical_interface', 'http_put', msg)
                abort(code, msg)
            callable = getattr(r_class, 'http_put_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))

        obj_ids = {'uuid': id}
        try:
            (ok, result) = db_conn.dbe_update('physical-interface', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)
        if not ok:
            # undo the type-specific hook's work before reporting failure
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'physical_interface', 'http_put', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('physical-interface', id)

        # best-effort extension hook: failures are deliberately ignored
        try:
            self._extension_mgrs['resourceApi'].map_method('post_physical_interface_update', id, obj_dict)
        except Exception as e:
            pass

        return {'physical-interface': rsp_body}
    #end physical_interface_http_put
physical_interface_http_delete(self, id): + db_conn = self._db_conn + # if obj doesn't exist return early + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'physical_interface': + abort(404, 'No physical-interface object found for id %s' %(id)) + _ = db_conn.uuid_to_fq_name(id) + except NoIdError: + abort(404, 'ID %s does not exist' %(id)) + + try: + self._extension_mgrs['resourceApi'].map_method('pre_physical_interface_delete', id) + except Exception as e: + pass + + # read in obj from db (accepting error) to get details of it + obj_ids = {'uuid': id} + back_references = [u'physical_router_back_refs'] + children = [u'logical_interfaces'] + obj_fields = children + back_references + (read_ok, read_result) = db_conn.dbe_read('physical-interface', obj_ids, obj_fields) + if not read_ok: + if read_result.startswith('Unknown id:'): + abort(404, 'ID %s does not exist' %(id)) + else: + self.config_object_error(id, None, 'physical_interface', 'http_delete', read_result) + # proceed down to delete the resource + + # common handling for all resource delete + parent_type = read_result.get('parent_type', None) + (ok, del_result) = self._delete_common(request, 'physical_interface', id, parent_type) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'physical_interface', 'http_delete', msg) + abort(code, msg) + + fq_name = read_result['fq_name'] + ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('physical-interface', fq_name) + obj_ids['imid'] = ifmap_id + if parent_type: + parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1]) + obj_ids['parent_imid'] = parent_imid + + # State modification starts from here. 
Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + + # type-specific hook + r_class = self.get_resource_class('physical-interface') + if r_class: + if read_ok: + # fail if non-default children or backrefs exist + logical_interfaces = read_result.get('logical_interfaces', None) + if logical_interfaces: + has_infos = read_result['logical_interfaces'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-logical-interface')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'physical_interface', 'http_delete', err_msg) + abort(409, err_msg) + + + # Delete default children first + self._physical_interface_delete_default_children(read_result) + + (ok, del_result) = r_class.http_delete(id, read_result, db_conn) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'physical_interface', 'http_delete', msg) + abort(code, msg) + callable = getattr(r_class, 'http_delete_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, read_result, db_conn])) + #end if read_ok + + try: + (ok, del_result) = db_conn.dbe_delete('physical-interface', obj_ids, read_result) + except Exception as e: + ok = False + del_result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'physical_interface', 'http_delete', del_result) + abort(409, del_result) + + try: + self._extension_mgrs['resourceApi'].map_method('post_physical_interface_delete', id, read_result) + except Exception as e: + pass + + #end physical_interface_http_delete + + def physical_interfaces_http_post(self): + key = 'physical-interface' + obj_dict = request.json[key] + self._post_validate(key, obj_dict=obj_dict) + fq_name = obj_dict['fq_name'] + + try: + 
self._extension_mgrs['resourceApi'].map_method('pre_physical_interface_create', obj_dict) + except Exception as e: + pass + + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource create + (ok, result) = self._post_common(request, 'physical-interface', obj_dict) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict.get('fq_name', [])) + self.config_object_error(None, fq_name_str, 'physical_interface', 'http_post', msg) + abort(code, msg) + + name = obj_dict['fq_name'][-1] + fq_name = obj_dict['fq_name'] + + db_conn = self._db_conn + + # if client gave parent_type of config-root, ignore and remove + if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root': + del obj_dict['parent_type'] + + if 'parent_type' in obj_dict: + # non config-root child, verify parent exists + parent_type = obj_dict['parent_type'] + parent_fq_name = obj_dict['fq_name'][:-1] + try: + parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name) + (ok, status) = self._permissions.check_perms_write(request, parent_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + self._permissions.set_user_role(request, obj_dict) + except NoIdError: + err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist' + fq_name_str = ':'.join(parent_fq_name) + self.config_object_error(None, fq_name_str, 'physical_interface', 'http_post', err_msg) + abort(400, err_msg) + + # Validate perms + objtype_list = [u'logical_interface'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + (ok, status) = 
self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # Alloc and Store id-mappings before creating entry on pubsub store. + # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('physical-interface', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'physical_interface', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['physical_interface', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('physical-interface') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'physical_interface', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('physical-interface', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'physical_interface', 'http_post', result) + abort(404, result) + + rsp_body = {} + 
rsp_body['name'] = name + rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = self.generate_url('physical-interface', obj_ids['uuid']) + if 'parent_type' in obj_dict: + # non config-root child, send back parent uuid/href + rsp_body['parent_uuid'] = parent_uuid + rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid) + + try: + self._extension_mgrs['resourceApi'].map_method('post_physical_interface_create', obj_dict) + except Exception as e: + pass + + return {'physical-interface': rsp_body} + #end physical_interfaces_http_post + + def physical_interfaces_http_get(self): + # gather list of uuids using 1. any specified anchors + # 2. any specified filters + # if not 'detail' return list with any specified 'fields' + # if 'detail' return list with props+refs + any specified 'fields' + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + parent_uuids = None + back_ref_uuids = None + obj_uuids = None + if (('parent_fq_name_str' in request.query) and + ('parent_type' in request.query)): + parent_fq_name = request.query.parent_fq_name_str.split(':') + parent_type = request.query.parent_type + parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)] + elif 'parent_id' in request.query: + parent_ids = request.query.parent_id.split(',') + parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids] + if 'back_ref_id' in request.query: + back_ref_ids = request.query.back_ref_id.split(',') + back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids] + if 'obj_uuids' in request.query: + obj_uuids = request.query.obj_uuids.split(',') + + # common handling for all resource get + (ok, result) = self._get_common(request, parent_uuids) + if not ok: + (code, msg) = result + self.config_object_error(None, None, 'physical_interfaces', 'http_get_collection', msg) + abort(code, msg) + + if 'count' in request.query: + count = 'true' in 
request.query.count.lower() + else: + count = False + + filter_params = request.query.filters + if filter_params: + try: + ff_key_vals = filter_params.split(',') + ff_names = [ff.split('==')[0] for ff in ff_key_vals] + ff_values = [ff.split('==')[1] for ff in ff_key_vals] + filters = {'field_names': ff_names, 'field_values': ff_values} + except Exception as e: + abort(400, 'Invalid filter ' + filter_params) + else: + filters = None + db_conn = self._db_conn + (ok, result) = \ + db_conn.dbe_list('physical-interface', parent_uuids, back_ref_uuids, obj_uuids, count, filters) + if not ok: + self.config_object_error(None, None, 'physical_interfaces', 'http_get_collection', result) + abort(404, result) + + # If only counting, return early + if count: + return {'physical-interfaces': {'count': result}} + + if 'detail' in request.query: + detail = 'true' in request.query.detail.lower() + else: + detail = False + + fq_names_uuids = result + obj_dicts = [] + if not detail: + if not self.is_admin_request(): + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms'] + (ok, result) = db_conn.dbe_read_multi('physical-interface', obj_ids_list, obj_fields) + if not ok: + result = [] + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + if obj_result['id_perms'].get('user_visible', True): + obj_dict = {} + obj_dict['uuid'] = obj_result['uuid'] + obj_dict['href'] = self.generate_url('physical-interface', obj_result['uuid']) + obj_dict['fq_name'] = obj_result['fq_name'] + obj_dicts.append(obj_dict) + else: + for fq_name, obj_uuid in fq_names_uuids: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['uuid'] = obj_uuid + obj_dict['href'] = self.generate_url('physical-interface', obj_uuid) + obj_dict['fq_name'] = fq_name + obj_dicts.append(obj_dict) + else: #detail + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms', u'display_name'] + 
[] + if 'fields' in request.query: + obj_fields.extend(request.query.fields.split(',')) + (ok, result) = db_conn.dbe_read_multi('physical-interface', obj_ids_list, obj_fields) + + if not ok: + result = [] + + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['name'] = obj_result['fq_name'][-1] + obj_dict['href'] = self.generate_url('physical-interface', obj_result['uuid']) + obj_dict.update(obj_result) + if (obj_dict['id_perms'].get('user_visible', True) or + self.is_admin_request()): + obj_dicts.append({'physical-interface': obj_dict}) + + return {'physical-interfaces': obj_dicts} + #end physical_interfaces_http_get + + def _physical_interface_create_default_children(self, parent_obj): + # Create a default child only if provisioned for + r_class = self.get_resource_class('logical-interface') + if r_class and r_class.generate_default_instance: + child_obj = LogicalInterface(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('logical-interface') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('logical-interface', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('logical-interface', obj_ids, child_dict) + self._logical_interface_create_default_children(child_obj) + + pass + #end _physical_interface_create_default_children + + def _physical_interface_delete_default_children(self, parent_dict): + # Delete a default child only if provisioned for + r_class = self.get_resource_class('logical-interface') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('logical_interfaces') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-logical-interface': + default_child_id = has_info['href'].split('/')[-1] + self.logical_interface_http_delete(default_child_id) + break + 
        pass
    #end _physical_interface_delete_default_children

    def access_control_list_http_get(self, id):
        """REST handler: GET /access-control-list/<id>.

        Honors If-None-Match (ETag) for cache revalidation and the
        'fields' / 'exclude_back_refs' / 'exclude_children' query
        parameters; aborts 404 for unknown ids or invisible objects.
        """
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_access_control_list_read', id)
        except Exception as e:
            # extension hooks are best-effort; failures must not block the read
            pass

        # TODO get vals from request out of the global ASAP
        etag = request.headers.get('If-None-Match')
        try:
            obj_type = self._db_conn.uuid_to_obj_type(id)
        except NoIdError:
            obj_type = None
        if obj_type != 'access_control_list':
            abort(404, 'No access-control-list object found for id %s' %(id))
        # common handling for all resource get
        (ok, result) = self._get_common(request, id)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'access_control_list', 'http_get', msg)
            abort(code, msg)

        # type-specific hook
        r_class = self.get_resource_class('access-control-list')
        if r_class:
            r_class.http_get(id)

        db_conn = self._db_conn
        if etag:
            obj_ids = {'uuid': id}
            (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
            if not ok:
                # Not present in DB
                self.config_object_error(id, None, 'access_control_list', 'http_get', result)
                abort(404, result)

            is_latest = result
            if is_latest:
                # send Not-Modified, caches use this for read optimization
                response.status = 304
                return
        #end if etag

        obj_ids = {'uuid': id}

        # Generate field list for db layer
        properties = [u'access_control_list_entries', u'id_perms', u'display_name']
        references = []
        back_references = [u'virtual_network_back_refs', u'security_group_back_refs']
        children = []
        if 'fields' in request.query:
            obj_fields = request.query.fields.split(',')
        else: # default props + children + refs + backrefs
            obj_fields = properties + references
            if 'exclude_back_refs' not in request.query:
                obj_fields = obj_fields + back_references
            if 'exclude_children' not in request.query:
                obj_fields = obj_fields + children

        (ok, result) = db_conn.dbe_read('access-control-list', obj_ids, obj_fields)
        if not ok:
            # Not present in DB
            self.config_object_error(id, None, 'access_control_list', 'http_get', result)
            abort(404, result)

        # check visibility
        if (not result['id_perms'].get('user_visible', True) and
            not self.is_admin_request()):
            result = 'This object is not visible by users: %s' % id
            self.config_object_error(id, None, 'access_control_list', 'http_get', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('access-control-list', id)
        rsp_body['name'] = result['fq_name'][-1]
        rsp_body.update(result)
        id_perms = result['id_perms']
        # quote the timestamp so clients can echo it back via If-None-Match
        response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
        try:
            self._extension_mgrs['resourceApi'].map_method('post_access_control_list_read', id, rsp_body)
        except Exception as e:
            pass

        return {'access-control-list': rsp_body}
    #end access_control_list_http_get

    def access_control_list_http_put(self, id):
        """REST handler: PUT /access-control-list/<id>.

        Round-trips declared properties (acl entries, id_perms) through
        their XSD types for validation, runs the type-specific hook and
        updates the object in the DB; aborts 4xx on failure.
        """
        key = 'access-control-list'
        obj_dict = request.json[key]

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_access_control_list_update', id, obj_dict)
        except Exception as e:
            # extension hooks are best-effort
            pass

        db_conn = self._db_conn
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'access_control_list':
                abort(404, 'No access-control-list object found for id %s' %(id))
            fq_name = db_conn.uuid_to_fq_name(id)
        except NoIdError as e:
            abort(404, str(e))
        # validate access_control_list_entries via XML round-trip of the XSD type
        prop_dict = obj_dict.get('access_control_list_entries')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_access_control_list_entries = AclEntriesType(**prop_dict)
            xx_access_control_list_entries.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_access_control_list_entries = AclEntriesType()
            try:
                xx_access_control_list_entries.build(node)
            except Exception as e:
                abort(400, str(e))
        # validate id_perms the same way
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource put
        (ok, result) = self._put_common(request, 'access_control_list', id, fq_name, obj_dict)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'access_control_list', 'http_put', msg)
            abort(code, msg)


        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # type-specific hook
        r_class = self.get_resource_class('access-control-list')
        if r_class:
            (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
            if not ok:
                (code, msg) = put_result
                self.config_object_error(id, None, 'access_control_list', 'http_put', msg)
                abort(code, msg)
            callable = getattr(r_class, 'http_put_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))

        obj_ids = {'uuid': id}
        try:
            (ok, result) = db_conn.dbe_update('access-control-list', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)
        if not ok:
            # roll back any type-specific hook side effects before failing
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'access_control_list', 'http_put', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('access-control-list', id)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_access_control_list_update', id, obj_dict)
        except Exception as e:
            pass

        return {'access-control-list': rsp_body}
    #end access_control_list_http_put

    def access_control_list_http_delete(self, id):
        """REST handler: DELETE /access-control-list/<id>.

        Runs pre/post extension hooks and the type-specific hook, then
        removes the object from the DB; aborts 404/409 on failure.
        """
        db_conn = self._db_conn
        # if obj doesn't exist return early
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'access_control_list':
                abort(404, 'No access-control-list object found for id %s' %(id))
            _ = db_conn.uuid_to_fq_name(id)
        except NoIdError:
            abort(404, 'ID %s does not exist' %(id))

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_access_control_list_delete', id)
        except Exception as e:
            pass

        # read in obj from db (accepting error) to get details of it
        obj_ids = {'uuid': id}
        back_references = [u'virtual_network_back_refs', u'security_group_back_refs']
        children = []
        obj_fields = children + back_references
        (read_ok, read_result) = db_conn.dbe_read('access-control-list', obj_ids, obj_fields)
        if not read_ok:
            if read_result.startswith('Unknown id:'):
                abort(404, 'ID %s does not exist' %(id))
            else:
                self.config_object_error(id, None, 'access_control_list', 'http_delete', read_result)
                # proceed down to delete the resource

        # common handling for all resource delete
        parent_type = read_result.get('parent_type', None)
        (ok, del_result) = self._delete_common(request, 'access_control_list', id, parent_type)
        if not ok:
            (code, msg) = del_result
            self.config_object_error(id, None, 'access_control_list', 'http_delete', msg)
            abort(code, msg)

        fq_name = read_result['fq_name']
        ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('access-control-list', fq_name)
        obj_ids['imid'] = ifmap_id
        if parent_type:
            parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
            obj_ids['parent_imid'] = parent_imid

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []

        # type-specific hook
        r_class = self.get_resource_class('access-control-list')
        if r_class:
            if read_ok:
                # fail if non-default children or backrefs exist

                # Delete default children first
                self._access_control_list_delete_default_children(read_result)

                (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
                if not ok:
                    (code, msg) = del_result
                    self.config_object_error(id, None, 'access_control_list', 'http_delete', msg)
                    abort(code, msg)
                callable = getattr(r_class, 'http_delete_fail', None)
                if callable:
                    cleanup_on_failure.append((callable, [id, read_result, db_conn]))
            #end if read_ok

        try:
            (ok, del_result) = db_conn.dbe_delete('access-control-list', obj_ids, read_result)
        except Exception as e:
            ok = False
            del_result = str(e)
        if not ok:
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'access_control_list', 'http_delete', del_result)
            abort(409, del_result)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_access_control_list_delete', id, read_result)
        except Exception as e:
            pass

    #end access_control_list_http_delete

    def access_control_lists_http_post(self):
        """REST handler: POST /access-control-lists (create).

        Validates the posted dict and its XSD-typed properties, verifies
        the parent, allocates ids and creates the object in the DB.
        """
        key = 'access-control-list'
        obj_dict = request.json[key]
        self._post_validate(key, obj_dict=obj_dict)
        fq_name = obj_dict['fq_name']

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_access_control_list_create', obj_dict)
        except Exception as e:
            # extension hooks are best-effort
            pass

        # validate access_control_list_entries via XML round-trip of the XSD type
        prop_dict = obj_dict.get('access_control_list_entries')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_access_control_list_entries = AclEntriesType(**prop_dict)
            xx_access_control_list_entries.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_access_control_list_entries = AclEntriesType()
            try:
                xx_access_control_list_entries.build(node)
            except Exception as e:
                abort(400, str(e))
        # validate id_perms via XML round-trip of the XSD type
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource create
        (ok, result) = self._post_common(request, 'access-control-list', obj_dict)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict.get('fq_name', []))
            self.config_object_error(None, fq_name_str, 'access_control_list', 'http_post', msg)
            abort(code, msg)

        name = obj_dict['fq_name'][-1]
        fq_name = obj_dict['fq_name']

        db_conn = self._db_conn

        # if client gave parent_type of config-root, ignore and remove
        if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
            del obj_dict['parent_type']

        if 'parent_type' in obj_dict:
            # non config-root child, verify parent exists
            parent_type = obj_dict['parent_type']
            parent_fq_name = obj_dict['fq_name'][:-1]
            try:
                parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
                (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
                if not ok:
                    (code, err_msg) = status
                    abort(code, err_msg)
                self._permissions.set_user_role(request, obj_dict)
            except NoIdError:
                err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
                fq_name_str = ':'.join(parent_fq_name)
                self.config_object_error(None, fq_name_str, 'access_control_list', 'http_post', err_msg)
                abort(400, err_msg)


        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # Alloc and Store id-mappings before creating entry on pubsub store.
        # Else a subscriber can ask for an id mapping before we have stored it
        uuid_requested = result
        (ok, result) = db_conn.dbe_alloc('access-control-list', obj_dict, uuid_requested)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict['fq_name'])
            self.config_object_error(None, fq_name_str, 'access_control_list', 'http_post', result)
            abort(code, msg)
        cleanup_on_failure.append((db_conn.dbe_release, ['access_control_list', fq_name]))

        obj_ids = result

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')


        # type-specific hook
        r_class = self.get_resource_class('access-control-list')
        if r_class:
            try:
                (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
            except Exception as e:
                ok = False
                result = (500, str(e))
            if not ok:
                # undo id allocation before failing
                for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                    fail_cleanup_callable(*cleanup_args)
                (code, msg) = result
                fq_name_str = ':'.join(fq_name)
                self.config_object_error(None, fq_name_str, 'access_control_list', 'http_post', msg)
                abort(code, msg)
            callable = getattr(r_class, 'http_post_collection_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))

        try:
            (ok, result) = \
                db_conn.dbe_create('access-control-list', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)

        if not ok:
            # undo id allocation and hook side effects on DB-create failure
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            fq_name_str = ':'.join(fq_name)
            self.config_object_error(None, fq_name_str, 'access_control_list', 'http_post', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['name'] = name
        rsp_body['fq_name'] = fq_name
        rsp_body['uuid'] = obj_ids['uuid']
        rsp_body['href'] = self.generate_url('access-control-list', obj_ids['uuid'])
        if 'parent_type' in obj_dict:
            # non config-root child, send back parent uuid/href
            rsp_body['parent_uuid'] = parent_uuid
            rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_access_control_list_create', obj_dict)
        except Exception as e:
            # extension hooks are best-effort
            pass

        return {'access-control-list': rsp_body}
    #end access_control_lists_http_post

    def access_control_lists_http_get(self):
        """REST handler: GET /access-control-lists (list).

        Anchors the listing by parent fq-name/id, back-ref ids or explicit
        uuids; supports 'count', 'detail', 'fields' and '=='-style 'filters'
        query parameters.
        """
        # gather list of uuids using 1. any specified anchors
        # 2. any specified filters
        # if not 'detail' return list with any specified 'fields'
        # if 'detail' return list with props+refs + any specified 'fields'

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')
        parent_uuids = None
        back_ref_uuids = None
        obj_uuids = None
        if (('parent_fq_name_str' in request.query) and
            ('parent_type' in request.query)):
            parent_fq_name = request.query.parent_fq_name_str.split(':')
            parent_type = request.query.parent_type
            parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
        elif 'parent_id' in request.query:
            parent_ids = request.query.parent_id.split(',')
            parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
        if 'back_ref_id' in request.query:
            back_ref_ids = request.query.back_ref_id.split(',')
            back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
        if 'obj_uuids' in request.query:
            obj_uuids = request.query.obj_uuids.split(',')

        # common handling for all resource get
        (ok, result) = self._get_common(request, parent_uuids)
        if not ok:
            (code, msg) = result
            self.config_object_error(None, None, 'access_control_lists', 'http_get_collection', msg)
            abort(code, msg)

        if 'count' in request.query:
            count = 'true' in request.query.count.lower()
        else:
            count = False

        filter_params = request.query.filters
        if filter_params:
            try:
                ff_key_vals = filter_params.split(',')
                ff_names = [ff.split('==')[0] for ff in ff_key_vals]
                ff_values = [ff.split('==')[1] for ff in ff_key_vals]
                filters = {'field_names': ff_names, 'field_values': ff_values}
            except Exception as e:
                abort(400, 'Invalid filter ' + filter_params)
        else:
            filters = None
        db_conn = self._db_conn
        (ok, result) = \
            db_conn.dbe_list('access-control-list', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
        if not ok:
            self.config_object_error(None, None, 'access_control_lists', 'http_get_collection', result)
            abort(404, result)

        # If only counting, return early
        if count:
            return {'access-control-lists': {'count': result}}

        if 'detail' in request.query:
            detail = 'true' in request.query.detail.lower()
        else:
            detail = False

        fq_names_uuids = result
        obj_dicts = []
        if not detail:
            if not self.is_admin_request():
                # non-admin: filter out user_visible=False entries
                obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
                obj_fields = [u'id_perms']
                (ok, result) = db_conn.dbe_read_multi('access-control-list', obj_ids_list, obj_fields)
                if not ok:
                    result = []
                for obj_result in result:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    if obj_result['id_perms'].get('user_visible', True):
                        obj_dict = {}
                        obj_dict['uuid'] = obj_result['uuid']
                        obj_dict['href'] = self.generate_url('access-control-list', obj_result['uuid'])
                        obj_dict['fq_name'] = obj_result['fq_name']
                        obj_dicts.append(obj_dict)
            else:
                for fq_name, obj_uuid in fq_names_uuids:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    obj_dict = {}
                    obj_dict['uuid'] = obj_uuid
                    obj_dict['href'] = self.generate_url('access-control-list', obj_uuid)
                    obj_dict['fq_name'] = fq_name
                    obj_dicts.append(obj_dict)
        else: #detail
            obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
            obj_fields = [u'access_control_list_entries', u'id_perms', u'display_name'] + []
            if 'fields' in request.query:
                obj_fields.extend(request.query.fields.split(','))
            (ok, result) = db_conn.dbe_read_multi('access-control-list', obj_ids_list, obj_fields)

            if not ok:
                result = []

            for obj_result in result:
                # give chance for zk heartbeat/ping
                gevent.sleep(0)
                obj_dict = {}
                obj_dict['name'] = obj_result['fq_name'][-1]
                obj_dict['href'] = self.generate_url('access-control-list', obj_result['uuid'])
                obj_dict.update(obj_result)
                if (obj_dict['id_perms'].get('user_visible', True) or
                    self.is_admin_request()):
                    obj_dicts.append({'access-control-list': obj_dict})

        return {'access-control-lists': obj_dicts}
    #end access_control_lists_http_get

    def _access_control_list_create_default_children(self, parent_obj):
        # access-control-list has no generated default children
        pass
    #end _access_control_list_create_default_children

    def _access_control_list_delete_default_children(self, parent_dict):
        # access-control-list has no generated default children
        pass
    #end _access_control_list_delete_default_children

    def analytics_node_http_get(self, id):
        """REST handler: GET /analytics-node/<id>.

        Honors If-None-Match (ETag) for cache revalidation and the
        'fields' query parameter; aborts 404 for unknown ids.
        """
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_analytics_node_read', id)
        except Exception as e:
            # extension hooks are best-effort
            pass

        # TODO get vals from request out of the global ASAP
        etag = request.headers.get('If-None-Match')
        try:
            obj_type = self._db_conn.uuid_to_obj_type(id)
        except NoIdError:
            obj_type = None
        if obj_type != 'analytics_node':
            abort(404, 'No analytics-node object found for id %s' %(id))
        # common handling for all resource get
        (ok, result) = self._get_common(request, id)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'analytics_node', 'http_get', msg)
            abort(code, msg)

        # type-specific hook
        r_class = self.get_resource_class('analytics-node')
        if r_class:
            r_class.http_get(id)

        db_conn = self._db_conn
        if etag:
            obj_ids = {'uuid': id}
            (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
            if not ok:
                # Not present in DB
                self.config_object_error(id, None, 'analytics_node', 'http_get', result)
                abort(404, result)

            is_latest = result
            if is_latest:
                # send Not-Modified, caches use this for read optimization
                response.status = 304
                return
        #end if etag

        obj_ids = {'uuid': id}

        # Generate field list for db layer
        properties =
[u'analytics_node_ip_address', u'id_perms', u'display_name']
        references = []
        back_references = [u'global_system_config_back_refs']
        children = []
        if 'fields' in request.query:
            obj_fields = request.query.fields.split(',')
        else: # default props + children + refs + backrefs
            obj_fields = properties + references
            if 'exclude_back_refs' not in request.query:
                obj_fields = obj_fields + back_references
            if 'exclude_children' not in request.query:
                obj_fields = obj_fields + children

        (ok, result) = db_conn.dbe_read('analytics-node', obj_ids, obj_fields)
        if not ok:
            # Not present in DB
            self.config_object_error(id, None, 'analytics_node', 'http_get', result)
            abort(404, result)

        # check visibility
        if (not result['id_perms'].get('user_visible', True) and
            not self.is_admin_request()):
            result = 'This object is not visible by users: %s' % id
            self.config_object_error(id, None, 'analytics_node', 'http_get', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('analytics-node', id)
        rsp_body['name'] = result['fq_name'][-1]
        rsp_body.update(result)
        id_perms = result['id_perms']
        # ETag lets clients short-circuit via If-None-Match next time
        response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
        try:
            self._extension_mgrs['resourceApi'].map_method('post_analytics_node_read', id, rsp_body)
        except Exception as e:
            # extension hooks are best-effort; failures are ignored
            pass

        return {'analytics-node': rsp_body}
    #end analytics_node_http_get

    def analytics_node_http_put(self, id):
        """REST PUT handler: update an existing analytics-node resource."""
        key = 'analytics-node'
        obj_dict = request.json[key]

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_analytics_node_update', id, obj_dict)
        except Exception as e:
            pass

        db_conn = self._db_conn
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'analytics_node':
                abort(404, 'No analytics-node object found for id %s' %(id))
            fq_name = db_conn.uuid_to_fq_name(id)
        except NoIdError as e:
            abort(404, str(e))
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            # round-trip id_perms through its XML schema type to validate it
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource put
        (ok, result) = self._put_common(request, 'analytics_node', id, fq_name, obj_dict)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'analytics_node', 'http_put', msg)
            abort(code, msg)


        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # type-specific hook
        r_class = self.get_resource_class('analytics-node')
        if r_class:
            (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
            if not ok:
                (code, msg) = put_result
                self.config_object_error(id, None, 'analytics_node', 'http_put', msg)
                abort(code, msg)
            callable = getattr(r_class, 'http_put_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))

        obj_ids = {'uuid': id}
        try:
            (ok, result) = db_conn.dbe_update('analytics-node', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)
        if not ok:
            # undo hook side effects before reporting failure
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'analytics_node', 'http_put', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('analytics-node', id)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_analytics_node_update', id, obj_dict)
        except Exception as e:
            pass

        return {'analytics-node': rsp_body}
    #end analytics_node_http_put

    def analytics_node_http_delete(self, id):
        """REST DELETE handler for an analytics-node resource."""
        db_conn = self._db_conn
        # if obj doesn't exist return early
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'analytics_node':
                abort(404, 'No analytics-node object found for id %s' %(id))
_ = db_conn.uuid_to_fq_name(id)
        except NoIdError:
            abort(404, 'ID %s does not exist' %(id))

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_analytics_node_delete', id)
        except Exception as e:
            pass

        # read in obj from db (accepting error) to get details of it
        obj_ids = {'uuid': id}
        back_references = [u'global_system_config_back_refs']
        children = []
        obj_fields = children + back_references
        (read_ok, read_result) = db_conn.dbe_read('analytics-node', obj_ids, obj_fields)
        if not read_ok:
            if read_result.startswith('Unknown id:'):
                abort(404, 'ID %s does not exist' %(id))
            else:
                self.config_object_error(id, None, 'analytics_node', 'http_delete', read_result)
                # proceed down to delete the resource

        # common handling for all resource delete
        parent_type = read_result.get('parent_type', None)
        (ok, del_result) = self._delete_common(request, 'analytics_node', id, parent_type)
        if not ok:
            (code, msg) = del_result
            self.config_object_error(id, None, 'analytics_node', 'http_delete', msg)
            abort(code, msg)

        fq_name = read_result['fq_name']
        ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('analytics-node', fq_name)
        obj_ids['imid'] = ifmap_id
        if parent_type:
            parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
            obj_ids['parent_imid'] = parent_imid

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []

        # type-specific hook
        r_class = self.get_resource_class('analytics-node')
        if r_class:
            if read_ok:
                # fail if non-default children or backrefs exist

                # Delete default children first
                self._analytics_node_delete_default_children(read_result)

                (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
                if not ok:
                    (code, msg) = del_result
                    self.config_object_error(id, None, 'analytics_node', 'http_delete', msg)
                    abort(code, msg)
                callable = getattr(r_class, 'http_delete_fail', None)
                if callable:
                    cleanup_on_failure.append((callable, [id, read_result, db_conn]))
            #end if read_ok

        try:
            (ok, del_result) = db_conn.dbe_delete('analytics-node', obj_ids, read_result)
        except Exception as e:
            ok = False
            del_result = str(e)
        if not ok:
            # undo hook side effects before reporting failure
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'analytics_node', 'http_delete', del_result)
            abort(409, del_result)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_analytics_node_delete', id, read_result)
        except Exception as e:
            pass

    #end analytics_node_http_delete

    def analytics_nodes_http_post(self):
        """REST POST handler: create a new analytics-node resource."""
        key = 'analytics-node'
        obj_dict = request.json[key]
        self._post_validate(key, obj_dict=obj_dict)
        fq_name = obj_dict['fq_name']

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_analytics_node_create', obj_dict)
        except Exception as e:
            pass

        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            # round-trip id_perms through its XML schema type to validate it
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource create
        (ok, result) = self._post_common(request, 'analytics-node', obj_dict)
        if not ok:
            (code, msg) = result
fq_name_str = ':'.join(obj_dict.get('fq_name', []))
            self.config_object_error(None, fq_name_str, 'analytics_node', 'http_post', msg)
            abort(code, msg)

        name = obj_dict['fq_name'][-1]
        fq_name = obj_dict['fq_name']

        db_conn = self._db_conn

        # if client gave parent_type of config-root, ignore and remove
        if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
            del obj_dict['parent_type']

        if 'parent_type' in obj_dict:
            # non config-root child, verify parent exists
            parent_type = obj_dict['parent_type']
            parent_fq_name = obj_dict['fq_name'][:-1]
            try:
                parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
                (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
                if not ok:
                    (code, err_msg) = status
                    abort(code, err_msg)
                self._permissions.set_user_role(request, obj_dict)
            except NoIdError:
                err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
                fq_name_str = ':'.join(parent_fq_name)
                self.config_object_error(None, fq_name_str, 'analytics_node', 'http_post', err_msg)
                abort(400, err_msg)


        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # Alloc and Store id-mappings before creating entry on pubsub store.
        # Else a subscriber can ask for an id mapping before we have stored it
        uuid_requested = result
        (ok, result) = db_conn.dbe_alloc('analytics-node', obj_dict, uuid_requested)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict['fq_name'])
            self.config_object_error(None, fq_name_str, 'analytics_node', 'http_post', result)
            abort(code, msg)
        cleanup_on_failure.append((db_conn.dbe_release, ['analytics_node', fq_name]))

        obj_ids = result

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')


        # type-specific hook
        r_class = self.get_resource_class('analytics-node')
        if r_class:
            try:
                (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
            except Exception as e:
                ok = False
                result = (500, str(e))
            if not ok:
                for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                    fail_cleanup_callable(*cleanup_args)
                (code, msg) = result
                fq_name_str = ':'.join(fq_name)
                self.config_object_error(None, fq_name_str, 'analytics_node', 'http_post', msg)
                abort(code, msg)
            callable = getattr(r_class, 'http_post_collection_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))

        try:
            (ok, result) = \
                db_conn.dbe_create('analytics-node', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)

        if not ok:
            # undo alloc/hook side effects before reporting failure
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            fq_name_str = ':'.join(fq_name)
            self.config_object_error(None, fq_name_str, 'analytics_node', 'http_post', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['name'] = name
        rsp_body['fq_name'] = fq_name
        rsp_body['uuid'] = obj_ids['uuid']
        rsp_body['href'] = self.generate_url('analytics-node', obj_ids['uuid'])
        if 'parent_type' in obj_dict:
            # non config-root child, send back parent uuid/href
            rsp_body['parent_uuid'] = parent_uuid
            rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_analytics_node_create', obj_dict)
        except Exception as e:
            pass

        return {'analytics-node': rsp_body}
    #end analytics_nodes_http_post

    def analytics_nodes_http_get(self):
        """REST GET handler for the analytics-node collection."""
        # gather list of uuids using 1. any specified anchors
        # 2. any specified filters
        # if not 'detail' return list with any specified 'fields'
        # if 'detail' return list with props+refs + any specified 'fields'

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')
        parent_uuids = None
        back_ref_uuids = None
        obj_uuids = None
        if (('parent_fq_name_str' in request.query) and
            ('parent_type' in request.query)):
            parent_fq_name = request.query.parent_fq_name_str.split(':')
            parent_type = request.query.parent_type
            parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
        elif 'parent_id' in request.query:
            parent_ids = request.query.parent_id.split(',')
            parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
        if 'back_ref_id' in request.query:
            back_ref_ids = request.query.back_ref_id.split(',')
            back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
        if 'obj_uuids' in request.query:
            obj_uuids = request.query.obj_uuids.split(',')

        # common handling for all resource get
        (ok, result) = self._get_common(request, parent_uuids)
        if not ok:
            (code, msg) = result
            self.config_object_error(None, None, 'analytics_nodes', 'http_get_collection', msg)
            abort(code, msg)

        if 'count' in request.query:
            count = 'true' in request.query.count.lower()
        else:
            count = False

        filter_params = request.query.filters
        if filter_params:
            try:
                # filters come as comma-separated "name==value" pairs
                ff_key_vals = filter_params.split(',')
                ff_names = [ff.split('==')[0] for ff in ff_key_vals]
                ff_values = [ff.split('==')[1] for ff in ff_key_vals]
                filters = {'field_names': ff_names, 'field_values': ff_values}
            except Exception as e:
abort(400, 'Invalid filter ' + filter_params)
        else:
            filters = None
        db_conn = self._db_conn
        (ok, result) = \
            db_conn.dbe_list('analytics-node', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
        if not ok:
            self.config_object_error(None, None, 'analytics_nodes', 'http_get_collection', result)
            abort(404, result)

        # If only counting, return early
        if count:
            return {'analytics-nodes': {'count': result}}

        if 'detail' in request.query:
            detail = 'true' in request.query.detail.lower()
        else:
            detail = False

        fq_names_uuids = result
        obj_dicts = []
        if not detail:
            if not self.is_admin_request():
                # non-admin: re-read id_perms so entries marked not
                # user_visible can be filtered out of the listing
                obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
                obj_fields = [u'id_perms']
                (ok, result) = db_conn.dbe_read_multi('analytics-node', obj_ids_list, obj_fields)
                if not ok:
                    result = []
                for obj_result in result:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    if obj_result['id_perms'].get('user_visible', True):
                        obj_dict = {}
                        obj_dict['uuid'] = obj_result['uuid']
                        obj_dict['href'] = self.generate_url('analytics-node', obj_result['uuid'])
                        obj_dict['fq_name'] = obj_result['fq_name']
                        obj_dicts.append(obj_dict)
            else:
                for fq_name, obj_uuid in fq_names_uuids:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    obj_dict = {}
                    obj_dict['uuid'] = obj_uuid
                    obj_dict['href'] = self.generate_url('analytics-node', obj_uuid)
                    obj_dict['fq_name'] = fq_name
                    obj_dicts.append(obj_dict)
        else: #detail
            obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
            obj_fields = [u'analytics_node_ip_address', u'id_perms', u'display_name'] + []
            if 'fields' in request.query:
                obj_fields.extend(request.query.fields.split(','))
            (ok, result) = db_conn.dbe_read_multi('analytics-node', obj_ids_list, obj_fields)

            if not ok:
                result = []

            for obj_result in result:
                # give chance for zk heartbeat/ping
                gevent.sleep(0)
                obj_dict = {}
                obj_dict['name'] = obj_result['fq_name'][-1]
                obj_dict['href'] = self.generate_url('analytics-node', obj_result['uuid'])
                obj_dict.update(obj_result)
                if (obj_dict['id_perms'].get('user_visible', True) or
                    self.is_admin_request()):
                    obj_dicts.append({'analytics-node': obj_dict})

        return {'analytics-nodes': obj_dicts}
    #end analytics_nodes_http_get

    def _analytics_node_create_default_children(self, parent_obj):
        # analytics-node declares no default children to create
        pass
    #end _analytics_node_create_default_children

    def _analytics_node_delete_default_children(self, parent_dict):
        # analytics-node declares no default children to delete
        pass
    #end _analytics_node_delete_default_children

    def virtual_DNS_http_get(self, id):
        """REST GET handler for a single virtual-DNS resource."""
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_virtual_DNS_read', id)
        except Exception as e:
            # extension hooks are best-effort; failures are ignored
            pass

        # TODO get vals from request out of the global ASAP
        etag = request.headers.get('If-None-Match')
        try:
            obj_type = self._db_conn.uuid_to_obj_type(id)
        except NoIdError:
            obj_type = None
        if obj_type != 'virtual_DNS':
            abort(404, 'No virtual-DNS object found for id %s' %(id))
        # common handling for all resource get
        (ok, result) = self._get_common(request, id)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'virtual_DNS', 'http_get', msg)
            abort(code, msg)

        # type-specific hook
        r_class = self.get_resource_class('virtual-DNS')
        if r_class:
            r_class.http_get(id)

        db_conn = self._db_conn
        if etag:
            obj_ids = {'uuid': id}
            (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
            if not ok:
                # Not present in DB
                self.config_object_error(id, None, 'virtual_DNS', 'http_get', result)
                abort(404, result)

            is_latest = result
            if is_latest:
                # send Not-Modified, caches use this for read optimization
                response.status = 304
                return
        #end if etag

        obj_ids = {'uuid': id}

        # Generate field list for db layer
        properties = [u'virtual_DNS_data', u'id_perms', u'display_name']
        references = []
        back_references = [u'domain_back_refs', u'network_ipam_back_refs']
        children = [u'virtual_DNS_records']
        if \
'fields' in request.query:
            obj_fields = request.query.fields.split(',')
        else: # default props + children + refs + backrefs
            obj_fields = properties + references
            if 'exclude_back_refs' not in request.query:
                obj_fields = obj_fields + back_references
            if 'exclude_children' not in request.query:
                obj_fields = obj_fields + children

        (ok, result) = db_conn.dbe_read('virtual-DNS', obj_ids, obj_fields)
        if not ok:
            # Not present in DB
            self.config_object_error(id, None, 'virtual_DNS', 'http_get', result)
            abort(404, result)

        # check visibility
        if (not result['id_perms'].get('user_visible', True) and
            not self.is_admin_request()):
            result = 'This object is not visible by users: %s' % id
            self.config_object_error(id, None, 'virtual_DNS', 'http_get', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('virtual-DNS', id)
        rsp_body['name'] = result['fq_name'][-1]
        rsp_body.update(result)
        id_perms = result['id_perms']
        # ETag lets clients short-circuit via If-None-Match next time
        response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
        try:
            self._extension_mgrs['resourceApi'].map_method('post_virtual_DNS_read', id, rsp_body)
        except Exception as e:
            pass

        return {'virtual-DNS': rsp_body}
    #end virtual_DNS_http_get

    def virtual_DNS_http_put(self, id):
        """REST PUT handler: update an existing virtual-DNS resource."""
        key = 'virtual-DNS'
        obj_dict = request.json[key]

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_virtual_DNS_update', id, obj_dict)
        except Exception as e:
            pass

        db_conn = self._db_conn
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'virtual_DNS':
                abort(404, 'No virtual-DNS object found for id %s' %(id))
            fq_name = db_conn.uuid_to_fq_name(id)
        except NoIdError as e:
            abort(404, str(e))
        prop_dict = obj_dict.get('virtual_DNS_data')
        if prop_dict:
            # round-trip virtual_DNS_data through its XML schema type to validate it
            buf = cStringIO.StringIO()
            xx_virtual_DNS_data = VirtualDnsType(**prop_dict)
            xx_virtual_DNS_data.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_virtual_DNS_data = VirtualDnsType()
            try:
                xx_virtual_DNS_data.build(node)
            except Exception as e:
                abort(400, str(e))
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            # round-trip id_perms through its XML schema type to validate it
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource put
        (ok, result) = self._put_common(request, 'virtual_DNS', id, fq_name, obj_dict)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'virtual_DNS', 'http_put', msg)
            abort(code, msg)

        # Validate perms
        objtype_list = [u'virtual_DNS_record']
        for obj_type in objtype_list:
            refs = obj_dict.get('%s_refs'%(obj_type), None)
            if refs:
                for ref in refs:
                    try:
                        ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
                    except NoIdError as e:
                        abort(404, str(e))
                    (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
                    if not ok:
                        (code, err_msg) = status
                        abort(code, err_msg)

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # type-specific hook
        r_class = self.get_resource_class('virtual-DNS')
        if r_class:
            (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
            if not ok:
                (code, msg) = put_result
                self.config_object_error(id, None, 'virtual_DNS', 'http_put', msg)
                abort(code, msg)
            callable = getattr(r_class, 'http_put_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))

        obj_ids = {'uuid': id}
        try:
            (ok, result) = db_conn.dbe_update('virtual-DNS', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)
        if not ok:
            # undo hook side effects before reporting failure
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'virtual_DNS', 'http_put', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('virtual-DNS', id)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_virtual_DNS_update', id, obj_dict)
        except Exception as e:
            pass

        return {'virtual-DNS': rsp_body}
    #end virtual_DNS_http_put

    def virtual_DNS_http_delete(self, id):
        """REST DELETE handler for a virtual-DNS resource."""
        db_conn = self._db_conn
        # if obj doesn't exist return early
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'virtual_DNS':
                abort(404, 'No virtual-DNS object found for id %s' %(id))
            _ = db_conn.uuid_to_fq_name(id)
        except NoIdError:
            abort(404, 'ID %s does not exist' %(id))

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_virtual_DNS_delete', id)
        except Exception as e:
            pass

        # read in obj from db (accepting error) to get details of it
        obj_ids = {'uuid': id}
        back_references = [u'domain_back_refs', u'network_ipam_back_refs']
        children = [u'virtual_DNS_records']
        obj_fields = children + back_references
        (read_ok, read_result) = db_conn.dbe_read('virtual-DNS', obj_ids, obj_fields)
        if not read_ok:
            if \
read_result.startswith('Unknown id:'):
                abort(404, 'ID %s does not exist' %(id))
            else:
                self.config_object_error(id, None, 'virtual_DNS', 'http_delete', read_result)
                # proceed down to delete the resource

        # common handling for all resource delete
        parent_type = read_result.get('parent_type', None)
        (ok, del_result) = self._delete_common(request, 'virtual_DNS', id, parent_type)
        if not ok:
            (code, msg) = del_result
            self.config_object_error(id, None, 'virtual_DNS', 'http_delete', msg)
            abort(code, msg)

        fq_name = read_result['fq_name']
        ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('virtual-DNS', fq_name)
        obj_ids['imid'] = ifmap_id
        if parent_type:
            parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
            obj_ids['parent_imid'] = parent_imid

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []

        # type-specific hook
        r_class = self.get_resource_class('virtual-DNS')
        if r_class:
            if read_ok:
                # fail if non-default children or backrefs exist
                virtual_DNS_records = read_result.get('virtual_DNS_records', None)
                if virtual_DNS_records:
                    has_infos = read_result['virtual_DNS_records']
                    # only the auto-created default record may remain
                    if ((len(has_infos) > 1) or
                        (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-virtual-DNS-record')):
                        has_urls = [has_info['href'] for has_info in has_infos]
                        has_str = ', '.join(has_urls)
                        err_msg = 'Children ' + has_str + ' still exist'
                        self.config_object_error(id, None, 'virtual_DNS', 'http_delete', err_msg)
                        abort(409, err_msg)

                network_ipam_back_refs = read_result.get('network_ipam_back_refs', None)
                if network_ipam_back_refs:
                    back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['network_ipam_back_refs']]
                    back_ref_str = ', '.join(back_ref_urls)
                    err_msg = 'Back-References from ' + back_ref_str + ' still exist'
                    self.config_object_error(id, None, 'virtual_DNS', 'http_delete', err_msg)
                    abort(409, err_msg)


                # Delete default children first
                self._virtual_DNS_delete_default_children(read_result)

                (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
                if not ok:
                    (code, msg) = del_result
                    self.config_object_error(id, None, 'virtual_DNS', 'http_delete', msg)
                    abort(code, msg)
                callable = getattr(r_class, 'http_delete_fail', None)
                if callable:
                    cleanup_on_failure.append((callable, [id, read_result, db_conn]))
            #end if read_ok

        try:
            (ok, del_result) = db_conn.dbe_delete('virtual-DNS', obj_ids, read_result)
        except Exception as e:
            ok = False
            del_result = str(e)
        if not ok:
            # undo hook side effects before reporting failure
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'virtual_DNS', 'http_delete', del_result)
            abort(409, del_result)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_virtual_DNS_delete', id, read_result)
        except Exception as e:
            pass

    #end virtual_DNS_http_delete

    def virtual_DNSs_http_post(self):
        """REST POST handler: create a new virtual-DNS resource."""
        key = 'virtual-DNS'
        obj_dict = request.json[key]
        self._post_validate(key, obj_dict=obj_dict)
        fq_name = obj_dict['fq_name']

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_virtual_DNS_create', obj_dict)
        except Exception as e:
            pass

        prop_dict = obj_dict.get('virtual_DNS_data')
        if prop_dict:
            # round-trip virtual_DNS_data through its XML schema type to validate it
            buf = cStringIO.StringIO()
            xx_virtual_DNS_data = VirtualDnsType(**prop_dict)
            xx_virtual_DNS_data.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_virtual_DNS_data = VirtualDnsType()
            try:
                xx_virtual_DNS_data.build(node)
            except Exception as e:
                abort(400, str(e))
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            # round-trip id_perms through its XML schema type to validate it
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource create
        (ok, result) = \
self._post_common(request, 'virtual-DNS', obj_dict)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict.get('fq_name', []))
            self.config_object_error(None, fq_name_str, 'virtual_DNS', 'http_post', msg)
            abort(code, msg)

        name = obj_dict['fq_name'][-1]
        fq_name = obj_dict['fq_name']

        db_conn = self._db_conn

        # if client gave parent_type of config-root, ignore and remove
        if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
            del obj_dict['parent_type']

        if 'parent_type' in obj_dict:
            # non config-root child, verify parent exists
            parent_type = obj_dict['parent_type']
            parent_fq_name = obj_dict['fq_name'][:-1]
            try:
                parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
                (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
                if not ok:
                    (code, err_msg) = status
                    abort(code, err_msg)
                self._permissions.set_user_role(request, obj_dict)
            except NoIdError:
                err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
                fq_name_str = ':'.join(parent_fq_name)
                self.config_object_error(None, fq_name_str, 'virtual_DNS', 'http_post', err_msg)
                abort(400, err_msg)

        # Validate perms
        objtype_list = [u'virtual_DNS_record']
        for obj_type in objtype_list:
            refs = obj_dict.get('%s_refs'%(obj_type), None)
            if refs:
                for ref in refs:
                    # NOTE(review): unlike the PUT path, NoIdError from
                    # fq_name_to_uuid is not caught here - confirm intended
                    ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
                    (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
                    if not ok:
                        (code, err_msg) = status
                        abort(code, err_msg)

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # Alloc and Store id-mappings before creating entry on pubsub store.
        # Else a subscriber can ask for an id mapping before we have stored it
        uuid_requested = result
        (ok, result) = db_conn.dbe_alloc('virtual-DNS', obj_dict, uuid_requested)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict['fq_name'])
            self.config_object_error(None, fq_name_str, 'virtual_DNS', 'http_post', result)
            abort(code, msg)
        cleanup_on_failure.append((db_conn.dbe_release, ['virtual_DNS', fq_name]))

        obj_ids = result

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')


        # type-specific hook
        r_class = self.get_resource_class('virtual-DNS')
        if r_class:
            try:
                (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
            except Exception as e:
                ok = False
                result = (500, str(e))
            if not ok:
                for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                    fail_cleanup_callable(*cleanup_args)
                (code, msg) = result
                fq_name_str = ':'.join(fq_name)
                self.config_object_error(None, fq_name_str, 'virtual_DNS', 'http_post', msg)
                abort(code, msg)
            callable = getattr(r_class, 'http_post_collection_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))

        try:
            (ok, result) = \
                db_conn.dbe_create('virtual-DNS', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)

        if not ok:
            # undo alloc/hook side effects before reporting failure
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            fq_name_str = ':'.join(fq_name)
            self.config_object_error(None, fq_name_str, 'virtual_DNS', 'http_post', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['name'] = name
        rsp_body['fq_name'] = fq_name
        rsp_body['uuid'] = obj_ids['uuid']
        rsp_body['href'] = self.generate_url('virtual-DNS', obj_ids['uuid'])
        if 'parent_type' in obj_dict:
            # non config-root child, send back parent uuid/href
            rsp_body['parent_uuid'] = parent_uuid
            rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_virtual_DNS_create', obj_dict)
        except Exception as e:
            pass

        return {'virtual-DNS': rsp_body}
    #end virtual_DNSs_http_post

    def virtual_DNSs_http_get(self):
        """REST GET handler for the virtual-DNS collection."""
        # gather list of uuids using 1. any specified anchors
        # 2. any specified filters
        # if not 'detail' return list with any specified 'fields'
        # if 'detail' return list with props+refs + any specified 'fields'

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')
        parent_uuids = None
        back_ref_uuids = None
        obj_uuids = None
        if (('parent_fq_name_str' in request.query) and
            ('parent_type' in request.query)):
            parent_fq_name = request.query.parent_fq_name_str.split(':')
            parent_type = request.query.parent_type
            parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
        elif 'parent_id' in request.query:
            parent_ids = request.query.parent_id.split(',')
            parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
        if 'back_ref_id' in request.query:
            back_ref_ids = request.query.back_ref_id.split(',')
            back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
        if 'obj_uuids' in request.query:
            obj_uuids = request.query.obj_uuids.split(',')

        # common handling for all resource get
        (ok, result) = self._get_common(request, parent_uuids)
        if not ok:
            (code, msg) = result
            self.config_object_error(None, None, 'virtual_DNSs', 'http_get_collection', msg)
            abort(code, msg)

        if 'count' in request.query:
            count = 'true' in request.query.count.lower()
        else:
            count = False

        filter_params = request.query.filters
        if filter_params:
            try:
                # filters come as comma-separated "name==value" pairs
                ff_key_vals = filter_params.split(',')
                ff_names = [ff.split('==')[0] for ff in ff_key_vals]
                ff_values = [ff.split('==')[1] for ff in ff_key_vals]
                filters = {'field_names': ff_names, 'field_values': ff_values}
            except Exception as e:
                abort(400, 'Invalid filter ' + filter_params)
        else:
            filters = \
None
        db_conn = self._db_conn
        (ok, result) = \
            db_conn.dbe_list('virtual-DNS', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
        if not ok:
            self.config_object_error(None, None, 'virtual_DNSs', 'http_get_collection', result)
            abort(404, result)

        # If only counting, return early
        if count:
            return {'virtual-DNSs': {'count': result}}

        if 'detail' in request.query:
            detail = 'true' in request.query.detail.lower()
        else:
            detail = False

        fq_names_uuids = result
        obj_dicts = []
        if not detail:
            if not self.is_admin_request():
                # non-admin: re-read id_perms so entries marked not
                # user_visible can be filtered out of the listing
                obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
                obj_fields = [u'id_perms']
                (ok, result) = db_conn.dbe_read_multi('virtual-DNS', obj_ids_list, obj_fields)
                if not ok:
                    result = []
                for obj_result in result:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    if obj_result['id_perms'].get('user_visible', True):
                        obj_dict = {}
                        obj_dict['uuid'] = obj_result['uuid']
                        obj_dict['href'] = self.generate_url('virtual-DNS', obj_result['uuid'])
                        obj_dict['fq_name'] = obj_result['fq_name']
                        obj_dicts.append(obj_dict)
            else:
                for fq_name, obj_uuid in fq_names_uuids:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    obj_dict = {}
                    obj_dict['uuid'] = obj_uuid
                    obj_dict['href'] = self.generate_url('virtual-DNS', obj_uuid)
                    obj_dict['fq_name'] = fq_name
                    obj_dicts.append(obj_dict)
        else: #detail
            obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
            obj_fields = [u'virtual_DNS_data', u'id_perms', u'display_name'] + []
            if 'fields' in request.query:
                obj_fields.extend(request.query.fields.split(','))
            (ok, result) = db_conn.dbe_read_multi('virtual-DNS', obj_ids_list, obj_fields)

            if not ok:
                result = []

            for obj_result in result:
                # give chance for zk heartbeat/ping
                gevent.sleep(0)
                obj_dict = {}
                obj_dict['name'] = obj_result['fq_name'][-1]
                obj_dict['href'] = self.generate_url('virtual-DNS', obj_result['uuid'])
                obj_dict.update(obj_result)
                # tail of virtual_DNSs_http_get (def line is above this chunk):
                # hide objects marked not user_visible from non-admin callers
                if (obj_dict['id_perms'].get('user_visible', True) or
                    self.is_admin_request()):
                    obj_dicts.append({'virtual-DNS': obj_dict})

        return {'virtual-DNSs': obj_dicts}
    #end virtual_DNSs_http_get

    def _virtual_DNS_create_default_children(self, parent_obj):
        """Create the default virtual-DNS-record child of a new virtual-DNS,
        if the resource class is provisioned to generate a default instance.

        NOTE: dbe_create/dbe_alloc failures after the alloc step are not
        rolled back here; only an alloc failure returns (ok, result).
        """
        # Create a default child only if provisioned for
        r_class = self.get_resource_class('virtual-DNS-record')
        if r_class and r_class.generate_default_instance:
            child_obj = VirtualDnsRecord(parent_obj = parent_obj)
            child_dict = child_obj.__dict__
            fq_name = child_dict['fq_name']
            child_dict['id_perms'] = self._get_default_id_perms('virtual-DNS-record')

            db_conn = self._db_conn
            # allocate uuid/ifmap ids for the default child before storing it
            (ok, result) = db_conn.dbe_alloc('virtual-DNS-record', child_dict)
            if not ok:
                return (ok, result)

            obj_ids = result
            db_conn.dbe_create('virtual-DNS-record', obj_ids, child_dict)
            # recurse so the default child gets its own default children
            self._virtual_DNS_record_create_default_children(child_obj)

        pass
    #end _virtual_DNS_create_default_children

    def _virtual_DNS_delete_default_children(self, parent_dict):
        """Delete the auto-created default virtual-DNS-record child (if the
        resource class generates one) before its virtual-DNS parent is deleted.
        """
        # Delete a default child only if provisioned for
        r_class = self.get_resource_class('virtual-DNS-record')
        if r_class and r_class.generate_default_instance:
            # first locate default child then delete it
            has_infos = parent_dict.get('virtual_DNS_records')
            if has_infos:
                for has_info in has_infos:
                    if has_info['to'][-1] == 'default-virtual-DNS-record':
                        # uuid is the last path segment of the child's href
                        default_child_id = has_info['href'].split('/')[-1]
                        self.virtual_DNS_record_http_delete(default_child_id)
                        break

        pass
    #end _virtual_DNS_delete_default_children

    def customer_attachment_http_get(self, id):
        """REST GET of one customer-attachment by uuid.

        Flow: pre-read extension hook (errors ignored) -> type check of the
        uuid -> common get handling -> optional ETag short-circuit (304) ->
        field-list construction from query params -> dbe_read -> visibility
        check -> post-read hook. Returns {'customer-attachment': <dict>};
        aborts 404 on unknown id or hidden object.
        """
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_customer_attachment_read', id)
        except Exception as e:
            # extension hooks are best-effort; failures are deliberately ignored
            pass

        # TODO get vals from request out of the global ASAP
        etag = request.headers.get('If-None-Match')
        try:
            obj_type = self._db_conn.uuid_to_obj_type(id)
        except NoIdError:
            obj_type = None
        if obj_type != 'customer_attachment':
            abort(404, 'No customer-attachment object found for id %s' %(id))
        # common handling for all resource get
        (ok, result) = self._get_common(request, id)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'customer_attachment', 'http_get', msg)
            abort(code, msg)

        # type-specific hook
        r_class = self.get_resource_class('customer-attachment')
        if r_class:
            r_class.http_get(id)

        db_conn = self._db_conn
        if etag:
            obj_ids = {'uuid': id}
            # strip the quotes the ETag header carries before comparing
            (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
            if not ok:
                # Not present in DB
                self.config_object_error(id, None, 'customer_attachment', 'http_get', result)
                abort(404, result)

            is_latest = result
            if is_latest:
                # send Not-Modified, caches use this for read optimization
                response.status = 304
                return
        #end if etag

        obj_ids = {'uuid': id}

        # Generate field list for db layer
        properties = [u'attachment_address', u'id_perms', u'display_name']
        references = ['virtual_machine_interface_refs', u'floating_ip_refs']
        back_references = []
        children = []
        if 'fields' in request.query:
            obj_fields = request.query.fields.split(',')
        else: # default props + children + refs + backrefs
            obj_fields = properties + references
            if 'exclude_back_refs' not in request.query:
                obj_fields = obj_fields + back_references
            if 'exclude_children' not in request.query:
                obj_fields = obj_fields + children

        (ok, result) = db_conn.dbe_read('customer-attachment', obj_ids, obj_fields)
        if not ok:
            # Not present in DB
            self.config_object_error(id, None, 'customer_attachment', 'http_get', result)
            abort(404, result)

        # check visibility
        if (not result['id_perms'].get('user_visible', True) and
            not self.is_admin_request()):
            result = 'This object is not visible by users: %s' % id
            self.config_object_error(id, None, 'customer_attachment', 'http_get', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('customer-attachment', id)
        rsp_body['name'] = result['fq_name'][-1]
        rsp_body.update(result)
        id_perms = result['id_perms']
        # expose last_modified as the ETag for the If-None-Match fast path above
        response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
        try:
            self._extension_mgrs['resourceApi'].map_method('post_customer_attachment_read', id, rsp_body)
        except Exception as e:
            pass

        return {'customer-attachment': rsp_body}
    #end customer_attachment_http_get

    def customer_attachment_http_put(self, id):
        """REST PUT (update) of one customer-attachment.

        Validates property dicts by round-tripping them through their XSD
        types (export -> parse -> build), checks link perms on every ref,
        runs the type-specific http_put hook (registering its failure
        cleanup), then dbe_update. On a failed update all registered cleanup
        callables are invoked before aborting.
        """
        key = 'customer-attachment'
        obj_dict = request.json[key]

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_customer_attachment_update', id, obj_dict)
        except Exception as e:
            # best-effort hook; ignore failures
            pass

        db_conn = self._db_conn
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'customer_attachment':
                abort(404, 'No customer-attachment object found for id %s' %(id))
            fq_name = db_conn.uuid_to_fq_name(id)
        except NoIdError as e:
            abort(404, str(e))
        prop_dict = obj_dict.get('attachment_address')
        if prop_dict:
            # validate by serializing to XML and re-building the typed object;
            # a malformed property dict surfaces here as a 400
            buf = cStringIO.StringIO()
            xx_attachment_address = AttachmentAddressType(**prop_dict)
            xx_attachment_address.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_attachment_address = AttachmentAddressType()
            try:
                xx_attachment_address.build(node)
            except Exception as e:
                abort(400, str(e))
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource put
        (ok, result) = self._put_common(request, 'customer_attachment', id, fq_name, obj_dict)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'customer_attachment', 'http_put', msg)
            abort(code, msg)

        # Validate perms
        objtype_list = ['virtual_machine_interface', u'floating_ip', 'routing_instance', 'provider_attachment']
        for obj_type in objtype_list:
            refs = obj_dict.get('%s_refs'%(obj_type), None)
            if refs:
                for ref in refs:
                    try:
                        ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
                    except NoIdError as e:
                        abort(404, str(e))
                    (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
                    if not ok:
                        (code, err_msg) = status
                        abort(code, err_msg)

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # type-specific hook
        r_class = self.get_resource_class('customer-attachment')
        if r_class:
            (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
            if not ok:
                (code, msg) = put_result
                self.config_object_error(id, None, 'customer_attachment', 'http_put', msg)
                abort(code, msg)
            callable = getattr(r_class, 'http_put_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))

        obj_ids = {'uuid': id}
        try:
            (ok, result) = db_conn.dbe_update('customer-attachment', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)
        if not ok:
            # undo the type-specific hook's state changes before reporting
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'customer_attachment', 'http_put', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('customer-attachment', id)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_customer_attachment_update', id, obj_dict)
        except Exception as e:
            pass

        return {'customer-attachment': rsp_body}
    #end customer_attachment_http_put

    def customer_attachment_http_delete(self, id):
        """REST DELETE of one customer-attachment.

        Reads the object (tolerating read errors), runs common delete
        handling, deletes default children, invokes the type-specific
        http_delete hook (with failure cleanup), then dbe_delete. Aborts
        409 when the DB delete fails.
        """
        db_conn = self._db_conn
        # if obj doesn't exist return early
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'customer_attachment':
                abort(404, 'No customer-attachment object found for id %s' %(id))
            _ = db_conn.uuid_to_fq_name(id)
        except NoIdError:
            abort(404, 'ID %s does not exist' %(id))

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_customer_attachment_delete', id)
        except Exception as e:
            pass

        # read in obj from db (accepting error) to get details of it
        obj_ids = {'uuid': id}
        back_references = []
        children = []
        obj_fields = children + back_references
        (read_ok, read_result) = db_conn.dbe_read('customer-attachment', obj_ids, obj_fields)
        if not read_ok:
            if read_result.startswith('Unknown id:'):
                abort(404, 'ID %s does not exist' %(id))
            else:
                self.config_object_error(id, None, 'customer_attachment', 'http_delete', read_result)
                # proceed down to delete the resource
                # NOTE(review): read_result is an error *string* on this path,
                # yet read_result.get(...)/read_result['fq_name'] are used
                # below — confirm this branch is actually reachable in practice

        # common handling for all resource delete
        parent_type = read_result.get('parent_type', None)
        (ok, del_result) = self._delete_common(request, 'customer_attachment', id, parent_type)
        if not ok:
            (code, msg) = del_result
            self.config_object_error(id, None, 'customer_attachment', 'http_delete', msg)
            abort(code, msg)

        fq_name = read_result['fq_name']
        ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('customer-attachment', fq_name)
        obj_ids['imid'] = ifmap_id
        if parent_type:
            # parent fq_name is this object's fq_name minus the leaf
            parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
            obj_ids['parent_imid'] = parent_imid

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []

        # type-specific hook
        r_class = self.get_resource_class('customer-attachment')
        if r_class:
            if read_ok:
                # fail if non-default children or backrefs exist
                # (customer-attachment declares no children/backrefs, so no checks)

                # Delete default children first
                self._customer_attachment_delete_default_children(read_result)

                (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
                if not ok:
                    (code, msg) = del_result
                    self.config_object_error(id, None, 'customer_attachment', 'http_delete', msg)
                    abort(code, msg)
                callable = getattr(r_class, 'http_delete_fail', None)
                if callable:
                    cleanup_on_failure.append((callable, [id, read_result, db_conn]))
            #end if read_ok

        try:
            (ok, del_result) = db_conn.dbe_delete('customer-attachment', obj_ids, read_result)
        except Exception as e:
            ok = False
            del_result = str(e)
        if not ok:
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'customer_attachment', 'http_delete', del_result)
            abort(409, del_result)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_customer_attachment_delete', id, read_result)
        except Exception as e:
            pass

    #end customer_attachment_http_delete

    def customer_attachments_http_post(self):
        """REST POST (create) of a customer-attachment.

        Validates XSD-typed properties, runs common create handling, checks
        link perms on refs, allocates ids (dbe_alloc) BEFORE the pubsub-visible
        create, runs the type-specific http_post_collection hook with failure
        cleanup, then dbe_create.
        """
        key = 'customer-attachment'
        obj_dict = request.json[key]
        self._post_validate(key, obj_dict=obj_dict)
        fq_name = obj_dict['fq_name']

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_customer_attachment_create', obj_dict)
        except Exception as e:
            pass

        prop_dict = obj_dict.get('attachment_address')
        if prop_dict:
            # XSD round-trip validation, as in the PUT handler
            buf = cStringIO.StringIO()
            xx_attachment_address = AttachmentAddressType(**prop_dict)
            xx_attachment_address.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_attachment_address = AttachmentAddressType()
            try:
                xx_attachment_address.build(node)
            except Exception as e:
                abort(400, str(e))
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource create
        (ok, result) = self._post_common(request, 'customer-attachment', obj_dict)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict.get('fq_name', []))
            self.config_object_error(None, fq_name_str, 'customer_attachment', 'http_post', msg)
            abort(code, msg)

        name = obj_dict['fq_name'][-1]
        fq_name = obj_dict['fq_name']

        db_conn = self._db_conn

        # Validate perms
        objtype_list = ['virtual_machine_interface', u'floating_ip', 'routing_instance', 'provider_attachment']
        for obj_type in objtype_list:
            refs = obj_dict.get('%s_refs'%(obj_type), None)
            if refs:
                for ref in refs:
                    # NOTE(review): unlike the PUT handler, NoIdError from
                    # fq_name_to_uuid is NOT caught here, so an unknown ref
                    # propagates instead of a clean 404 — confirm intended
                    ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
                    (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
                    if not ok:
                        (code, err_msg) = status
                        abort(code, err_msg)

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # Alloc and Store id-mappings before creating entry on pubsub store.
        # Else a subscriber can ask for an id mapping before we have stored it
        uuid_requested = result
        (ok, result) = db_conn.dbe_alloc('customer-attachment', obj_dict, uuid_requested)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict['fq_name'])
            self.config_object_error(None, fq_name_str, 'customer_attachment', 'http_post', result)
            abort(code, msg)
        cleanup_on_failure.append((db_conn.dbe_release, ['customer_attachment', fq_name]))

        obj_ids = result

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')


        # type-specific hook
        r_class = self.get_resource_class('customer-attachment')
        if r_class:
            try:
                (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
            except Exception as e:
                ok = False
                result = (500, str(e))
            if not ok:
                for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                    fail_cleanup_callable(*cleanup_args)
                (code, msg) = result
                fq_name_str = ':'.join(fq_name)
                self.config_object_error(None, fq_name_str, 'customer_attachment', 'http_post', msg)
                abort(code, msg)
            callable = getattr(r_class, 'http_post_collection_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))

        try:
            (ok, result) = \
                db_conn.dbe_create('customer-attachment', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)

        if not ok:
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            fq_name_str = ':'.join(fq_name)
            self.config_object_error(None, fq_name_str, 'customer_attachment', 'http_post', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['name'] = name
        rsp_body['fq_name'] = fq_name
        rsp_body['uuid'] = obj_ids['uuid']
        rsp_body['href'] = self.generate_url('customer-attachment', obj_ids['uuid'])

        try:
            self._extension_mgrs['resourceApi'].map_method('post_customer_attachment_create', obj_dict)
        except Exception as e:
            pass

        return {'customer-attachment': rsp_body}
    #end customer_attachments_http_post

    def customer_attachments_http_get(self):
        """REST GET of the customer-attachment collection.

        Anchors the listing by parent fq_name/ids, back_ref ids or explicit
        obj_uuids; supports count, filters ('f1==v1,f2==v2'), detail and
        fields query params. Non-admin, non-detail listings filter out
        objects with user_visible == False.
        """
        # gather list of uuids using 1. any specified anchors
        #                            2. any specified filters
        # if not 'detail' return list with any specified 'fields'
        # if 'detail' return list with props+refs + any specified 'fields'

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')
        parent_uuids = None
        back_ref_uuids = None
        obj_uuids = None
        if (('parent_fq_name_str' in request.query) and
            ('parent_type' in request.query)):
            parent_fq_name = request.query.parent_fq_name_str.split(':')
            parent_type = request.query.parent_type
            parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
        elif 'parent_id' in request.query:
            parent_ids = request.query.parent_id.split(',')
            # uuid.UUID() canonicalizes/validates each supplied id
            parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
        if 'back_ref_id' in request.query:
            back_ref_ids = request.query.back_ref_id.split(',')
            back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
        if 'obj_uuids' in request.query:
            obj_uuids = request.query.obj_uuids.split(',')

        # common handling for all resource get
        (ok, result) = self._get_common(request, parent_uuids)
        if not ok:
            (code, msg) = result
            self.config_object_error(None, None, 'customer_attachments', 'http_get_collection', msg)
            abort(code, msg)

        if 'count' in request.query:
            count = 'true' in request.query.count.lower()
        else:
            count = False

        filter_params = request.query.filters
        if filter_params:
            try:
                # filters syntax: name==value[,name==value...]
                ff_key_vals = filter_params.split(',')
                ff_names = [ff.split('==')[0] for ff in ff_key_vals]
                ff_values = [ff.split('==')[1] for ff in ff_key_vals]
                filters = {'field_names': ff_names, 'field_values': ff_values}
            except Exception as e:
                abort(400, 'Invalid filter ' + filter_params)
        else:
            filters = None
        db_conn = self._db_conn
        (ok, result) = \
            db_conn.dbe_list('customer-attachment', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
        if not ok:
            self.config_object_error(None, None, 'customer_attachments', 'http_get_collection', result)
            abort(404, result)

        # If only counting, return early
        if count:
            return {'customer-attachments': {'count': result}}

        if 'detail' in request.query:
            detail = 'true' in request.query.detail.lower()
        else:
            detail = False

        fq_names_uuids = result
        obj_dicts = []
        if not detail:
            if not self.is_admin_request():
                # non-admin: must read id_perms to enforce user_visible
                obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
                obj_fields = [u'id_perms']
                (ok, result) = db_conn.dbe_read_multi('customer-attachment', obj_ids_list, obj_fields)
                if not ok:
                    result = []
                for obj_result in result:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    if obj_result['id_perms'].get('user_visible', True):
                        obj_dict = {}
                        obj_dict['uuid'] = obj_result['uuid']
                        obj_dict['href'] = self.generate_url('customer-attachment', obj_result['uuid'])
                        obj_dict['fq_name'] = obj_result['fq_name']
                        obj_dicts.append(obj_dict)
            else:
                for fq_name, obj_uuid in fq_names_uuids:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    obj_dict = {}
                    obj_dict['uuid'] = obj_uuid
                    obj_dict['href'] = self.generate_url('customer-attachment', obj_uuid)
                    obj_dict['fq_name'] = fq_name
                    obj_dicts.append(obj_dict)
        else: #detail
            obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
            # default detail fields: all props + all refs
            obj_fields = [u'attachment_address', u'id_perms', u'display_name'] + ['virtual_machine_interface_refs', u'floating_ip_refs']
            if 'fields' in request.query:
                obj_fields.extend(request.query.fields.split(','))
            (ok, result) = db_conn.dbe_read_multi('customer-attachment', obj_ids_list, obj_fields)

            if not ok:
                result = []

            for obj_result in result:
                # give chance for zk heartbeat/ping
                gevent.sleep(0)
                obj_dict = {}
                obj_dict['name'] = obj_result['fq_name'][-1]
                obj_dict['href'] = self.generate_url('customer-attachment', obj_result['uuid'])
                obj_dict.update(obj_result)
                if (obj_dict['id_perms'].get('user_visible', True) or
                    self.is_admin_request()):
                    obj_dicts.append({'customer-attachment': obj_dict})

        return {'customer-attachments': obj_dicts}
    #end customer_attachments_http_get

    def _customer_attachment_create_default_children(self, parent_obj):
        # customer-attachment declares no default children; nothing to create
        pass
    #end _customer_attachment_create_default_children

    def _customer_attachment_delete_default_children(self, parent_dict):
        # customer-attachment declares no default children; nothing to delete
        pass
    #end _customer_attachment_delete_default_children

    def service_appliance_set_http_get(self, id):
        """REST GET of one service-appliance-set by uuid.

        Same generated flow as customer_attachment_http_get, with this
        type's own properties, back-references and children field lists.
        """
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_service_appliance_set_read', id)
        except Exception as e:
            # best-effort hook; ignore failures
            pass

        # TODO get vals from request out of the global ASAP
        etag = request.headers.get('If-None-Match')
        try:
            obj_type = self._db_conn.uuid_to_obj_type(id)
        except NoIdError:
            obj_type = None
        if obj_type != 'service_appliance_set':
            abort(404, 'No service-appliance-set object found for id %s' %(id))
        # common handling for all resource get
        (ok, result) = self._get_common(request, id)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'service_appliance_set', 'http_get', msg)
            abort(code, msg)

        # type-specific hook
        r_class = self.get_resource_class('service-appliance-set')
        if r_class:
            r_class.http_get(id)

        db_conn = self._db_conn
        if etag:
            obj_ids = {'uuid': id}
            (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
            if not ok:
                # Not present in DB
                self.config_object_error(id, None, 'service_appliance_set', 'http_get', result)
                abort(404, result)

            is_latest = result
            if is_latest:
                # send Not-Modified, caches use this for read optimization
                response.status = 304
                return
        #end if etag

        obj_ids = {'uuid': id}

        # Generate field list for db layer
        properties = [u'service_appliance_set_properties', u'service_appliance_driver', u'service_appliance_ha_mode', u'id_perms', u'display_name']
        references = []
        back_references = [u'global_system_config_back_refs', u'loadbalancer_pool_back_refs']
        children = [u'service_appliances']
        if 'fields' in request.query:
            obj_fields = request.query.fields.split(',')
        else: # default props + children + refs + backrefs
            obj_fields = properties + references
            if 'exclude_back_refs' not in request.query:
                obj_fields = obj_fields + back_references
            if 'exclude_children' not in request.query:
                obj_fields = obj_fields + children

        (ok, result) = db_conn.dbe_read('service-appliance-set', obj_ids, obj_fields)
        if not ok:
            # Not present in DB
            self.config_object_error(id, None, 'service_appliance_set', 'http_get', result)
            abort(404, result)

        # check visibility
        if (not result['id_perms'].get('user_visible', True) and
            not self.is_admin_request()):
            result = 'This object is not visible by users: %s' % id
            self.config_object_error(id, None, 'service_appliance_set', 'http_get', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('service-appliance-set', id)
        rsp_body['name'] = result['fq_name'][-1]
        rsp_body.update(result)
        id_perms = result['id_perms']
        response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
        try:
            self._extension_mgrs['resourceApi'].map_method('post_service_appliance_set_read', id, rsp_body)
        except Exception as e:
            pass

        return {'service-appliance-set': rsp_body}
    #end service_appliance_set_http_get

    def service_appliance_set_http_put(self, id):
        """REST PUT (update) of one service-appliance-set.

        XSD round-trip validation for service_appliance_set_properties
        (KeyValuePairs) and id_perms; link-perm checks on service_appliance
        refs; type-specific hook with failure cleanup; dbe_update.
        """
        key = 'service-appliance-set'
        obj_dict = request.json[key]

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_service_appliance_set_update', id, obj_dict)
        except Exception as e:
            pass

        db_conn = self._db_conn
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'service_appliance_set':
                abort(404, 'No service-appliance-set object found for id %s' %(id))
            fq_name = db_conn.uuid_to_fq_name(id)
        except NoIdError as e:
            abort(404, str(e))
        prop_dict = obj_dict.get('service_appliance_set_properties')
        if prop_dict:
            # XSD round-trip validation (export -> parse -> build)
            buf = cStringIO.StringIO()
            xx_service_appliance_set_properties = KeyValuePairs(**prop_dict)
            xx_service_appliance_set_properties.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_service_appliance_set_properties = KeyValuePairs()
            try:
                xx_service_appliance_set_properties.build(node)
            except Exception as e:
                abort(400, str(e))
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource put
        (ok, result) = self._put_common(request, 'service_appliance_set', id, fq_name, obj_dict)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'service_appliance_set', 'http_put', msg)
            abort(code, msg)

        # Validate perms
        objtype_list = [u'service_appliance']
        for obj_type in objtype_list:
            refs = obj_dict.get('%s_refs'%(obj_type), None)
            if refs:
                for ref in refs:
                    try:
                        ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
                    except NoIdError as e:
                        abort(404, str(e))
                    (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
                    if not ok:
                        (code, err_msg) = status
                        abort(code, err_msg)

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # type-specific hook
        r_class = self.get_resource_class('service-appliance-set')
        if r_class:
            (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
            if not ok:
                (code, msg) = put_result
                self.config_object_error(id, None, 'service_appliance_set', 'http_put', msg)
                abort(code, msg)
            callable = getattr(r_class, 'http_put_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))

        obj_ids = {'uuid': id}
        try:
            (ok, result) = db_conn.dbe_update('service-appliance-set', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)
        if not ok:
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'service_appliance_set', 'http_put', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('service-appliance-set', id)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_service_appliance_set_update', id, obj_dict)
        except Exception as e:
            pass

        return {'service-appliance-set': rsp_body}
    #end service_appliance_set_http_put

    def service_appliance_set_http_delete(self, id):
        """REST DELETE of one service-appliance-set.

        Refuses (409) while non-default service-appliance children or
        loadbalancer-pool back-references still exist; otherwise deletes
        default children, runs the type-specific hook, then dbe_delete.
        """
        db_conn = self._db_conn
        # if obj doesn't exist return early
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'service_appliance_set':
                abort(404, 'No service-appliance-set object found for id %s' %(id))
            _ = db_conn.uuid_to_fq_name(id)
        except NoIdError:
            abort(404, 'ID %s does not exist' %(id))

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_service_appliance_set_delete', id)
        except Exception as e:
            pass

        # read in obj from db (accepting error) to get details of it
        obj_ids = {'uuid': id}
        back_references = [u'global_system_config_back_refs', u'loadbalancer_pool_back_refs']
        children = [u'service_appliances']
        obj_fields = children + back_references
        (read_ok, read_result) = db_conn.dbe_read('service-appliance-set', obj_ids, obj_fields)
        if not read_ok:
            if read_result.startswith('Unknown id:'):
                abort(404, 'ID %s does not exist' %(id))
            else:
                self.config_object_error(id, None, 'service_appliance_set', 'http_delete', read_result)
                # proceed down to delete the resource
                # NOTE(review): read_result is an error string on this path;
                # the .get()/['fq_name'] accesses below assume a dict — confirm

        # common handling for all resource delete
        parent_type = read_result.get('parent_type', None)
        (ok, del_result) = self._delete_common(request, 'service_appliance_set', id, parent_type)
        if not ok:
            (code, msg) = del_result
            self.config_object_error(id, None, 'service_appliance_set', 'http_delete', msg)
            abort(code, msg)

        fq_name = read_result['fq_name']
        ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('service-appliance-set', fq_name)
        obj_ids['imid'] = ifmap_id
        if parent_type:
            parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
            obj_ids['parent_imid'] = parent_imid

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []

        # type-specific hook
        r_class = self.get_resource_class('service-appliance-set')
        if r_class:
            if read_ok:
                # fail if non-default children or backrefs exist
                service_appliances = read_result.get('service_appliances', None)
                if service_appliances:
                    has_infos = read_result['service_appliances']
                    # more than one child, or one child that is not the
                    # auto-created default, blocks deletion
                    if ((len(has_infos) > 1) or
                        (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-service-appliance')):
                        has_urls = [has_info['href'] for has_info in has_infos]
                        has_str = ', '.join(has_urls)
                        err_msg = 'Children ' + has_str + ' still exist'
                        self.config_object_error(id, None, 'service_appliance_set', 'http_delete', err_msg)
                        abort(409, err_msg)

                loadbalancer_pool_back_refs = read_result.get('loadbalancer_pool_back_refs', None)
                if loadbalancer_pool_back_refs:
                    back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['loadbalancer_pool_back_refs']]
                    back_ref_str = ', '.join(back_ref_urls)
                    err_msg = 'Back-References from ' + back_ref_str + ' still exist'
                    self.config_object_error(id, None, 'service_appliance_set', 'http_delete', err_msg)
                    abort(409, err_msg)


                # Delete default children first
                self._service_appliance_set_delete_default_children(read_result)

                (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
                if not ok:
                    (code, msg) = del_result
                    self.config_object_error(id, None, 'service_appliance_set', 'http_delete', msg)
                    abort(code, msg)
                callable = getattr(r_class, 'http_delete_fail', None)
                if callable:
                    cleanup_on_failure.append((callable, [id, read_result, db_conn]))
            #end if read_ok

        try:
            (ok, del_result) = db_conn.dbe_delete('service-appliance-set', obj_ids, read_result)
        except Exception as e:
            ok = False
            del_result = str(e)
        if not ok:
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'service_appliance_set', 'http_delete', del_result)
            abort(409, del_result)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_service_appliance_set_delete', id, read_result)
        except Exception as e:
            pass

    #end service_appliance_set_http_delete

    def service_appliance_sets_http_post(self):
        """REST POST (create) of a service-appliance-set.

        Adds parent handling on top of the common create flow: a
        'config-root' parent_type is stripped; any other parent_type is
        verified to exist and checked for write permission before ids are
        allocated.
        """
        key = 'service-appliance-set'
        obj_dict = request.json[key]
        self._post_validate(key, obj_dict=obj_dict)
        fq_name = obj_dict['fq_name']

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_service_appliance_set_create', obj_dict)
        except Exception as e:
            pass

        prop_dict = obj_dict.get('service_appliance_set_properties')
        if prop_dict:
            # XSD round-trip validation (export -> parse -> build)
            buf = cStringIO.StringIO()
            xx_service_appliance_set_properties = KeyValuePairs(**prop_dict)
            xx_service_appliance_set_properties.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_service_appliance_set_properties = KeyValuePairs()
            try:
                xx_service_appliance_set_properties.build(node)
            except Exception as e:
                abort(400, str(e))
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource create
        (ok, result) = self._post_common(request, 'service-appliance-set', obj_dict)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict.get('fq_name', []))
            self.config_object_error(None, fq_name_str, 'service_appliance_set', 'http_post', msg)
            abort(code, msg)

        name = obj_dict['fq_name'][-1]
        fq_name = obj_dict['fq_name']

        db_conn = self._db_conn

        # if client gave parent_type of config-root, ignore and remove
        if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
            del obj_dict['parent_type']

        if 'parent_type' in obj_dict:
            # non config-root child, verify parent exists
            parent_type = obj_dict['parent_type']
            parent_fq_name = obj_dict['fq_name'][:-1]
            try:
                parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
                (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
                if not ok:
                    (code, err_msg) = status
                    abort(code, err_msg)
                self._permissions.set_user_role(request, obj_dict)
            except NoIdError:
                err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
                fq_name_str = ':'.join(parent_fq_name)
                self.config_object_error(None, fq_name_str, 'service_appliance_set', 'http_post', err_msg)
                abort(400, err_msg)

        # Validate perms
        objtype_list = [u'service_appliance']
        for obj_type in objtype_list:
            refs = obj_dict.get('%s_refs'%(obj_type), None)
            if refs:
                for ref in refs:
                    # NOTE(review): NoIdError not caught here (unlike PUT) —
                    # an unknown ref fq_name propagates; confirm intended
                    ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
                    (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
                    if not ok:
                        (code, err_msg) = status
                        abort(code, err_msg)

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # Alloc and Store id-mappings before creating entry on pubsub store.
+ # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('service-appliance-set', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'service_appliance_set', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['service_appliance_set', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('service-appliance-set') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'service_appliance_set', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('service-appliance-set', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'service_appliance_set', 'http_post', result) + abort(404, result) + + rsp_body = {} + rsp_body['name'] = name + rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = self.generate_url('service-appliance-set', obj_ids['uuid']) + if 'parent_type' in obj_dict: + # non config-root child, send back parent uuid/href + rsp_body['parent_uuid'] = 
parent_uuid + rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid) + + try: + self._extension_mgrs['resourceApi'].map_method('post_service_appliance_set_create', obj_dict) + except Exception as e: + pass + + return {'service-appliance-set': rsp_body} + #end service_appliance_sets_http_post + + def service_appliance_sets_http_get(self): + # gather list of uuids using 1. any specified anchors + # 2. any specified filters + # if not 'detail' return list with any specified 'fields' + # if 'detail' return list with props+refs + any specified 'fields' + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + parent_uuids = None + back_ref_uuids = None + obj_uuids = None + if (('parent_fq_name_str' in request.query) and + ('parent_type' in request.query)): + parent_fq_name = request.query.parent_fq_name_str.split(':') + parent_type = request.query.parent_type + parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)] + elif 'parent_id' in request.query: + parent_ids = request.query.parent_id.split(',') + parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids] + if 'back_ref_id' in request.query: + back_ref_ids = request.query.back_ref_id.split(',') + back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids] + if 'obj_uuids' in request.query: + obj_uuids = request.query.obj_uuids.split(',') + + # common handling for all resource get + (ok, result) = self._get_common(request, parent_uuids) + if not ok: + (code, msg) = result + self.config_object_error(None, None, 'service_appliance_sets', 'http_get_collection', msg) + abort(code, msg) + + if 'count' in request.query: + count = 'true' in request.query.count.lower() + else: + count = False + + filter_params = request.query.filters + if filter_params: + try: + ff_key_vals = filter_params.split(',') + ff_names = [ff.split('==')[0] for ff in ff_key_vals] + ff_values = [ff.split('==')[1] for ff in ff_key_vals] + filters = 
{'field_names': ff_names, 'field_values': ff_values} + except Exception as e: + abort(400, 'Invalid filter ' + filter_params) + else: + filters = None + db_conn = self._db_conn + (ok, result) = \ + db_conn.dbe_list('service-appliance-set', parent_uuids, back_ref_uuids, obj_uuids, count, filters) + if not ok: + self.config_object_error(None, None, 'service_appliance_sets', 'http_get_collection', result) + abort(404, result) + + # If only counting, return early + if count: + return {'service-appliance-sets': {'count': result}} + + if 'detail' in request.query: + detail = 'true' in request.query.detail.lower() + else: + detail = False + + fq_names_uuids = result + obj_dicts = [] + if not detail: + if not self.is_admin_request(): + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms'] + (ok, result) = db_conn.dbe_read_multi('service-appliance-set', obj_ids_list, obj_fields) + if not ok: + result = [] + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + if obj_result['id_perms'].get('user_visible', True): + obj_dict = {} + obj_dict['uuid'] = obj_result['uuid'] + obj_dict['href'] = self.generate_url('service-appliance-set', obj_result['uuid']) + obj_dict['fq_name'] = obj_result['fq_name'] + obj_dicts.append(obj_dict) + else: + for fq_name, obj_uuid in fq_names_uuids: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['uuid'] = obj_uuid + obj_dict['href'] = self.generate_url('service-appliance-set', obj_uuid) + obj_dict['fq_name'] = fq_name + obj_dicts.append(obj_dict) + else: #detail + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'service_appliance_set_properties', u'service_appliance_driver', u'service_appliance_ha_mode', u'id_perms', u'display_name'] + [] + if 'fields' in request.query: + obj_fields.extend(request.query.fields.split(',')) + (ok, result) = db_conn.dbe_read_multi('service-appliance-set', obj_ids_list, 
obj_fields) + + if not ok: + result = [] + + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['name'] = obj_result['fq_name'][-1] + obj_dict['href'] = self.generate_url('service-appliance-set', obj_result['uuid']) + obj_dict.update(obj_result) + if (obj_dict['id_perms'].get('user_visible', True) or + self.is_admin_request()): + obj_dicts.append({'service-appliance-set': obj_dict}) + + return {'service-appliance-sets': obj_dicts} + #end service_appliance_sets_http_get + + def _service_appliance_set_create_default_children(self, parent_obj): + # Create a default child only if provisioned for + r_class = self.get_resource_class('service-appliance') + if r_class and r_class.generate_default_instance: + child_obj = ServiceAppliance(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('service-appliance') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('service-appliance', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('service-appliance', obj_ids, child_dict) + self._service_appliance_create_default_children(child_obj) + + pass + #end _service_appliance_set_create_default_children + + def _service_appliance_set_delete_default_children(self, parent_dict): + # Delete a default child only if provisioned for + r_class = self.get_resource_class('service-appliance') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('service_appliances') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-service-appliance': + default_child_id = has_info['href'].split('/')[-1] + self.service_appliance_http_delete(default_child_id) + break + + pass + #end _service_appliance_set_delete_default_children + + def config_node_http_get(self, id): + try: + 
self._extension_mgrs['resourceApi'].map_method('pre_config_node_read', id) + except Exception as e: + pass + + # TODO get vals from request out of the global ASAP + etag = request.headers.get('If-None-Match') + try: + obj_type = self._db_conn.uuid_to_obj_type(id) + except NoIdError: + obj_type = None + if obj_type != 'config_node': + abort(404, 'No config-node object found for id %s' %(id)) + # common handling for all resource get + (ok, result) = self._get_common(request, id) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'config_node', 'http_get', msg) + abort(code, msg) + + # type-specific hook + r_class = self.get_resource_class('config-node') + if r_class: + r_class.http_get(id) + + db_conn = self._db_conn + if etag: + obj_ids = {'uuid': id} + (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', '')) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'config_node', 'http_get', result) + abort(404, result) + + is_latest = result + if is_latest: + # send Not-Modified, caches use this for read optimization + response.status = 304 + return + #end if etag + + obj_ids = {'uuid': id} + + # Generate field list for db layer + properties = [u'config_node_ip_address', u'id_perms', u'display_name'] + references = [] + back_references = [u'global_system_config_back_refs'] + children = [] + if 'fields' in request.query: + obj_fields = request.query.fields.split(',') + else: # default props + children + refs + backrefs + obj_fields = properties + references + if 'exclude_back_refs' not in request.query: + obj_fields = obj_fields + back_references + if 'exclude_children' not in request.query: + obj_fields = obj_fields + children + + (ok, result) = db_conn.dbe_read('config-node', obj_ids, obj_fields) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'config_node', 'http_get', result) + abort(404, result) + + # check visibility + if (not result['id_perms'].get('user_visible', True) and + not 
self.is_admin_request()): + result = 'This object is not visible by users: %s' % id + self.config_object_error(id, None, 'config_node', 'http_get', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('config-node', id) + rsp_body['name'] = result['fq_name'][-1] + rsp_body.update(result) + id_perms = result['id_perms'] + response.set_header('ETag', '"' + id_perms['last_modified'] + '"') + try: + self._extension_mgrs['resourceApi'].map_method('post_config_node_read', id, rsp_body) + except Exception as e: + pass + + return {'config-node': rsp_body} + #end config_node_http_get + + def config_node_http_put(self, id): + key = 'config-node' + obj_dict = request.json[key] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_config_node_update', id, obj_dict) + except Exception as e: + pass + + db_conn = self._db_conn + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'config_node': + abort(404, 'No config-node object found for id %s' %(id)) + fq_name = db_conn.uuid_to_fq_name(id) + except NoIdError as e: + abort(404, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource put + (ok, result) = self._put_common(request, 'config_node', id, fq_name, obj_dict) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'config_node', 'http_put', msg) + abort(code, msg) + + + # State modification starts from here. 
Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # type-specific hook + r_class = self.get_resource_class('config-node') + if r_class: + (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn) + if not ok: + (code, msg) = put_result + self.config_object_error(id, None, 'config_node', 'http_put', msg) + abort(code, msg) + callable = getattr(r_class, 'http_put_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn])) + + obj_ids = {'uuid': id} + try: + (ok, result) = db_conn.dbe_update('config-node', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'config_node', 'http_put', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('config-node', id) + + try: + self._extension_mgrs['resourceApi'].map_method('post_config_node_update', id, obj_dict) + except Exception as e: + pass + + return {'config-node': rsp_body} + #end config_node_http_put + + def config_node_http_delete(self, id): + db_conn = self._db_conn + # if obj doesn't exist return early + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'config_node': + abort(404, 'No config-node object found for id %s' %(id)) + _ = db_conn.uuid_to_fq_name(id) + except NoIdError: + abort(404, 'ID %s does not exist' %(id)) + + try: + self._extension_mgrs['resourceApi'].map_method('pre_config_node_delete', id) + except Exception as e: + pass + + # read in obj from db (accepting error) to get details of it + obj_ids = {'uuid': id} + back_references = [u'global_system_config_back_refs'] + children = [] + obj_fields = children + back_references + (read_ok, read_result) = db_conn.dbe_read('config-node', obj_ids, obj_fields) + if not read_ok: + if read_result.startswith('Unknown id:'): + 
abort(404, 'ID %s does not exist' %(id)) + else: + self.config_object_error(id, None, 'config_node', 'http_delete', read_result) + # proceed down to delete the resource + + # common handling for all resource delete + parent_type = read_result.get('parent_type', None) + (ok, del_result) = self._delete_common(request, 'config_node', id, parent_type) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'config_node', 'http_delete', msg) + abort(code, msg) + + fq_name = read_result['fq_name'] + ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('config-node', fq_name) + obj_ids['imid'] = ifmap_id + if parent_type: + parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1]) + obj_ids['parent_imid'] = parent_imid + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + + # type-specific hook + r_class = self.get_resource_class('config-node') + if r_class: + if read_ok: + # fail if non-default children or backrefs exist + + # Delete default children first + self._config_node_delete_default_children(read_result) + + (ok, del_result) = r_class.http_delete(id, read_result, db_conn) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'config_node', 'http_delete', msg) + abort(code, msg) + callable = getattr(r_class, 'http_delete_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, read_result, db_conn])) + #end if read_ok + + try: + (ok, del_result) = db_conn.dbe_delete('config-node', obj_ids, read_result) + except Exception as e: + ok = False + del_result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'config_node', 'http_delete', del_result) + abort(409, del_result) + + try: + self._extension_mgrs['resourceApi'].map_method('post_config_node_delete', id, read_result) + except Exception as e: + pass + + 
#end config_node_http_delete + + def config_nodes_http_post(self): + key = 'config-node' + obj_dict = request.json[key] + self._post_validate(key, obj_dict=obj_dict) + fq_name = obj_dict['fq_name'] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_config_node_create', obj_dict) + except Exception as e: + pass + + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource create + (ok, result) = self._post_common(request, 'config-node', obj_dict) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict.get('fq_name', [])) + self.config_object_error(None, fq_name_str, 'config_node', 'http_post', msg) + abort(code, msg) + + name = obj_dict['fq_name'][-1] + fq_name = obj_dict['fq_name'] + + db_conn = self._db_conn + + # if client gave parent_type of config-root, ignore and remove + if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root': + del obj_dict['parent_type'] + + if 'parent_type' in obj_dict: + # non config-root child, verify parent exists + parent_type = obj_dict['parent_type'] + parent_fq_name = obj_dict['fq_name'][:-1] + try: + parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name) + (ok, status) = self._permissions.check_perms_write(request, parent_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + self._permissions.set_user_role(request, obj_dict) + except NoIdError: + err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist' + fq_name_str = ':'.join(parent_fq_name) + self.config_object_error(None, fq_name_str, 'config_node', 'http_post', err_msg) + abort(400, err_msg) + + + # State modification starts from here. 
Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # Alloc and Store id-mappings before creating entry on pubsub store. + # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('config-node', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'config_node', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['config_node', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('config-node') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'config_node', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('config-node', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'config_node', 'http_post', result) + abort(404, result) + + rsp_body = {} + rsp_body['name'] = name + rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = self.generate_url('config-node', obj_ids['uuid']) + if 'parent_type' in obj_dict: + # non config-root 
child, send back parent uuid/href + rsp_body['parent_uuid'] = parent_uuid + rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid) + + try: + self._extension_mgrs['resourceApi'].map_method('post_config_node_create', obj_dict) + except Exception as e: + pass + + return {'config-node': rsp_body} + #end config_nodes_http_post + + def config_nodes_http_get(self): + # gather list of uuids using 1. any specified anchors + # 2. any specified filters + # if not 'detail' return list with any specified 'fields' + # if 'detail' return list with props+refs + any specified 'fields' + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + parent_uuids = None + back_ref_uuids = None + obj_uuids = None + if (('parent_fq_name_str' in request.query) and + ('parent_type' in request.query)): + parent_fq_name = request.query.parent_fq_name_str.split(':') + parent_type = request.query.parent_type + parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)] + elif 'parent_id' in request.query: + parent_ids = request.query.parent_id.split(',') + parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids] + if 'back_ref_id' in request.query: + back_ref_ids = request.query.back_ref_id.split(',') + back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids] + if 'obj_uuids' in request.query: + obj_uuids = request.query.obj_uuids.split(',') + + # common handling for all resource get + (ok, result) = self._get_common(request, parent_uuids) + if not ok: + (code, msg) = result + self.config_object_error(None, None, 'config_nodes', 'http_get_collection', msg) + abort(code, msg) + + if 'count' in request.query: + count = 'true' in request.query.count.lower() + else: + count = False + + filter_params = request.query.filters + if filter_params: + try: + ff_key_vals = filter_params.split(',') + ff_names = [ff.split('==')[0] for ff in ff_key_vals] + ff_values = [ff.split('==')[1] for ff in ff_key_vals] + filters = 
{'field_names': ff_names, 'field_values': ff_values} + except Exception as e: + abort(400, 'Invalid filter ' + filter_params) + else: + filters = None + db_conn = self._db_conn + (ok, result) = \ + db_conn.dbe_list('config-node', parent_uuids, back_ref_uuids, obj_uuids, count, filters) + if not ok: + self.config_object_error(None, None, 'config_nodes', 'http_get_collection', result) + abort(404, result) + + # If only counting, return early + if count: + return {'config-nodes': {'count': result}} + + if 'detail' in request.query: + detail = 'true' in request.query.detail.lower() + else: + detail = False + + fq_names_uuids = result + obj_dicts = [] + if not detail: + if not self.is_admin_request(): + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms'] + (ok, result) = db_conn.dbe_read_multi('config-node', obj_ids_list, obj_fields) + if not ok: + result = [] + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + if obj_result['id_perms'].get('user_visible', True): + obj_dict = {} + obj_dict['uuid'] = obj_result['uuid'] + obj_dict['href'] = self.generate_url('config-node', obj_result['uuid']) + obj_dict['fq_name'] = obj_result['fq_name'] + obj_dicts.append(obj_dict) + else: + for fq_name, obj_uuid in fq_names_uuids: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['uuid'] = obj_uuid + obj_dict['href'] = self.generate_url('config-node', obj_uuid) + obj_dict['fq_name'] = fq_name + obj_dicts.append(obj_dict) + else: #detail + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'config_node_ip_address', u'id_perms', u'display_name'] + [] + if 'fields' in request.query: + obj_fields.extend(request.query.fields.split(',')) + (ok, result) = db_conn.dbe_read_multi('config-node', obj_ids_list, obj_fields) + + if not ok: + result = [] + + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} 
+ obj_dict['name'] = obj_result['fq_name'][-1] + obj_dict['href'] = self.generate_url('config-node', obj_result['uuid']) + obj_dict.update(obj_result) + if (obj_dict['id_perms'].get('user_visible', True) or + self.is_admin_request()): + obj_dicts.append({'config-node': obj_dict}) + + return {'config-nodes': obj_dicts} + #end config_nodes_http_get + + def _config_node_create_default_children(self, parent_obj): + pass + #end _config_node_create_default_children + + def _config_node_delete_default_children(self, parent_dict): + pass + #end _config_node_delete_default_children + + def qos_queue_http_get(self, id): + try: + self._extension_mgrs['resourceApi'].map_method('pre_qos_queue_read', id) + except Exception as e: + pass + + # TODO get vals from request out of the global ASAP + etag = request.headers.get('If-None-Match') + try: + obj_type = self._db_conn.uuid_to_obj_type(id) + except NoIdError: + obj_type = None + if obj_type != 'qos_queue': + abort(404, 'No qos-queue object found for id %s' %(id)) + # common handling for all resource get + (ok, result) = self._get_common(request, id) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'qos_queue', 'http_get', msg) + abort(code, msg) + + # type-specific hook + r_class = self.get_resource_class('qos-queue') + if r_class: + r_class.http_get(id) + + db_conn = self._db_conn + if etag: + obj_ids = {'uuid': id} + (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', '')) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'qos_queue', 'http_get', result) + abort(404, result) + + is_latest = result + if is_latest: + # send Not-Modified, caches use this for read optimization + response.status = 304 + return + #end if etag + + obj_ids = {'uuid': id} + + # Generate field list for db layer + properties = [u'min_bandwidth', u'max_bandwidth', u'id_perms', u'display_name'] + references = [] + back_references = [u'project_back_refs', u'qos_forwarding_class_back_refs'] + children 
= [] + if 'fields' in request.query: + obj_fields = request.query.fields.split(',') + else: # default props + children + refs + backrefs + obj_fields = properties + references + if 'exclude_back_refs' not in request.query: + obj_fields = obj_fields + back_references + if 'exclude_children' not in request.query: + obj_fields = obj_fields + children + + (ok, result) = db_conn.dbe_read('qos-queue', obj_ids, obj_fields) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'qos_queue', 'http_get', result) + abort(404, result) + + # check visibility + if (not result['id_perms'].get('user_visible', True) and + not self.is_admin_request()): + result = 'This object is not visible by users: %s' % id + self.config_object_error(id, None, 'qos_queue', 'http_get', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('qos-queue', id) + rsp_body['name'] = result['fq_name'][-1] + rsp_body.update(result) + id_perms = result['id_perms'] + response.set_header('ETag', '"' + id_perms['last_modified'] + '"') + try: + self._extension_mgrs['resourceApi'].map_method('post_qos_queue_read', id, rsp_body) + except Exception as e: + pass + + return {'qos-queue': rsp_body} + #end qos_queue_http_get + + def qos_queue_http_put(self, id): + key = 'qos-queue' + obj_dict = request.json[key] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_qos_queue_update', id, obj_dict) + except Exception as e: + pass + + db_conn = self._db_conn + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'qos_queue': + abort(404, 'No qos-queue object found for id %s' %(id)) + fq_name = db_conn.uuid_to_fq_name(id) + except NoIdError as e: + abort(404, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except 
Exception as e: + abort(400, str(e)) + # common handling for all resource put + (ok, result) = self._put_common(request, 'qos_queue', id, fq_name, obj_dict) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'qos_queue', 'http_put', msg) + abort(code, msg) + + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # type-specific hook + r_class = self.get_resource_class('qos-queue') + if r_class: + (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn) + if not ok: + (code, msg) = put_result + self.config_object_error(id, None, 'qos_queue', 'http_put', msg) + abort(code, msg) + callable = getattr(r_class, 'http_put_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn])) + + obj_ids = {'uuid': id} + try: + (ok, result) = db_conn.dbe_update('qos-queue', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'qos_queue', 'http_put', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('qos-queue', id) + + try: + self._extension_mgrs['resourceApi'].map_method('post_qos_queue_update', id, obj_dict) + except Exception as e: + pass + + return {'qos-queue': rsp_body} + #end qos_queue_http_put + + def qos_queue_http_delete(self, id): + db_conn = self._db_conn + # if obj doesn't exist return early + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'qos_queue': + abort(404, 'No qos-queue object found for id %s' %(id)) + _ = db_conn.uuid_to_fq_name(id) + except NoIdError: + abort(404, 'ID %s does not exist' %(id)) + + try: + self._extension_mgrs['resourceApi'].map_method('pre_qos_queue_delete', id) + except Exception as e: + pass + + # read in obj from db (accepting error) to get 
details of it + obj_ids = {'uuid': id} + back_references = [u'project_back_refs', u'qos_forwarding_class_back_refs'] + children = [] + obj_fields = children + back_references + (read_ok, read_result) = db_conn.dbe_read('qos-queue', obj_ids, obj_fields) + if not read_ok: + if read_result.startswith('Unknown id:'): + abort(404, 'ID %s does not exist' %(id)) + else: + self.config_object_error(id, None, 'qos_queue', 'http_delete', read_result) + # proceed down to delete the resource + + # common handling for all resource delete + parent_type = read_result.get('parent_type', None) + (ok, del_result) = self._delete_common(request, 'qos_queue', id, parent_type) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'qos_queue', 'http_delete', msg) + abort(code, msg) + + fq_name = read_result['fq_name'] + ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('qos-queue', fq_name) + obj_ids['imid'] = ifmap_id + if parent_type: + parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1]) + obj_ids['parent_imid'] = parent_imid + + # State modification starts from here. 
Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + + # type-specific hook + r_class = self.get_resource_class('qos-queue') + if r_class: + if read_ok: + # fail if non-default children or backrefs exist + qos_forwarding_class_back_refs = read_result.get('qos_forwarding_class_back_refs', None) + if qos_forwarding_class_back_refs: + back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['qos_forwarding_class_back_refs']] + back_ref_str = ', '.join(back_ref_urls) + err_msg = 'Back-References from ' + back_ref_str + ' still exist' + self.config_object_error(id, None, 'qos_queue', 'http_delete', err_msg) + abort(409, err_msg) + + + # Delete default children first + self._qos_queue_delete_default_children(read_result) + + (ok, del_result) = r_class.http_delete(id, read_result, db_conn) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'qos_queue', 'http_delete', msg) + abort(code, msg) + callable = getattr(r_class, 'http_delete_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, read_result, db_conn])) + #end if read_ok + + try: + (ok, del_result) = db_conn.dbe_delete('qos-queue', obj_ids, read_result) + except Exception as e: + ok = False + del_result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'qos_queue', 'http_delete', del_result) + abort(409, del_result) + + try: + self._extension_mgrs['resourceApi'].map_method('post_qos_queue_delete', id, read_result) + except Exception as e: + pass + + #end qos_queue_http_delete + + def qos_queues_http_post(self): + key = 'qos-queue' + obj_dict = request.json[key] + self._post_validate(key, obj_dict=obj_dict) + fq_name = obj_dict['fq_name'] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_qos_queue_create', obj_dict) + except Exception as e: + pass + + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = 
cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource create + (ok, result) = self._post_common(request, 'qos-queue', obj_dict) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict.get('fq_name', [])) + self.config_object_error(None, fq_name_str, 'qos_queue', 'http_post', msg) + abort(code, msg) + + name = obj_dict['fq_name'][-1] + fq_name = obj_dict['fq_name'] + + db_conn = self._db_conn + + # if client gave parent_type of config-root, ignore and remove + if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root': + del obj_dict['parent_type'] + + if 'parent_type' in obj_dict: + # non config-root child, verify parent exists + parent_type = obj_dict['parent_type'] + parent_fq_name = obj_dict['fq_name'][:-1] + try: + parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name) + (ok, status) = self._permissions.check_perms_write(request, parent_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + self._permissions.set_user_role(request, obj_dict) + except NoIdError: + err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist' + fq_name_str = ':'.join(parent_fq_name) + self.config_object_error(None, fq_name_str, 'qos_queue', 'http_post', err_msg) + abort(400, err_msg) + + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # Alloc and Store id-mappings before creating entry on pubsub store. 
+ # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('qos-queue', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'qos_queue', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['qos_queue', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('qos-queue') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'qos_queue', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('qos-queue', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'qos_queue', 'http_post', result) + abort(404, result) + + rsp_body = {} + rsp_body['name'] = name + rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = self.generate_url('qos-queue', obj_ids['uuid']) + if 'parent_type' in obj_dict: + # non config-root child, send back parent uuid/href + rsp_body['parent_uuid'] = parent_uuid + rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid) + + try: + 
self._extension_mgrs['resourceApi'].map_method('post_qos_queue_create', obj_dict) + except Exception as e: + pass + + return {'qos-queue': rsp_body} + #end qos_queues_http_post + + def qos_queues_http_get(self): + # gather list of uuids using 1. any specified anchors + # 2. any specified filters + # if not 'detail' return list with any specified 'fields' + # if 'detail' return list with props+refs + any specified 'fields' + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + parent_uuids = None + back_ref_uuids = None + obj_uuids = None + if (('parent_fq_name_str' in request.query) and + ('parent_type' in request.query)): + parent_fq_name = request.query.parent_fq_name_str.split(':') + parent_type = request.query.parent_type + parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)] + elif 'parent_id' in request.query: + parent_ids = request.query.parent_id.split(',') + parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids] + if 'back_ref_id' in request.query: + back_ref_ids = request.query.back_ref_id.split(',') + back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids] + if 'obj_uuids' in request.query: + obj_uuids = request.query.obj_uuids.split(',') + + # common handling for all resource get + (ok, result) = self._get_common(request, parent_uuids) + if not ok: + (code, msg) = result + self.config_object_error(None, None, 'qos_queues', 'http_get_collection', msg) + abort(code, msg) + + if 'count' in request.query: + count = 'true' in request.query.count.lower() + else: + count = False + + filter_params = request.query.filters + if filter_params: + try: + ff_key_vals = filter_params.split(',') + ff_names = [ff.split('==')[0] for ff in ff_key_vals] + ff_values = [ff.split('==')[1] for ff in ff_key_vals] + filters = {'field_names': ff_names, 'field_values': ff_values} + except Exception as e: + abort(400, 'Invalid filter ' + filter_params) + else: + filters = None + db_conn = 
self._db_conn + (ok, result) = \ + db_conn.dbe_list('qos-queue', parent_uuids, back_ref_uuids, obj_uuids, count, filters) + if not ok: + self.config_object_error(None, None, 'qos_queues', 'http_get_collection', result) + abort(404, result) + + # If only counting, return early + if count: + return {'qos-queues': {'count': result}} + + if 'detail' in request.query: + detail = 'true' in request.query.detail.lower() + else: + detail = False + + fq_names_uuids = result + obj_dicts = [] + if not detail: + if not self.is_admin_request(): + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms'] + (ok, result) = db_conn.dbe_read_multi('qos-queue', obj_ids_list, obj_fields) + if not ok: + result = [] + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + if obj_result['id_perms'].get('user_visible', True): + obj_dict = {} + obj_dict['uuid'] = obj_result['uuid'] + obj_dict['href'] = self.generate_url('qos-queue', obj_result['uuid']) + obj_dict['fq_name'] = obj_result['fq_name'] + obj_dicts.append(obj_dict) + else: + for fq_name, obj_uuid in fq_names_uuids: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['uuid'] = obj_uuid + obj_dict['href'] = self.generate_url('qos-queue', obj_uuid) + obj_dict['fq_name'] = fq_name + obj_dicts.append(obj_dict) + else: #detail + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'min_bandwidth', u'max_bandwidth', u'id_perms', u'display_name'] + [] + if 'fields' in request.query: + obj_fields.extend(request.query.fields.split(',')) + (ok, result) = db_conn.dbe_read_multi('qos-queue', obj_ids_list, obj_fields) + + if not ok: + result = [] + + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['name'] = obj_result['fq_name'][-1] + obj_dict['href'] = self.generate_url('qos-queue', obj_result['uuid']) + obj_dict.update(obj_result) + if 
(obj_dict['id_perms'].get('user_visible', True) or
                    self.is_admin_request()):
                    obj_dicts.append({'qos-queue': obj_dict})

        return {'qos-queues': obj_dicts}
    #end qos_queues_http_get

    def _qos_queue_create_default_children(self, parent_obj):
        # qos-queue defines no child types that are provisioned by default.
        pass
    #end _qos_queue_create_default_children

    def _qos_queue_delete_default_children(self, parent_dict):
        # qos-queue has no default children, so nothing to clean up.
        pass
    #end _qos_queue_delete_default_children

    def virtual_machine_http_get(self, id):
        """REST GET handler for a single virtual-machine object.

        Honors the If-None-Match header (ETag short-circuit via 304) and
        the 'fields' query parameter; returns {'virtual-machine': <dict>}.
        """
        # pre-read extension hook; hook errors are ignored (best-effort)
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_virtual_machine_read', id)
        except Exception as e:
            pass

        # TODO get vals from request out of the global ASAP
        etag = request.headers.get('If-None-Match')
        # reject ids that do not belong to this resource type
        try:
            obj_type = self._db_conn.uuid_to_obj_type(id)
        except NoIdError:
            obj_type = None
        if obj_type != 'virtual_machine':
            abort(404, 'No virtual-machine object found for id %s' %(id))
        # common handling for all resource get
        (ok, result) = self._get_common(request, id)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'virtual_machine', 'http_get', msg)
            abort(code, msg)

        # type-specific hook
        r_class = self.get_resource_class('virtual-machine')
        if r_class:
            r_class.http_get(id)

        db_conn = self._db_conn
        if etag:
            # ETag is stored without surrounding quotes in the DB layer
            obj_ids = {'uuid': id}
            (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
            if not ok:
                # Not present in DB
                self.config_object_error(id, None, 'virtual_machine', 'http_get', result)
                abort(404, result)

            is_latest = result
            if is_latest:
                # send Not-Modified, caches use this for read optimization
                response.status = 304
                return
        #end if etag

        obj_ids = {'uuid': id}

        # Generate field list for db layer
        properties = [u'id_perms', u'display_name']
        references = [u'service_instance_refs']
        back_references = ['virtual_machine_interface_back_refs', 'virtual_router_back_refs']
        children = ['virtual_machine_interfaces']
        if 'fields' in request.query:
            obj_fields = request.query.fields.split(',')
        else: # default
props + children + refs + backrefs + obj_fields = properties + references + if 'exclude_back_refs' not in request.query: + obj_fields = obj_fields + back_references + if 'exclude_children' not in request.query: + obj_fields = obj_fields + children + + (ok, result) = db_conn.dbe_read('virtual-machine', obj_ids, obj_fields) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'virtual_machine', 'http_get', result) + abort(404, result) + + # check visibility + if (not result['id_perms'].get('user_visible', True) and + not self.is_admin_request()): + result = 'This object is not visible by users: %s' % id + self.config_object_error(id, None, 'virtual_machine', 'http_get', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('virtual-machine', id) + rsp_body['name'] = result['fq_name'][-1] + rsp_body.update(result) + id_perms = result['id_perms'] + response.set_header('ETag', '"' + id_perms['last_modified'] + '"') + try: + self._extension_mgrs['resourceApi'].map_method('post_virtual_machine_read', id, rsp_body) + except Exception as e: + pass + + return {'virtual-machine': rsp_body} + #end virtual_machine_http_get + + def virtual_machine_http_put(self, id): + key = 'virtual-machine' + obj_dict = request.json[key] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_virtual_machine_update', id, obj_dict) + except Exception as e: + pass + + db_conn = self._db_conn + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'virtual_machine': + abort(404, 'No virtual-machine object found for id %s' %(id)) + fq_name = db_conn.uuid_to_fq_name(id) + except NoIdError as e: + abort(404, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, 
str(e)) + # common handling for all resource put + (ok, result) = self._put_common(request, 'virtual_machine', id, fq_name, obj_dict) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'virtual_machine', 'http_put', msg) + abort(code, msg) + + # Validate perms + objtype_list = ['virtual_machine_interface', u'service_instance'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + try: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + except NoIdError as e: + abort(404, str(e)) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # type-specific hook + r_class = self.get_resource_class('virtual-machine') + if r_class: + (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn) + if not ok: + (code, msg) = put_result + self.config_object_error(id, None, 'virtual_machine', 'http_put', msg) + abort(code, msg) + callable = getattr(r_class, 'http_put_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn])) + + obj_ids = {'uuid': id} + try: + (ok, result) = db_conn.dbe_update('virtual-machine', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'virtual_machine', 'http_put', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('virtual-machine', id) + + try: + self._extension_mgrs['resourceApi'].map_method('post_virtual_machine_update', id, obj_dict) + except Exception as e: + pass + + return {'virtual-machine': rsp_body} + #end virtual_machine_http_put + + def 
virtual_machine_http_delete(self, id): + db_conn = self._db_conn + # if obj doesn't exist return early + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'virtual_machine': + abort(404, 'No virtual-machine object found for id %s' %(id)) + _ = db_conn.uuid_to_fq_name(id) + except NoIdError: + abort(404, 'ID %s does not exist' %(id)) + + try: + self._extension_mgrs['resourceApi'].map_method('pre_virtual_machine_delete', id) + except Exception as e: + pass + + # read in obj from db (accepting error) to get details of it + obj_ids = {'uuid': id} + back_references = ['virtual_machine_interface_back_refs', 'virtual_router_back_refs'] + children = ['virtual_machine_interfaces'] + obj_fields = children + back_references + (read_ok, read_result) = db_conn.dbe_read('virtual-machine', obj_ids, obj_fields) + if not read_ok: + if read_result.startswith('Unknown id:'): + abort(404, 'ID %s does not exist' %(id)) + else: + self.config_object_error(id, None, 'virtual_machine', 'http_delete', read_result) + # proceed down to delete the resource + + # common handling for all resource delete + parent_type = read_result.get('parent_type', None) + (ok, del_result) = self._delete_common(request, 'virtual_machine', id, parent_type) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'virtual_machine', 'http_delete', msg) + abort(code, msg) + + fq_name = read_result['fq_name'] + ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('virtual-machine', fq_name) + obj_ids['imid'] = ifmap_id + if parent_type: + parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1]) + obj_ids['parent_imid'] = parent_imid + + # State modification starts from here. 
Ensure that cleanup is done for all state changes
        cleanup_on_failure = []

        # type-specific hook
        r_class = self.get_resource_class('virtual-machine')
        if r_class:
            if read_ok:
                # fail if non-default children or backrefs exist
                # (a single auto-provisioned 'default-*' child is tolerated)
                virtual_machine_interfaces = read_result.get('virtual_machine_interfaces', None)
                if virtual_machine_interfaces:
                    has_infos = read_result['virtual_machine_interfaces']
                    if ((len(has_infos) > 1) or
                        (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-virtual-machine-interface')):
                        has_urls = [has_info['href'] for has_info in has_infos]
                        has_str = ', '.join(has_urls)
                        err_msg = 'Children ' + has_str + ' still exist'
                        self.config_object_error(id, None, 'virtual_machine', 'http_delete', err_msg)
                        abort(409, err_msg)

                # any remaining back-reference blocks the delete with 409
                virtual_machine_interface_back_refs = read_result.get('virtual_machine_interface_back_refs', None)
                if virtual_machine_interface_back_refs:
                    back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_machine_interface_back_refs']]
                    back_ref_str = ', '.join(back_ref_urls)
                    err_msg = 'Back-References from ' + back_ref_str + ' still exist'
                    self.config_object_error(id, None, 'virtual_machine', 'http_delete', err_msg)
                    abort(409, err_msg)

                virtual_router_back_refs = read_result.get('virtual_router_back_refs', None)
                if virtual_router_back_refs:
                    back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_router_back_refs']]
                    back_ref_str = ', '.join(back_ref_urls)
                    err_msg = 'Back-References from ' + back_ref_str + ' still exist'
                    self.config_object_error(id, None, 'virtual_machine', 'http_delete', err_msg)
                    abort(409, err_msg)


                # Delete default children first
                self._virtual_machine_delete_default_children(read_result)

                (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
                if not ok:
                    (code, msg) = del_result
                    self.config_object_error(id, None, 'virtual_machine', 'http_delete', msg)
                    abort(code, msg)
                # register the type-specific undo hook, if the class provides one
                callable =
getattr(r_class, 'http_delete_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, read_result, db_conn])) + #end if read_ok + + try: + (ok, del_result) = db_conn.dbe_delete('virtual-machine', obj_ids, read_result) + except Exception as e: + ok = False + del_result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'virtual_machine', 'http_delete', del_result) + abort(409, del_result) + + try: + self._extension_mgrs['resourceApi'].map_method('post_virtual_machine_delete', id, read_result) + except Exception as e: + pass + + #end virtual_machine_http_delete + + def virtual_machines_http_post(self): + key = 'virtual-machine' + obj_dict = request.json[key] + self._post_validate(key, obj_dict=obj_dict) + fq_name = obj_dict['fq_name'] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_virtual_machine_create', obj_dict) + except Exception as e: + pass + + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource create + (ok, result) = self._post_common(request, 'virtual-machine', obj_dict) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict.get('fq_name', [])) + self.config_object_error(None, fq_name_str, 'virtual_machine', 'http_post', msg) + abort(code, msg) + + name = obj_dict['fq_name'][-1] + fq_name = obj_dict['fq_name'] + + db_conn = self._db_conn + + # Validate perms + objtype_list = ['virtual_machine_interface', u'service_instance'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + (ok, status) = 
self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # Alloc and Store id-mappings before creating entry on pubsub store. + # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('virtual-machine', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'virtual_machine', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['virtual_machine', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('virtual-machine') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'virtual_machine', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('virtual-machine', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'virtual_machine', 'http_post', result) + abort(404, result) + + rsp_body = {} + rsp_body['name'] = name + 
rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = self.generate_url('virtual-machine', obj_ids['uuid']) + + try: + self._extension_mgrs['resourceApi'].map_method('post_virtual_machine_create', obj_dict) + except Exception as e: + pass + + return {'virtual-machine': rsp_body} + #end virtual_machines_http_post + + def virtual_machines_http_get(self): + # gather list of uuids using 1. any specified anchors + # 2. any specified filters + # if not 'detail' return list with any specified 'fields' + # if 'detail' return list with props+refs + any specified 'fields' + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + parent_uuids = None + back_ref_uuids = None + obj_uuids = None + if (('parent_fq_name_str' in request.query) and + ('parent_type' in request.query)): + parent_fq_name = request.query.parent_fq_name_str.split(':') + parent_type = request.query.parent_type + parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)] + elif 'parent_id' in request.query: + parent_ids = request.query.parent_id.split(',') + parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids] + if 'back_ref_id' in request.query: + back_ref_ids = request.query.back_ref_id.split(',') + back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids] + if 'obj_uuids' in request.query: + obj_uuids = request.query.obj_uuids.split(',') + + # common handling for all resource get + (ok, result) = self._get_common(request, parent_uuids) + if not ok: + (code, msg) = result + self.config_object_error(None, None, 'virtual_machines', 'http_get_collection', msg) + abort(code, msg) + + if 'count' in request.query: + count = 'true' in request.query.count.lower() + else: + count = False + + filter_params = request.query.filters + if filter_params: + try: + ff_key_vals = filter_params.split(',') + ff_names = [ff.split('==')[0] for ff in ff_key_vals] + ff_values = [ff.split('==')[1] for ff in 
ff_key_vals] + filters = {'field_names': ff_names, 'field_values': ff_values} + except Exception as e: + abort(400, 'Invalid filter ' + filter_params) + else: + filters = None + db_conn = self._db_conn + (ok, result) = \ + db_conn.dbe_list('virtual-machine', parent_uuids, back_ref_uuids, obj_uuids, count, filters) + if not ok: + self.config_object_error(None, None, 'virtual_machines', 'http_get_collection', result) + abort(404, result) + + # If only counting, return early + if count: + return {'virtual-machines': {'count': result}} + + if 'detail' in request.query: + detail = 'true' in request.query.detail.lower() + else: + detail = False + + fq_names_uuids = result + obj_dicts = [] + if not detail: + if not self.is_admin_request(): + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms'] + (ok, result) = db_conn.dbe_read_multi('virtual-machine', obj_ids_list, obj_fields) + if not ok: + result = [] + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + if obj_result['id_perms'].get('user_visible', True): + obj_dict = {} + obj_dict['uuid'] = obj_result['uuid'] + obj_dict['href'] = self.generate_url('virtual-machine', obj_result['uuid']) + obj_dict['fq_name'] = obj_result['fq_name'] + obj_dicts.append(obj_dict) + else: + for fq_name, obj_uuid in fq_names_uuids: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['uuid'] = obj_uuid + obj_dict['href'] = self.generate_url('virtual-machine', obj_uuid) + obj_dict['fq_name'] = fq_name + obj_dicts.append(obj_dict) + else: #detail + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms', u'display_name'] + [u'service_instance_refs'] + if 'fields' in request.query: + obj_fields.extend(request.query.fields.split(',')) + (ok, result) = db_conn.dbe_read_multi('virtual-machine', obj_ids_list, obj_fields) + + if not ok: + result = [] + + for obj_result in result: + # give chance for 
zk heartbeat/ping
                gevent.sleep(0)
                obj_dict = {}
                obj_dict['name'] = obj_result['fq_name'][-1]
                obj_dict['href'] = self.generate_url('virtual-machine', obj_result['uuid'])
                obj_dict.update(obj_result)
                # non-admins only see user-visible objects
                if (obj_dict['id_perms'].get('user_visible', True) or
                    self.is_admin_request()):
                    obj_dicts.append({'virtual-machine': obj_dict})

        return {'virtual-machines': obj_dicts}
    #end virtual_machines_http_get

    def _virtual_machine_create_default_children(self, parent_obj):
        # Create a default child only if provisioned for
        r_class = self.get_resource_class('virtual-machine-interface')
        if r_class and r_class.generate_default_instance:
            child_obj = VirtualMachineInterface(parent_obj = parent_obj)
            child_dict = child_obj.__dict__
            fq_name = child_dict['fq_name']
            child_dict['id_perms'] = self._get_default_id_perms('virtual-machine-interface')

            db_conn = self._db_conn
            (ok, result) = db_conn.dbe_alloc('virtual-machine-interface', child_dict)
            if not ok:
                return (ok, result)

            obj_ids = result
            db_conn.dbe_create('virtual-machine-interface', obj_ids, child_dict)
            # recurse so the default child gets its own default children
            self._virtual_machine_interface_create_default_children(child_obj)

        pass
    #end _virtual_machine_create_default_children

    def _virtual_machine_delete_default_children(self, parent_dict):
        # Delete a default child only if provisioned for
        r_class = self.get_resource_class('virtual-machine-interface')
        if r_class and r_class.generate_default_instance:
            # first locate default child then delete it
            has_infos = parent_dict.get('virtual_machine_interfaces')
            if has_infos:
                for has_info in has_infos:
                    if has_info['to'][-1] == 'default-virtual-machine-interface':
                        # the child uuid is the last path segment of its href
                        default_child_id = has_info['href'].split('/')[-1]
                        self.virtual_machine_interface_http_delete(default_child_id)
                        break

        pass
    #end _virtual_machine_delete_default_children

    def interface_route_table_http_get(self, id):
        # pre-read extension hook; hook errors are ignored (best-effort)
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_interface_route_table_read',
id) + except Exception as e: + pass + + # TODO get vals from request out of the global ASAP + etag = request.headers.get('If-None-Match') + try: + obj_type = self._db_conn.uuid_to_obj_type(id) + except NoIdError: + obj_type = None + if obj_type != 'interface_route_table': + abort(404, 'No interface-route-table object found for id %s' %(id)) + # common handling for all resource get + (ok, result) = self._get_common(request, id) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'interface_route_table', 'http_get', msg) + abort(code, msg) + + # type-specific hook + r_class = self.get_resource_class('interface-route-table') + if r_class: + r_class.http_get(id) + + db_conn = self._db_conn + if etag: + obj_ids = {'uuid': id} + (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', '')) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'interface_route_table', 'http_get', result) + abort(404, result) + + is_latest = result + if is_latest: + # send Not-Modified, caches use this for read optimization + response.status = 304 + return + #end if etag + + obj_ids = {'uuid': id} + + # Generate field list for db layer + properties = [u'interface_route_table_routes', u'id_perms', u'display_name'] + references = [] + back_references = [u'project_back_refs', 'virtual_machine_interface_back_refs'] + children = [] + if 'fields' in request.query: + obj_fields = request.query.fields.split(',') + else: # default props + children + refs + backrefs + obj_fields = properties + references + if 'exclude_back_refs' not in request.query: + obj_fields = obj_fields + back_references + if 'exclude_children' not in request.query: + obj_fields = obj_fields + children + + (ok, result) = db_conn.dbe_read('interface-route-table', obj_ids, obj_fields) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'interface_route_table', 'http_get', result) + abort(404, result) + + # check visibility + if (not 
result['id_perms'].get('user_visible', True) and + not self.is_admin_request()): + result = 'This object is not visible by users: %s' % id + self.config_object_error(id, None, 'interface_route_table', 'http_get', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('interface-route-table', id) + rsp_body['name'] = result['fq_name'][-1] + rsp_body.update(result) + id_perms = result['id_perms'] + response.set_header('ETag', '"' + id_perms['last_modified'] + '"') + try: + self._extension_mgrs['resourceApi'].map_method('post_interface_route_table_read', id, rsp_body) + except Exception as e: + pass + + return {'interface-route-table': rsp_body} + #end interface_route_table_http_get + + def interface_route_table_http_put(self, id): + key = 'interface-route-table' + obj_dict = request.json[key] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_interface_route_table_update', id, obj_dict) + except Exception as e: + pass + + db_conn = self._db_conn + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'interface_route_table': + abort(404, 'No interface-route-table object found for id %s' %(id)) + fq_name = db_conn.uuid_to_fq_name(id) + except NoIdError as e: + abort(404, str(e)) + prop_dict = obj_dict.get('interface_route_table_routes') + if prop_dict: + buf = cStringIO.StringIO() + xx_interface_route_table_routes = RouteTableType(**prop_dict) + xx_interface_route_table_routes.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_interface_route_table_routes = RouteTableType() + try: + xx_interface_route_table_routes.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common 
handling for all resource put + (ok, result) = self._put_common(request, 'interface_route_table', id, fq_name, obj_dict) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'interface_route_table', 'http_put', msg) + abort(code, msg) + + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # type-specific hook + r_class = self.get_resource_class('interface-route-table') + if r_class: + (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn) + if not ok: + (code, msg) = put_result + self.config_object_error(id, None, 'interface_route_table', 'http_put', msg) + abort(code, msg) + callable = getattr(r_class, 'http_put_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn])) + + obj_ids = {'uuid': id} + try: + (ok, result) = db_conn.dbe_update('interface-route-table', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'interface_route_table', 'http_put', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('interface-route-table', id) + + try: + self._extension_mgrs['resourceApi'].map_method('post_interface_route_table_update', id, obj_dict) + except Exception as e: + pass + + return {'interface-route-table': rsp_body} + #end interface_route_table_http_put + + def interface_route_table_http_delete(self, id): + db_conn = self._db_conn + # if obj doesn't exist return early + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'interface_route_table': + abort(404, 'No interface-route-table object found for id %s' %(id)) + _ = db_conn.uuid_to_fq_name(id) + except NoIdError: + abort(404, 'ID %s does not exist' %(id)) + + try: + 
            # best-effort pre-delete extension hook; failures ignored
            self._extension_mgrs['resourceApi'].map_method('pre_interface_route_table_delete', id)
        except Exception as e:
            pass

        # read in obj from db (accepting error) to get details of it
        obj_ids = {'uuid': id}
        back_references = [u'project_back_refs', 'virtual_machine_interface_back_refs']
        children = []
        obj_fields = children + back_references
        (read_ok, read_result) = db_conn.dbe_read('interface-route-table', obj_ids, obj_fields)
        if not read_ok:
            if read_result.startswith('Unknown id:'):
                abort(404, 'ID %s does not exist' %(id))
            else:
                self.config_object_error(id, None, 'interface_route_table', 'http_delete', read_result)
                # proceed down to delete the resource
                # NOTE(review): on this path read_result is an error string,
                # so read_result.get(...) below would raise — confirm whether
                # this fall-through is intended in the generator.

        # common handling for all resource delete
        parent_type = read_result.get('parent_type', None)
        (ok, del_result) = self._delete_common(request, 'interface_route_table', id, parent_type)
        if not ok:
            (code, msg) = del_result
            self.config_object_error(id, None, 'interface_route_table', 'http_delete', msg)
            abort(code, msg)

        # compute IF-MAP identifiers for the object (and its parent, if any)
        fq_name = read_result['fq_name']
        ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('interface-route-table', fq_name)
        obj_ids['imid'] = ifmap_id
        if parent_type:
            parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
            obj_ids['parent_imid'] = parent_imid

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []

        # type-specific hook
        r_class = self.get_resource_class('interface-route-table')
        if r_class:
            if read_ok:
                # fail if non-default children or backrefs exist
                virtual_machine_interface_back_refs = read_result.get('virtual_machine_interface_back_refs', None)
                if virtual_machine_interface_back_refs:
                    back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_machine_interface_back_refs']]
                    back_ref_str = ', '.join(back_ref_urls)
                    err_msg = 'Back-References from ' + back_ref_str + ' still exist'
                    self.config_object_error(id, None, 'interface_route_table', 'http_delete', err_msg)
                    abort(409, err_msg)


                # Delete default children first
                self._interface_route_table_delete_default_children(read_result)

                (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
                if not ok:
                    (code, msg) = del_result
                    self.config_object_error(id, None, 'interface_route_table', 'http_delete', msg)
                    abort(code, msg)
                # register the type-specific undo to run if the DB delete fails
                callable = getattr(r_class, 'http_delete_fail', None)
                if callable:
                    cleanup_on_failure.append((callable, [id, read_result, db_conn]))
            #end if read_ok

        try:
            (ok, del_result) = db_conn.dbe_delete('interface-route-table', obj_ids, read_result)
        except Exception as e:
            ok = False
            del_result = str(e)
        if not ok:
            # unwind any state changes registered above
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'interface_route_table', 'http_delete', del_result)
            abort(409, del_result)

        try:
            # best-effort post-delete extension hook; failures ignored
            self._extension_mgrs['resourceApi'].map_method('post_interface_route_table_delete', id, read_result)
        except Exception as e:
            pass

    #end interface_route_table_http_delete

    def interface_route_tables_http_post(self):
        """Handle POST /interface-route-tables (create one object).

        Validates the payload, verifies the parent and write permission,
        allocates ids, then creates the object in the config DB.
        """
        key = 'interface-route-table'
        obj_dict = request.json[key]
        self._post_validate(key, obj_dict=obj_dict)
        fq_name = obj_dict['fq_name']

        try:
            # best-effort pre-create extension hook; failures ignored
            self._extension_mgrs['resourceApi'].map_method('pre_interface_route_table_create', obj_dict)
        except Exception as e:
            pass

        # Validate 'interface_route_table_routes' by exporting to XML and
        # rebuilding — a failed build means malformed input (400)
        prop_dict = obj_dict.get('interface_route_table_routes')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_interface_route_table_routes = RouteTableType(**prop_dict)
            xx_interface_route_table_routes.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_interface_route_table_routes = RouteTableType()
            try:
                xx_interface_route_table_routes.build(node)
            except Exception as e:
                abort(400, str(e))
        # Same round-trip validation for 'id_perms'
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource create
        (ok, result) = self._post_common(request, 'interface-route-table', obj_dict)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict.get('fq_name', []))
            self.config_object_error(None, fq_name_str, 'interface_route_table', 'http_post', msg)
            abort(code, msg)

        name = obj_dict['fq_name'][-1]
        fq_name = obj_dict['fq_name']

        db_conn = self._db_conn

        # if client gave parent_type of config-root, ignore and remove
        if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
            del obj_dict['parent_type']

        if 'parent_type' in obj_dict:
            # non config-root child, verify parent exists
            parent_type = obj_dict['parent_type']
            parent_fq_name = obj_dict['fq_name'][:-1]
            try:
                parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
                (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
                if not ok:
                    (code, err_msg) = status
                    abort(code, err_msg)
                self._permissions.set_user_role(request, obj_dict)
            except NoIdError:
                err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
                fq_name_str = ':'.join(parent_fq_name)
                self.config_object_error(None, fq_name_str, 'interface_route_table', 'http_post', err_msg)
                abort(400, err_msg)


        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # Alloc and Store id-mappings before creating entry on pubsub store.
        # Else a subscriber can ask for an id mapping before we have stored it
        # NOTE(review): 'result' here is the success value of _post_common —
        # presumably the client-requested uuid (or None); confirm.
        uuid_requested = result
        (ok, result) = db_conn.dbe_alloc('interface-route-table', obj_dict, uuid_requested)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict['fq_name'])
            self.config_object_error(None, fq_name_str, 'interface_route_table', 'http_post', result)
            abort(code, msg)
        # release allocated ids if anything below fails
        cleanup_on_failure.append((db_conn.dbe_release, ['interface_route_table', fq_name]))

        obj_ids = result

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')


        # type-specific hook
        r_class = self.get_resource_class('interface-route-table')
        if r_class:
            try:
                (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
            except Exception as e:
                ok = False
                result = (500, str(e))
            if not ok:
                for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                    fail_cleanup_callable(*cleanup_args)
                (code, msg) = result
                fq_name_str = ':'.join(fq_name)
                self.config_object_error(None, fq_name_str, 'interface_route_table', 'http_post', msg)
                abort(code, msg)
            # register the type-specific undo to run if the DB create fails
            callable = getattr(r_class, 'http_post_collection_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))

        try:
            (ok, result) = \
                db_conn.dbe_create('interface-route-table', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)

        if not ok:
            # unwind id allocation and type-specific state
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            fq_name_str = ':'.join(fq_name)
            self.config_object_error(None, fq_name_str, 'interface_route_table', 'http_post', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['name'] = name
        rsp_body['fq_name'] = fq_name
        rsp_body['uuid'] = obj_ids['uuid']
        rsp_body['href'] = self.generate_url('interface-route-table', obj_ids['uuid'])
        if 'parent_type' in obj_dict:
            # non config-root child, send back parent uuid/href
            rsp_body['parent_uuid'] = parent_uuid
            rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)

        try:
            # best-effort post-create extension hook; failures ignored
            self._extension_mgrs['resourceApi'].map_method('post_interface_route_table_create', obj_dict)
        except Exception as e:
            pass

        return {'interface-route-table': rsp_body}
    #end interface_route_tables_http_post

    def interface_route_tables_http_get(self):
        """Handle GET /interface-route-tables (list collection)."""
        # gather list of uuids using 1. any specified anchors
        # 2. any specified filters
        # if not 'detail' return list with any specified 'fields'
        # if 'detail' return list with props+refs + any specified 'fields'

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')
        parent_uuids = None
        back_ref_uuids = None
        obj_uuids = None
        if (('parent_fq_name_str' in request.query) and
            ('parent_type' in request.query)):
            parent_fq_name = request.query.parent_fq_name_str.split(':')
            parent_type = request.query.parent_type
            parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
        elif 'parent_id' in request.query:
            parent_ids = request.query.parent_id.split(',')
            parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
        if 'back_ref_id' in request.query:
            back_ref_ids = request.query.back_ref_id.split(',')
            back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
        if 'obj_uuids' in request.query:
            obj_uuids = request.query.obj_uuids.split(',')

        # common handling for all resource get
        (ok, result) = self._get_common(request, parent_uuids)
        if not ok:
            (code, msg) = result
            self.config_object_error(None, None, 'interface_route_tables', 'http_get_collection', msg)
            abort(code, msg)

        if 'count' in request.query:
            count = 'true' in request.query.count.lower()
        else:
            count = False

        # filters come as comma-separated 'name==value' pairs
        filter_params = request.query.filters
        if filter_params:
            try:
                ff_key_vals = filter_params.split(',')
                ff_names = [ff.split('==')[0] for ff in ff_key_vals]
                ff_values = [ff.split('==')[1] for ff in ff_key_vals]
                filters = {'field_names': ff_names, 'field_values': ff_values}
            except Exception as e:
                abort(400, 'Invalid filter ' + filter_params)
        else:
            filters = None
        db_conn = self._db_conn
        (ok, result) = \
            db_conn.dbe_list('interface-route-table', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
        if not ok:
            self.config_object_error(None, None, 'interface_route_tables', 'http_get_collection', result)
            abort(404, result)

        # If only counting, return early
        if count:
            return {'interface-route-tables': {'count': result}}

        if 'detail' in request.query:
            detail = 'true' in request.query.detail.lower()
        else:
            detail = False

        fq_names_uuids = result
        obj_dicts = []
        if not detail:
            if not self.is_admin_request():
                # non-admins only see user-visible objects; requires a
                # multi-read to fetch id_perms for each candidate
                obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
                obj_fields = [u'id_perms']
                (ok, result) = db_conn.dbe_read_multi('interface-route-table', obj_ids_list, obj_fields)
                if not ok:
                    result = []
                for obj_result in result:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    if obj_result['id_perms'].get('user_visible', True):
                        obj_dict = {}
                        obj_dict['uuid'] = obj_result['uuid']
                        obj_dict['href'] = self.generate_url('interface-route-table', obj_result['uuid'])
                        obj_dict['fq_name'] = obj_result['fq_name']
                        obj_dicts.append(obj_dict)
            else:
                for fq_name, obj_uuid in fq_names_uuids:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    obj_dict = {}
                    obj_dict['uuid'] = obj_uuid
                    obj_dict['href'] = self.generate_url('interface-route-table', obj_uuid)
                    obj_dict['fq_name'] = fq_name
                    obj_dicts.append(obj_dict)
        else: #detail
            obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
            obj_fields = [u'interface_route_table_routes', u'id_perms', u'display_name'] + []
            if 'fields' in request.query:
                obj_fields.extend(request.query.fields.split(','))
            (ok, result) = db_conn.dbe_read_multi('interface-route-table', obj_ids_list, obj_fields)

            if not ok:
                result = []

            for obj_result in result:
                # give chance for zk heartbeat/ping
                gevent.sleep(0)
                obj_dict = {}
                obj_dict['name'] = obj_result['fq_name'][-1]
                obj_dict['href'] = self.generate_url('interface-route-table', obj_result['uuid'])
                obj_dict.update(obj_result)
                if (obj_dict['id_perms'].get('user_visible', True) or
                    self.is_admin_request()):
                    obj_dicts.append({'interface-route-table': obj_dict})

        return {'interface-route-tables': obj_dicts}
    #end interface_route_tables_http_get

    def _interface_route_table_create_default_children(self, parent_obj):
        # interface-route-table has no default children to create
        pass
    #end _interface_route_table_create_default_children

    def _interface_route_table_delete_default_children(self, parent_dict):
        # interface-route-table has no default children to delete
        pass
    #end _interface_route_table_delete_default_children

    def service_template_http_get(self, id):
        """Handle GET /service-template/<id>.

        Supports conditional reads via If-None-Match (ETag) and field
        selection via the 'fields' query parameter.
        """
        try:
            # best-effort pre-read extension hook; failures ignored
            self._extension_mgrs['resourceApi'].map_method('pre_service_template_read', id)
        except Exception as e:
            pass

        # TODO get vals from request out of the global ASAP
        etag = request.headers.get('If-None-Match')
        try:
            obj_type = self._db_conn.uuid_to_obj_type(id)
        except NoIdError:
            obj_type = None
        if obj_type != 'service_template':
            abort(404, 'No service-template object found for id %s' %(id))
        # common handling for all resource get
        (ok, result) = self._get_common(request, id)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'service_template', 'http_get', msg)
            abort(code, msg)

        # type-specific hook
        r_class = self.get_resource_class('service-template')
        if r_class:
            r_class.http_get(id)

        db_conn = self._db_conn
        if etag:
            obj_ids = {'uuid': id}
            (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
+ if not ok: + # Not present in DB + self.config_object_error(id, None, 'service_template', 'http_get', result) + abort(404, result) + + is_latest = result + if is_latest: + # send Not-Modified, caches use this for read optimization + response.status = 304 + return + #end if etag + + obj_ids = {'uuid': id} + + # Generate field list for db layer + properties = [u'service_template_properties', u'id_perms', u'display_name'] + references = [] + back_references = [u'domain_back_refs', u'service_instance_back_refs'] + children = [] + if 'fields' in request.query: + obj_fields = request.query.fields.split(',') + else: # default props + children + refs + backrefs + obj_fields = properties + references + if 'exclude_back_refs' not in request.query: + obj_fields = obj_fields + back_references + if 'exclude_children' not in request.query: + obj_fields = obj_fields + children + + (ok, result) = db_conn.dbe_read('service-template', obj_ids, obj_fields) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'service_template', 'http_get', result) + abort(404, result) + + # check visibility + if (not result['id_perms'].get('user_visible', True) and + not self.is_admin_request()): + result = 'This object is not visible by users: %s' % id + self.config_object_error(id, None, 'service_template', 'http_get', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('service-template', id) + rsp_body['name'] = result['fq_name'][-1] + rsp_body.update(result) + id_perms = result['id_perms'] + response.set_header('ETag', '"' + id_perms['last_modified'] + '"') + try: + self._extension_mgrs['resourceApi'].map_method('post_service_template_read', id, rsp_body) + except Exception as e: + pass + + return {'service-template': rsp_body} + #end service_template_http_get + + def service_template_http_put(self, id): + key = 'service-template' + obj_dict = request.json[key] + + try: + 
            # best-effort pre-update extension hook; failures ignored
            self._extension_mgrs['resourceApi'].map_method('pre_service_template_update', id, obj_dict)
        except Exception as e:
            pass

        db_conn = self._db_conn
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'service_template':
                abort(404, 'No service-template object found for id %s' %(id))
            fq_name = db_conn.uuid_to_fq_name(id)
        except NoIdError as e:
            abort(404, str(e))
        # Validate 'service_template_properties' by exporting to XML and
        # rebuilding — a failed build means malformed input (400)
        prop_dict = obj_dict.get('service_template_properties')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_service_template_properties = ServiceTemplateType(**prop_dict)
            xx_service_template_properties.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_service_template_properties = ServiceTemplateType()
            try:
                xx_service_template_properties.build(node)
            except Exception as e:
                abort(400, str(e))
        # Same round-trip validation for 'id_perms'
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource put
        (ok, result) = self._put_common(request, 'service_template', id, fq_name, obj_dict)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'service_template', 'http_put', msg)
            abort(code, msg)


        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # type-specific hook
        r_class = self.get_resource_class('service-template')
        if r_class:
            (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
            if not ok:
                (code, msg) = put_result
                self.config_object_error(id, None, 'service_template', 'http_put', msg)
                abort(code, msg)
            # register the type-specific undo to run if the DB update fails
            callable = getattr(r_class, 'http_put_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))

        obj_ids = {'uuid': id}
        try:
            (ok, result) = db_conn.dbe_update('service-template', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)
        if not ok:
            # unwind any state changes registered above
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'service_template', 'http_put', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('service-template', id)

        try:
            # best-effort post-update extension hook; failures ignored
            self._extension_mgrs['resourceApi'].map_method('post_service_template_update', id, obj_dict)
        except Exception as e:
            pass

        return {'service-template': rsp_body}
    #end service_template_http_put

    def service_template_http_delete(self, id):
        """Handle DELETE /service-template/<id>.

        Refuses deletion (409) while service-instance back-references
        still exist; otherwise removes the object from the config DB.
        """
        db_conn = self._db_conn
        # if obj doesn't exist return early
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'service_template':
                abort(404, 'No service-template object found for id %s' %(id))
            _ = db_conn.uuid_to_fq_name(id)
        except NoIdError:
            abort(404, 'ID %s does not exist' %(id))

        try:
            # best-effort pre-delete extension hook; failures ignored
            self._extension_mgrs['resourceApi'].map_method('pre_service_template_delete', id)
        except Exception as e:
            pass

        # read in obj from db (accepting error) to get details of it
        obj_ids = {'uuid': id}
        back_references = [u'domain_back_refs', u'service_instance_back_refs']
        children = []
        obj_fields = children + back_references
        (read_ok, read_result) = db_conn.dbe_read('service-template', obj_ids, obj_fields)
        if not read_ok:
            if read_result.startswith('Unknown id:'):
                abort(404, 'ID %s does not exist' %(id))
            else:
                self.config_object_error(id, None, 'service_template', 'http_delete', read_result)
                # proceed down to delete the resource
                # NOTE(review): on this path read_result is an error string,
                # so read_result.get(...) below would raise — confirm whether
                # this fall-through is intended in the generator.

        # common handling for all resource delete
        parent_type = read_result.get('parent_type', None)
        (ok, del_result) = self._delete_common(request, 'service_template', id, parent_type)
        if not ok:
            (code, msg) = del_result
            self.config_object_error(id, None, 'service_template', 'http_delete', msg)
            abort(code, msg)

        # compute IF-MAP identifiers for the object (and its parent, if any)
        fq_name = read_result['fq_name']
        ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('service-template', fq_name)
        obj_ids['imid'] = ifmap_id
        if parent_type:
            parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
            obj_ids['parent_imid'] = parent_imid

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []

        # type-specific hook
        r_class = self.get_resource_class('service-template')
        if r_class:
            if read_ok:
                # fail if non-default children or backrefs exist
                service_instance_back_refs = read_result.get('service_instance_back_refs', None)
                if service_instance_back_refs:
                    back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['service_instance_back_refs']]
                    back_ref_str = ', '.join(back_ref_urls)
                    err_msg = 'Back-References from ' + back_ref_str + ' still exist'
                    self.config_object_error(id, None, 'service_template', 'http_delete', err_msg)
                    abort(409, err_msg)


                # Delete default children first
                self._service_template_delete_default_children(read_result)

                (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
                if not ok:
                    (code, msg) = del_result
                    self.config_object_error(id, None, 'service_template', 'http_delete', msg)
                    abort(code, msg)
                # register the type-specific undo to run if the DB delete fails
                callable = getattr(r_class, 'http_delete_fail', None)
                if callable:
                    cleanup_on_failure.append((callable, [id, read_result, db_conn]))
            #end if read_ok

        try:
            (ok, del_result) = db_conn.dbe_delete('service-template', obj_ids, read_result)
        except Exception as e:
            ok = False
            del_result = str(e)
        if not ok:
            # unwind any state changes registered above
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'service_template', 'http_delete', del_result)
            abort(409, del_result)

        try:
            # best-effort post-delete extension hook; failures ignored
            self._extension_mgrs['resourceApi'].map_method('post_service_template_delete', id, read_result)
        except Exception as e:
            pass

    #end service_template_http_delete

    def service_templates_http_post(self):
        """Handle POST /service-templates (create one object).

        Validates the payload, verifies the parent and write permission,
        allocates ids, then creates the object in the config DB.
        """
        key = 'service-template'
        obj_dict = request.json[key]
        self._post_validate(key, obj_dict=obj_dict)
        fq_name = obj_dict['fq_name']

        try:
            # best-effort pre-create extension hook; failures ignored
            self._extension_mgrs['resourceApi'].map_method('pre_service_template_create', obj_dict)
        except Exception as e:
            pass

        # Validate 'service_template_properties' by exporting to XML and
        # rebuilding — a failed build means malformed input (400)
        prop_dict = obj_dict.get('service_template_properties')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_service_template_properties = ServiceTemplateType(**prop_dict)
            xx_service_template_properties.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_service_template_properties = ServiceTemplateType()
            try:
                xx_service_template_properties.build(node)
            except Exception as e:
                abort(400, str(e))
        # Same round-trip validation for 'id_perms'
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource create
        (ok, result) = self._post_common(request, 'service-template', obj_dict)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict.get('fq_name', []))
            self.config_object_error(None, fq_name_str, 'service_template', 'http_post', msg)
            abort(code, msg)

        name = obj_dict['fq_name'][-1]
        fq_name = obj_dict['fq_name']

        db_conn = self._db_conn

        # if client gave parent_type of config-root, ignore and remove
        if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
            del obj_dict['parent_type']

        if 'parent_type' in obj_dict:
            # non config-root child, verify parent exists
            parent_type = obj_dict['parent_type']
            parent_fq_name = obj_dict['fq_name'][:-1]
            try:
                parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
                (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
                if not ok:
                    (code, err_msg) = status
                    abort(code, err_msg)
                self._permissions.set_user_role(request, obj_dict)
            except NoIdError:
                err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
                fq_name_str = ':'.join(parent_fq_name)
                self.config_object_error(None, fq_name_str, 'service_template', 'http_post', err_msg)
                abort(400, err_msg)


        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # Alloc and Store id-mappings before creating entry on pubsub store.
        # Else a subscriber can ask for an id mapping before we have stored it
        # NOTE(review): 'result' here is the success value of _post_common —
        # presumably the client-requested uuid (or None); confirm.
        uuid_requested = result
        (ok, result) = db_conn.dbe_alloc('service-template', obj_dict, uuid_requested)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict['fq_name'])
            self.config_object_error(None, fq_name_str, 'service_template', 'http_post', result)
            abort(code, msg)
        # release allocated ids if anything below fails
        cleanup_on_failure.append((db_conn.dbe_release, ['service_template', fq_name]))

        obj_ids = result

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')


        # type-specific hook
        r_class = self.get_resource_class('service-template')
        if r_class:
            try:
                (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
            except Exception as e:
                ok = False
                result = (500, str(e))
            if not ok:
                for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                    fail_cleanup_callable(*cleanup_args)
                (code, msg) = result
                fq_name_str = ':'.join(fq_name)
                self.config_object_error(None, fq_name_str, 'service_template', 'http_post', msg)
                abort(code, msg)
            # register the type-specific undo to run if the DB create fails
            callable = getattr(r_class, 'http_post_collection_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))

        try:
            (ok, result) = \
                db_conn.dbe_create('service-template', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)

        if not ok:
            # unwind id allocation and type-specific state
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            fq_name_str = ':'.join(fq_name)
            self.config_object_error(None, fq_name_str, 'service_template', 'http_post', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['name'] = name
        rsp_body['fq_name'] = fq_name
        rsp_body['uuid'] = obj_ids['uuid']
        rsp_body['href'] = self.generate_url('service-template', obj_ids['uuid'])
        if 'parent_type' in obj_dict:
            # non config-root child, send back parent uuid/href
            rsp_body['parent_uuid'] = parent_uuid
            rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)

        try:
            # best-effort post-create extension hook; failures ignored
            self._extension_mgrs['resourceApi'].map_method('post_service_template_create', obj_dict)
        except Exception as e:
            pass

        return {'service-template': rsp_body}
    #end service_templates_http_post

    def service_templates_http_get(self):
        """Handle GET /service-templates (list collection)."""
        # gather list of uuids using 1. any specified anchors
        # 2. any specified filters
        # if not 'detail' return list with any specified 'fields'
        # if 'detail' return list with props+refs + any specified 'fields'

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')
        parent_uuids = None
        back_ref_uuids = None
        obj_uuids = None
        if (('parent_fq_name_str' in request.query) and
            ('parent_type' in request.query)):
            parent_fq_name = request.query.parent_fq_name_str.split(':')
            parent_type = request.query.parent_type
            parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
        elif 'parent_id' in request.query:
            parent_ids = request.query.parent_id.split(',')
            parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
        if 'back_ref_id' in request.query:
            back_ref_ids = request.query.back_ref_id.split(',')
            back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
        if 'obj_uuids' in request.query:
            obj_uuids = request.query.obj_uuids.split(',')

        # common handling for all resource get
        (ok, result) = self._get_common(request, parent_uuids)
        if not ok:
            (code, msg) = result
            self.config_object_error(None, None, 'service_templates', 'http_get_collection', msg)
            abort(code, msg)

        if 'count' in request.query:
            count = 'true' in request.query.count.lower()
        else:
            count = False

        # filters come as comma-separated 'name==value' pairs
        filter_params = request.query.filters
        if filter_params:
            try:
                ff_key_vals = filter_params.split(',')
                ff_names = [ff.split('==')[0] for ff in ff_key_vals]
                ff_values = [ff.split('==')[1] for ff in ff_key_vals]
                filters = {'field_names': ff_names, 'field_values': ff_values}
            except Exception as e:
                abort(400, 'Invalid filter ' + filter_params)
        else:
            filters = None
        db_conn = self._db_conn
        (ok, result) = \
            db_conn.dbe_list('service-template', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
        if not ok:
            self.config_object_error(None, None, 'service_templates', 'http_get_collection', result)
            abort(404, result)

        # If only counting, return early
        if count:
            return {'service-templates': {'count': result}}

        if 'detail' in request.query:
            detail = 'true' in request.query.detail.lower()
        else:
            detail = False

        fq_names_uuids = result
        obj_dicts = []
        if not detail:
            if not self.is_admin_request():
                # non-admins only see user-visible objects; requires a
                # multi-read to fetch id_perms for each candidate
                obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
                obj_fields = [u'id_perms']
                (ok, result) = db_conn.dbe_read_multi('service-template', obj_ids_list, obj_fields)
                if not ok:
                    result = []
                for obj_result in result:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    if obj_result['id_perms'].get('user_visible', True):
                        obj_dict = {}
                        obj_dict['uuid'] = obj_result['uuid']
                        obj_dict['href'] = self.generate_url('service-template', obj_result['uuid'])
                        obj_dict['fq_name'] = obj_result['fq_name']
                        obj_dicts.append(obj_dict)
            else:
                for fq_name, obj_uuid in fq_names_uuids:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    obj_dict = {}
                    obj_dict['uuid'] = obj_uuid
                    obj_dict['href'] = self.generate_url('service-template', obj_uuid)
                    obj_dict['fq_name'] = fq_name
                    obj_dicts.append(obj_dict)
        else: #detail
            obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
            obj_fields = [u'service_template_properties', u'id_perms', u'display_name'] + []
            if 'fields' in request.query:
                obj_fields.extend(request.query.fields.split(','))
            (ok, result) = db_conn.dbe_read_multi('service-template', obj_ids_list, obj_fields)

            if not ok:
                result = []

            for obj_result in result:
                # give chance for zk heartbeat/ping
                gevent.sleep(0)
                obj_dict = {}
                obj_dict['name'] = obj_result['fq_name'][-1]
                obj_dict['href'] = self.generate_url('service-template', obj_result['uuid'])
                obj_dict.update(obj_result)
                if (obj_dict['id_perms'].get('user_visible', True) or
                    self.is_admin_request()):
                    obj_dicts.append({'service-template': obj_dict})

        return {'service-templates': obj_dicts}
    #end service_templates_http_get

    def _service_template_create_default_children(self, parent_obj):
        # service-template has no default children to create
        pass
    #end _service_template_create_default_children

    def _service_template_delete_default_children(self, parent_dict):
        # service-template has no default children to delete
        pass
    #end _service_template_delete_default_children

    def virtual_ip_http_get(self, id):
        """Handle GET /virtual-ip/<id>.

        Supports conditional reads via If-None-Match (ETag) and field
        selection via the 'fields' query parameter.
        """
        try:
            # best-effort pre-read extension hook; failures ignored
            self._extension_mgrs['resourceApi'].map_method('pre_virtual_ip_read', id)
        except Exception as e:
            pass

        # TODO get vals from request out of the global ASAP
        etag = request.headers.get('If-None-Match')
        try:
            obj_type = self._db_conn.uuid_to_obj_type(id)
        except NoIdError:
            obj_type = None
        if obj_type != 'virtual_ip':
            abort(404, 'No virtual-ip object found for id %s' %(id))
        # common handling for all resource get
        (ok, result) = self._get_common(request, id)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'virtual_ip', 'http_get', msg)
            abort(code, msg)

        # type-specific hook
        r_class = self.get_resource_class('virtual-ip')
        if r_class:
            r_class.http_get(id)

        db_conn = self._db_conn
        if etag:
            obj_ids = {'uuid': id}
            (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
            if not ok:
                # Not present in DB
                self.config_object_error(id, None, 'virtual_ip', 'http_get', result)
                abort(404, result)

            is_latest = result
            if is_latest:
                # send Not-Modified, caches use this for read optimization
                response.status = 304
                return
        #end if etag

        obj_ids = {'uuid': id}

        # Generate field list for db layer
        properties = [u'virtual_ip_properties', u'id_perms', u'display_name']
        references = [u'loadbalancer_pool_refs', 'virtual_machine_interface_refs']
        back_references = [u'project_back_refs']
        children = []
        if 'fields' in request.query:
            obj_fields = request.query.fields.split(',')
        else: # default props + children + refs + backrefs
            obj_fields = properties + references
            if 'exclude_back_refs' not in request.query:
                obj_fields = obj_fields + back_references
            if 'exclude_children' not in request.query:
                obj_fields = obj_fields + children

        (ok, result) = db_conn.dbe_read('virtual-ip', obj_ids, obj_fields)
        if not ok:
            # Not present in DB
            self.config_object_error(id, None, 'virtual_ip', 'http_get', result)
            abort(404, result)

        # check visibility
        if (not result['id_perms'].get('user_visible', True) and
            not self.is_admin_request()):
            result = 'This object is not visible by users: %s' % id
            self.config_object_error(id, None, 'virtual_ip', 'http_get', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('virtual-ip', id)
        rsp_body['name'] = result['fq_name'][-1]
        rsp_body.update(result)
        id_perms = result['id_perms']
        # ETag lets clients do conditional GETs via If-None-Match
        response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
        try:
            # best-effort post-read extension hook; failures ignored
            self._extension_mgrs['resourceApi'].map_method('post_virtual_ip_read', id, rsp_body)
        except Exception as e:
            pass

        return {'virtual-ip': rsp_body}
    #end virtual_ip_http_get

    def virtual_ip_http_put(self, id):
        """Handle PUT /virtual-ip/<id>.

        Validates property payloads against the generated XSD types,
        checks link permission for every reference in the payload, then
        updates the object in the config DB.
        """
        key = 'virtual-ip'
        obj_dict = request.json[key]

        try:
            # best-effort pre-update extension hook; failures ignored
            self._extension_mgrs['resourceApi'].map_method('pre_virtual_ip_update', id, obj_dict)
        except Exception as e:
            pass

        db_conn = self._db_conn
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'virtual_ip':
                abort(404, 'No virtual-ip object found for id %s' %(id))
            fq_name = db_conn.uuid_to_fq_name(id)
        except NoIdError as e:
            abort(404, str(e))
        # Validate 'virtual_ip_properties' by exporting to XML and
        # rebuilding — a failed build means malformed input (400)
        prop_dict = obj_dict.get('virtual_ip_properties')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_virtual_ip_properties = VirtualIpType(**prop_dict)
            xx_virtual_ip_properties.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_virtual_ip_properties = VirtualIpType()
            try:
                xx_virtual_ip_properties.build(node)
            except Exception as e:
                abort(400, str(e))
        # Same round-trip validation for 'id_perms'
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource put
        (ok, result) = self._put_common(request, 'virtual_ip', id, fq_name, obj_dict)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'virtual_ip', 'http_put', msg)
            abort(code, msg)

        # Validate perms
        # virtual-ip can reference loadbalancer-pool and
        # virtual-machine-interface objects; each ref target must exist
        # and the caller must have link permission on it
        objtype_list = [u'loadbalancer_pool', 'virtual_machine_interface']
        for obj_type in objtype_list:
            refs = obj_dict.get('%s_refs'%(obj_type), None)
            if refs:
                for ref in refs:
                    try:
                        ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
                    except NoIdError as e:
                        abort(404, str(e))
                    (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
                    if not ok:
                        (code, err_msg) = status
                        abort(code, err_msg)

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # type-specific hook
        r_class = self.get_resource_class('virtual-ip')
        if r_class:
            (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
            if not ok:
                (code, msg) = put_result
                self.config_object_error(id, None, 'virtual_ip', 'http_put', msg)
                abort(code, msg)
            # register the type-specific undo to run if the DB update fails
            callable = getattr(r_class, 'http_put_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))

        obj_ids = {'uuid': id}
        try:
            (ok, result) = db_conn.dbe_update('virtual-ip', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)
        if not ok:
            # unwind any state changes registered above
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'virtual_ip', 'http_put', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('virtual-ip', id)

        try:
            # best-effort post-update extension hook; failures ignored
            self._extension_mgrs['resourceApi'].map_method('post_virtual_ip_update', id, obj_dict)
        except Exception as e:
            pass

        return {'virtual-ip': rsp_body}
    #end virtual_ip_http_put

    def virtual_ip_http_delete(self, id):
        """Handle DELETE /virtual-ip/<id> (continues past this view)."""
        db_conn = self._db_conn
        # if obj doesn't exist return early
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'virtual_ip':
                abort(404, 'No virtual-ip object found for id %s' %(id))
            _ = db_conn.uuid_to_fq_name(id)
        except NoIdError:
            abort(404, 'ID %s does not exist' %(id))

        try:
            # best-effort pre-delete extension hook; failures ignored
            self._extension_mgrs['resourceApi'].map_method('pre_virtual_ip_delete', id)
        except Exception as e:
            pass

        # read in obj from db (accepting error) to get details of it
        obj_ids = {'uuid': id}
        back_references = [u'project_back_refs']
        children = []
        obj_fields = children + back_references
        (read_ok, read_result) = db_conn.dbe_read('virtual-ip', obj_ids, obj_fields)
        if not read_ok:
            if read_result.startswith('Unknown id:'):
                abort(404, 'ID %s does not 
exist' %(id)) + else: + self.config_object_error(id, None, 'virtual_ip', 'http_delete', read_result) + # proceed down to delete the resource + + # common handling for all resource delete + parent_type = read_result.get('parent_type', None) + (ok, del_result) = self._delete_common(request, 'virtual_ip', id, parent_type) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'virtual_ip', 'http_delete', msg) + abort(code, msg) + + fq_name = read_result['fq_name'] + ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('virtual-ip', fq_name) + obj_ids['imid'] = ifmap_id + if parent_type: + parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1]) + obj_ids['parent_imid'] = parent_imid + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + + # type-specific hook + r_class = self.get_resource_class('virtual-ip') + if r_class: + if read_ok: + # fail if non-default children or backrefs exist + + # Delete default children first + self._virtual_ip_delete_default_children(read_result) + + (ok, del_result) = r_class.http_delete(id, read_result, db_conn) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'virtual_ip', 'http_delete', msg) + abort(code, msg) + callable = getattr(r_class, 'http_delete_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, read_result, db_conn])) + #end if read_ok + + try: + (ok, del_result) = db_conn.dbe_delete('virtual-ip', obj_ids, read_result) + except Exception as e: + ok = False + del_result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'virtual_ip', 'http_delete', del_result) + abort(409, del_result) + + try: + self._extension_mgrs['resourceApi'].map_method('post_virtual_ip_delete', id, read_result) + except Exception as e: + pass + + #end virtual_ip_http_delete + + def 
virtual_ips_http_post(self): + key = 'virtual-ip' + obj_dict = request.json[key] + self._post_validate(key, obj_dict=obj_dict) + fq_name = obj_dict['fq_name'] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_virtual_ip_create', obj_dict) + except Exception as e: + pass + + prop_dict = obj_dict.get('virtual_ip_properties') + if prop_dict: + buf = cStringIO.StringIO() + xx_virtual_ip_properties = VirtualIpType(**prop_dict) + xx_virtual_ip_properties.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_virtual_ip_properties = VirtualIpType() + try: + xx_virtual_ip_properties.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource create + (ok, result) = self._post_common(request, 'virtual-ip', obj_dict) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict.get('fq_name', [])) + self.config_object_error(None, fq_name_str, 'virtual_ip', 'http_post', msg) + abort(code, msg) + + name = obj_dict['fq_name'][-1] + fq_name = obj_dict['fq_name'] + + db_conn = self._db_conn + + # if client gave parent_type of config-root, ignore and remove + if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root': + del obj_dict['parent_type'] + + if 'parent_type' in obj_dict: + # non config-root child, verify parent exists + parent_type = obj_dict['parent_type'] + parent_fq_name = obj_dict['fq_name'][:-1] + try: + parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name) + (ok, status) = self._permissions.check_perms_write(request, parent_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + self._permissions.set_user_role(request, obj_dict) + except NoIdError: + err_msg = 
'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist' + fq_name_str = ':'.join(parent_fq_name) + self.config_object_error(None, fq_name_str, 'virtual_ip', 'http_post', err_msg) + abort(400, err_msg) + + # Validate perms + objtype_list = [u'loadbalancer_pool', 'virtual_machine_interface'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # Alloc and Store id-mappings before creating entry on pubsub store. + # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('virtual-ip', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'virtual_ip', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['virtual_ip', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('virtual-ip') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'virtual_ip', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, 
[tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('virtual-ip', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'virtual_ip', 'http_post', result) + abort(404, result) + + rsp_body = {} + rsp_body['name'] = name + rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = self.generate_url('virtual-ip', obj_ids['uuid']) + if 'parent_type' in obj_dict: + # non config-root child, send back parent uuid/href + rsp_body['parent_uuid'] = parent_uuid + rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid) + + try: + self._extension_mgrs['resourceApi'].map_method('post_virtual_ip_create', obj_dict) + except Exception as e: + pass + + return {'virtual-ip': rsp_body} + #end virtual_ips_http_post + + def virtual_ips_http_get(self): + # gather list of uuids using 1. any specified anchors + # 2. 
any specified filters + # if not 'detail' return list with any specified 'fields' + # if 'detail' return list with props+refs + any specified 'fields' + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + parent_uuids = None + back_ref_uuids = None + obj_uuids = None + if (('parent_fq_name_str' in request.query) and + ('parent_type' in request.query)): + parent_fq_name = request.query.parent_fq_name_str.split(':') + parent_type = request.query.parent_type + parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)] + elif 'parent_id' in request.query: + parent_ids = request.query.parent_id.split(',') + parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids] + if 'back_ref_id' in request.query: + back_ref_ids = request.query.back_ref_id.split(',') + back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids] + if 'obj_uuids' in request.query: + obj_uuids = request.query.obj_uuids.split(',') + + # common handling for all resource get + (ok, result) = self._get_common(request, parent_uuids) + if not ok: + (code, msg) = result + self.config_object_error(None, None, 'virtual_ips', 'http_get_collection', msg) + abort(code, msg) + + if 'count' in request.query: + count = 'true' in request.query.count.lower() + else: + count = False + + filter_params = request.query.filters + if filter_params: + try: + ff_key_vals = filter_params.split(',') + ff_names = [ff.split('==')[0] for ff in ff_key_vals] + ff_values = [ff.split('==')[1] for ff in ff_key_vals] + filters = {'field_names': ff_names, 'field_values': ff_values} + except Exception as e: + abort(400, 'Invalid filter ' + filter_params) + else: + filters = None + db_conn = self._db_conn + (ok, result) = \ + db_conn.dbe_list('virtual-ip', parent_uuids, back_ref_uuids, obj_uuids, count, filters) + if not ok: + self.config_object_error(None, None, 'virtual_ips', 'http_get_collection', result) + abort(404, result) + + # If only counting, return 
early + if count: + return {'virtual-ips': {'count': result}} + + if 'detail' in request.query: + detail = 'true' in request.query.detail.lower() + else: + detail = False + + fq_names_uuids = result + obj_dicts = [] + if not detail: + if not self.is_admin_request(): + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms'] + (ok, result) = db_conn.dbe_read_multi('virtual-ip', obj_ids_list, obj_fields) + if not ok: + result = [] + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + if obj_result['id_perms'].get('user_visible', True): + obj_dict = {} + obj_dict['uuid'] = obj_result['uuid'] + obj_dict['href'] = self.generate_url('virtual-ip', obj_result['uuid']) + obj_dict['fq_name'] = obj_result['fq_name'] + obj_dicts.append(obj_dict) + else: + for fq_name, obj_uuid in fq_names_uuids: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['uuid'] = obj_uuid + obj_dict['href'] = self.generate_url('virtual-ip', obj_uuid) + obj_dict['fq_name'] = fq_name + obj_dicts.append(obj_dict) + else: #detail + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'virtual_ip_properties', u'id_perms', u'display_name'] + [u'loadbalancer_pool_refs', 'virtual_machine_interface_refs'] + if 'fields' in request.query: + obj_fields.extend(request.query.fields.split(',')) + (ok, result) = db_conn.dbe_read_multi('virtual-ip', obj_ids_list, obj_fields) + + if not ok: + result = [] + + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['name'] = obj_result['fq_name'][-1] + obj_dict['href'] = self.generate_url('virtual-ip', obj_result['uuid']) + obj_dict.update(obj_result) + if (obj_dict['id_perms'].get('user_visible', True) or + self.is_admin_request()): + obj_dicts.append({'virtual-ip': obj_dict}) + + return {'virtual-ips': obj_dicts} + #end virtual_ips_http_get + + def 
_virtual_ip_create_default_children(self, parent_obj):
        # virtual-ip has no child object types in the schema; this
        # generated hook is intentionally a no-op.
        pass
    #end _virtual_ip_create_default_children

    def _virtual_ip_delete_default_children(self, parent_dict):
        # Counterpart of the create hook above; virtual-ip has no
        # default children, so deletion is a no-op as well.
        pass
    #end _virtual_ip_delete_default_children

    def loadbalancer_member_http_get(self, id):
        """Handle GET /loadbalancer-member/<id>: read one object from the DB.

        Honors If-None-Match (ETag) for cache revalidation and the
        'fields', 'exclude_back_refs' and 'exclude_children' query
        parameters when building the field list passed to the DB layer.
        """
        # Best-effort pre-read extension hook; failures are ignored.
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_loadbalancer_member_read', id)
        except Exception as e:
            pass

        # TODO get vals from request out of the global ASAP
        etag = request.headers.get('If-None-Match')
        # Verify the uuid actually names a loadbalancer-member.
        try:
            obj_type = self._db_conn.uuid_to_obj_type(id)
        except NoIdError:
            obj_type = None
        if obj_type != 'loadbalancer_member':
            abort(404, 'No loadbalancer-member object found for id %s' %(id))
        # common handling for all resource get
        (ok, result) = self._get_common(request, id)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'loadbalancer_member', 'http_get', msg)
            abort(code, msg)

        # type-specific hook
        r_class = self.get_resource_class('loadbalancer-member')
        if r_class:
            r_class.http_get(id)

        db_conn = self._db_conn
        if etag:
            obj_ids = {'uuid': id}
            # Compare the client's ETag against the stored last-modified stamp.
            (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
            if not ok:
                # Not present in DB
                self.config_object_error(id, None, 'loadbalancer_member', 'http_get', result)
                abort(404, result)

            is_latest = result
            if is_latest:
                # send Not-Modified, caches use this for read optimization
                response.status = 304
                return
        #end if etag

        obj_ids = {'uuid': id}

        # Generate field list for db layer
        properties = [u'loadbalancer_member_properties', u'id_perms', u'display_name']
        references = []
        back_references = [u'loadbalancer_pool_back_refs']
        children = []
        if 'fields' in request.query:
            obj_fields = request.query.fields.split(',')
        else: # default props + children + refs + backrefs
            obj_fields = properties + references
            if 'exclude_back_refs' not in request.query:
                obj_fields = obj_fields + back_references
            if 'exclude_children' not in request.query:
                obj_fields = obj_fields + children

        (ok, result) = db_conn.dbe_read('loadbalancer-member', obj_ids, obj_fields)
        if not ok:
            # Not present in DB
            self.config_object_error(id, None, 'loadbalancer_member', 'http_get', result)
            abort(404, result)

        # check visibility
        if (not result['id_perms'].get('user_visible', True) and
            not self.is_admin_request()):
            result = 'This object is not visible by users: %s' % id
            self.config_object_error(id, None, 'loadbalancer_member', 'http_get', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('loadbalancer-member', id)
        rsp_body['name'] = result['fq_name'][-1]
        rsp_body.update(result)
        id_perms = result['id_perms']
        # Expose last-modified as the ETag so clients can revalidate later.
        response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
        # Best-effort post-read extension hook; failures are ignored.
        try:
            self._extension_mgrs['resourceApi'].map_method('post_loadbalancer_member_read', id, rsp_body)
        except Exception as e:
            pass

        return {'loadbalancer-member': rsp_body}
    #end loadbalancer_member_http_get

    def loadbalancer_member_http_put(self, id):
        """Handle PUT /loadbalancer-member/<id>: update one object.

        Round-trips 'loadbalancer_member_properties' and 'id_perms'
        through their generated XSD types (export to XML, re-parse,
        build) to validate the payload before the DB update. On a
        failed update, previously registered cleanup callbacks from
        the resource class are invoked before aborting.
        """
        key = 'loadbalancer-member'
        obj_dict = request.json[key]

        # Best-effort pre-update extension hook; failures are ignored.
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_loadbalancer_member_update', id, obj_dict)
        except Exception as e:
            pass

        db_conn = self._db_conn
        # Verify the uuid names a loadbalancer-member and resolve its fq_name.
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'loadbalancer_member':
                abort(404, 'No loadbalancer-member object found for id %s' %(id))
            fq_name = db_conn.uuid_to_fq_name(id)
        except NoIdError as e:
            abort(404, str(e))
        # Validate 'loadbalancer_member_properties' via its generated XSD type.
        prop_dict = obj_dict.get('loadbalancer_member_properties')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_loadbalancer_member_properties = LoadbalancerMemberType(**prop_dict)
            xx_loadbalancer_member_properties.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_loadbalancer_member_properties = LoadbalancerMemberType()
            try:
                xx_loadbalancer_member_properties.build(node)
            except Exception as e:
                abort(400, str(e))
        # Validate 'id_perms' via its generated XSD type.
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource put
        (ok, result) = self._put_common(request, 'loadbalancer_member', id, fq_name, obj_dict)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'loadbalancer_member', 'http_put', msg)
            abort(code, msg)


        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # type-specific hook
        r_class = self.get_resource_class('loadbalancer-member')
        if r_class:
            (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
            if not ok:
                (code, msg) = put_result
                self.config_object_error(id, None, 'loadbalancer_member', 'http_put', msg)
                abort(code, msg)
            # Register the class's failure hook to run if the DB update fails.
            callable = getattr(r_class, 'http_put_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))

        obj_ids = {'uuid': id}
        try:
            (ok, result) = db_conn.dbe_update('loadbalancer-member', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)
        if not ok:
            # Unwind registered cleanup callbacks before reporting failure.
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'loadbalancer_member', 'http_put', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('loadbalancer-member', id)

        # Best-effort post-update extension hook; failures are ignored.
        try:
            self._extension_mgrs['resourceApi'].map_method('post_loadbalancer_member_update', id, obj_dict)
        except Exception as e:
            pass

        return {'loadbalancer-member': rsp_body}
    #end loadbalancer_member_http_put

    def loadbalancer_member_http_delete(self, id):
        """Handle DELETE /loadbalancer-member/<id>."""
        db_conn = self._db_conn
        # if obj doesn't exist return early
        try:
            obj_type = 
db_conn.uuid_to_obj_type(id) + if obj_type != 'loadbalancer_member': + abort(404, 'No loadbalancer-member object found for id %s' %(id)) + _ = db_conn.uuid_to_fq_name(id) + except NoIdError: + abort(404, 'ID %s does not exist' %(id)) + + try: + self._extension_mgrs['resourceApi'].map_method('pre_loadbalancer_member_delete', id) + except Exception as e: + pass + + # read in obj from db (accepting error) to get details of it + obj_ids = {'uuid': id} + back_references = [u'loadbalancer_pool_back_refs'] + children = [] + obj_fields = children + back_references + (read_ok, read_result) = db_conn.dbe_read('loadbalancer-member', obj_ids, obj_fields) + if not read_ok: + if read_result.startswith('Unknown id:'): + abort(404, 'ID %s does not exist' %(id)) + else: + self.config_object_error(id, None, 'loadbalancer_member', 'http_delete', read_result) + # proceed down to delete the resource + + # common handling for all resource delete + parent_type = read_result.get('parent_type', None) + (ok, del_result) = self._delete_common(request, 'loadbalancer_member', id, parent_type) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'loadbalancer_member', 'http_delete', msg) + abort(code, msg) + + fq_name = read_result['fq_name'] + ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('loadbalancer-member', fq_name) + obj_ids['imid'] = ifmap_id + if parent_type: + parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1]) + obj_ids['parent_imid'] = parent_imid + + # State modification starts from here. 
Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + + # type-specific hook + r_class = self.get_resource_class('loadbalancer-member') + if r_class: + if read_ok: + # fail if non-default children or backrefs exist + + # Delete default children first + self._loadbalancer_member_delete_default_children(read_result) + + (ok, del_result) = r_class.http_delete(id, read_result, db_conn) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'loadbalancer_member', 'http_delete', msg) + abort(code, msg) + callable = getattr(r_class, 'http_delete_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, read_result, db_conn])) + #end if read_ok + + try: + (ok, del_result) = db_conn.dbe_delete('loadbalancer-member', obj_ids, read_result) + except Exception as e: + ok = False + del_result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'loadbalancer_member', 'http_delete', del_result) + abort(409, del_result) + + try: + self._extension_mgrs['resourceApi'].map_method('post_loadbalancer_member_delete', id, read_result) + except Exception as e: + pass + + #end loadbalancer_member_http_delete + + def loadbalancer_members_http_post(self): + key = 'loadbalancer-member' + obj_dict = request.json[key] + self._post_validate(key, obj_dict=obj_dict) + fq_name = obj_dict['fq_name'] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_loadbalancer_member_create', obj_dict) + except Exception as e: + pass + + prop_dict = obj_dict.get('loadbalancer_member_properties') + if prop_dict: + buf = cStringIO.StringIO() + xx_loadbalancer_member_properties = LoadbalancerMemberType(**prop_dict) + xx_loadbalancer_member_properties.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_loadbalancer_member_properties = LoadbalancerMemberType() + try: + xx_loadbalancer_member_properties.build(node) + except Exception 
as e: + abort(400, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource create + (ok, result) = self._post_common(request, 'loadbalancer-member', obj_dict) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict.get('fq_name', [])) + self.config_object_error(None, fq_name_str, 'loadbalancer_member', 'http_post', msg) + abort(code, msg) + + name = obj_dict['fq_name'][-1] + fq_name = obj_dict['fq_name'] + + db_conn = self._db_conn + + # if client gave parent_type of config-root, ignore and remove + if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root': + del obj_dict['parent_type'] + + if 'parent_type' in obj_dict: + # non config-root child, verify parent exists + parent_type = obj_dict['parent_type'] + parent_fq_name = obj_dict['fq_name'][:-1] + try: + parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name) + (ok, status) = self._permissions.check_perms_write(request, parent_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + self._permissions.set_user_role(request, obj_dict) + except NoIdError: + err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist' + fq_name_str = ':'.join(parent_fq_name) + self.config_object_error(None, fq_name_str, 'loadbalancer_member', 'http_post', err_msg) + abort(400, err_msg) + + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # Alloc and Store id-mappings before creating entry on pubsub store. 
+ # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('loadbalancer-member', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'loadbalancer_member', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['loadbalancer_member', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('loadbalancer-member') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'loadbalancer_member', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('loadbalancer-member', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'loadbalancer_member', 'http_post', result) + abort(404, result) + + rsp_body = {} + rsp_body['name'] = name + rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = self.generate_url('loadbalancer-member', obj_ids['uuid']) + if 'parent_type' in obj_dict: + # non config-root child, send back parent uuid/href + rsp_body['parent_uuid'] = parent_uuid + 
rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid) + + try: + self._extension_mgrs['resourceApi'].map_method('post_loadbalancer_member_create', obj_dict) + except Exception as e: + pass + + return {'loadbalancer-member': rsp_body} + #end loadbalancer_members_http_post + + def loadbalancer_members_http_get(self): + # gather list of uuids using 1. any specified anchors + # 2. any specified filters + # if not 'detail' return list with any specified 'fields' + # if 'detail' return list with props+refs + any specified 'fields' + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + parent_uuids = None + back_ref_uuids = None + obj_uuids = None + if (('parent_fq_name_str' in request.query) and + ('parent_type' in request.query)): + parent_fq_name = request.query.parent_fq_name_str.split(':') + parent_type = request.query.parent_type + parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)] + elif 'parent_id' in request.query: + parent_ids = request.query.parent_id.split(',') + parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids] + if 'back_ref_id' in request.query: + back_ref_ids = request.query.back_ref_id.split(',') + back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids] + if 'obj_uuids' in request.query: + obj_uuids = request.query.obj_uuids.split(',') + + # common handling for all resource get + (ok, result) = self._get_common(request, parent_uuids) + if not ok: + (code, msg) = result + self.config_object_error(None, None, 'loadbalancer_members', 'http_get_collection', msg) + abort(code, msg) + + if 'count' in request.query: + count = 'true' in request.query.count.lower() + else: + count = False + + filter_params = request.query.filters + if filter_params: + try: + ff_key_vals = filter_params.split(',') + ff_names = [ff.split('==')[0] for ff in ff_key_vals] + ff_values = [ff.split('==')[1] for ff in ff_key_vals] + filters = {'field_names': ff_names, 
'field_values': ff_values} + except Exception as e: + abort(400, 'Invalid filter ' + filter_params) + else: + filters = None + db_conn = self._db_conn + (ok, result) = \ + db_conn.dbe_list('loadbalancer-member', parent_uuids, back_ref_uuids, obj_uuids, count, filters) + if not ok: + self.config_object_error(None, None, 'loadbalancer_members', 'http_get_collection', result) + abort(404, result) + + # If only counting, return early + if count: + return {'loadbalancer-members': {'count': result}} + + if 'detail' in request.query: + detail = 'true' in request.query.detail.lower() + else: + detail = False + + fq_names_uuids = result + obj_dicts = [] + if not detail: + if not self.is_admin_request(): + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms'] + (ok, result) = db_conn.dbe_read_multi('loadbalancer-member', obj_ids_list, obj_fields) + if not ok: + result = [] + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + if obj_result['id_perms'].get('user_visible', True): + obj_dict = {} + obj_dict['uuid'] = obj_result['uuid'] + obj_dict['href'] = self.generate_url('loadbalancer-member', obj_result['uuid']) + obj_dict['fq_name'] = obj_result['fq_name'] + obj_dicts.append(obj_dict) + else: + for fq_name, obj_uuid in fq_names_uuids: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['uuid'] = obj_uuid + obj_dict['href'] = self.generate_url('loadbalancer-member', obj_uuid) + obj_dict['fq_name'] = fq_name + obj_dicts.append(obj_dict) + else: #detail + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'loadbalancer_member_properties', u'id_perms', u'display_name'] + [] + if 'fields' in request.query: + obj_fields.extend(request.query.fields.split(',')) + (ok, result) = db_conn.dbe_read_multi('loadbalancer-member', obj_ids_list, obj_fields) + + if not ok: + result = [] + + for obj_result in result: + # give chance for zk 
heartbeat/ping
                gevent.sleep(0)
                obj_dict = {}
                obj_dict['name'] = obj_result['fq_name'][-1]
                obj_dict['href'] = self.generate_url('loadbalancer-member', obj_result['uuid'])
                obj_dict.update(obj_result)
                # Hide user-invisible objects from non-admin callers.
                if (obj_dict['id_perms'].get('user_visible', True) or
                    self.is_admin_request()):
                    obj_dicts.append({'loadbalancer-member': obj_dict})

        return {'loadbalancer-members': obj_dicts}
    #end loadbalancer_members_http_get

    def _loadbalancer_member_create_default_children(self, parent_obj):
        # loadbalancer-member has no child object types in the schema;
        # this generated hook is intentionally a no-op.
        pass
    #end _loadbalancer_member_create_default_children

    def _loadbalancer_member_delete_default_children(self, parent_dict):
        # Counterpart of the create hook above; nothing to delete.
        pass
    #end _loadbalancer_member_delete_default_children

    def security_group_http_get(self, id):
        """Handle GET /security-group/<id>: read one object from the DB.

        Honors If-None-Match (ETag) for cache revalidation and the
        'fields', 'exclude_back_refs' and 'exclude_children' query
        parameters when building the field list passed to the DB layer.
        """
        # Best-effort pre-read extension hook; failures are ignored.
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_security_group_read', id)
        except Exception as e:
            pass

        # TODO get vals from request out of the global ASAP
        etag = request.headers.get('If-None-Match')
        # Verify the uuid actually names a security-group.
        try:
            obj_type = self._db_conn.uuid_to_obj_type(id)
        except NoIdError:
            obj_type = None
        if obj_type != 'security_group':
            abort(404, 'No security-group object found for id %s' %(id))
        # common handling for all resource get
        (ok, result) = self._get_common(request, id)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'security_group', 'http_get', msg)
            abort(code, msg)

        # type-specific hook
        r_class = self.get_resource_class('security-group')
        if r_class:
            r_class.http_get(id)

        db_conn = self._db_conn
        if etag:
            obj_ids = {'uuid': id}
            # Compare the client's ETag against the stored last-modified stamp.
            (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
            if not ok:
                # Not present in DB
                self.config_object_error(id, None, 'security_group', 'http_get', result)
                abort(404, result)

            is_latest = result
            if is_latest:
                # send Not-Modified, caches use this for read optimization
                response.status = 304
                return
        #end if etag

        obj_ids = {'uuid': id}

        # Generate field list for db layer
        properties = [u'security_group_id', 
u'configured_security_group_id', u'security_group_entries', u'id_perms', u'display_name'] + references = [] + back_references = [u'project_back_refs', 'virtual_machine_interface_back_refs'] + children = [u'access_control_lists'] + if 'fields' in request.query: + obj_fields = request.query.fields.split(',') + else: # default props + children + refs + backrefs + obj_fields = properties + references + if 'exclude_back_refs' not in request.query: + obj_fields = obj_fields + back_references + if 'exclude_children' not in request.query: + obj_fields = obj_fields + children + + (ok, result) = db_conn.dbe_read('security-group', obj_ids, obj_fields) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'security_group', 'http_get', result) + abort(404, result) + + # check visibility + if (not result['id_perms'].get('user_visible', True) and + not self.is_admin_request()): + result = 'This object is not visible by users: %s' % id + self.config_object_error(id, None, 'security_group', 'http_get', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('security-group', id) + rsp_body['name'] = result['fq_name'][-1] + rsp_body.update(result) + id_perms = result['id_perms'] + response.set_header('ETag', '"' + id_perms['last_modified'] + '"') + try: + self._extension_mgrs['resourceApi'].map_method('post_security_group_read', id, rsp_body) + except Exception as e: + pass + + return {'security-group': rsp_body} + #end security_group_http_get + + def security_group_http_put(self, id): + key = 'security-group' + obj_dict = request.json[key] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_security_group_update', id, obj_dict) + except Exception as e: + pass + + db_conn = self._db_conn + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'security_group': + abort(404, 'No security-group object found for id %s' %(id)) + fq_name = db_conn.uuid_to_fq_name(id) + except NoIdError as e: + 
abort(404, str(e)) + prop_dict = obj_dict.get('security_group_entries') + if prop_dict: + buf = cStringIO.StringIO() + xx_security_group_entries = PolicyEntriesType(**prop_dict) + xx_security_group_entries.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_security_group_entries = PolicyEntriesType() + try: + xx_security_group_entries.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource put + (ok, result) = self._put_common(request, 'security_group', id, fq_name, obj_dict) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'security_group', 'http_put', msg) + abort(code, msg) + + # Validate perms + objtype_list = [u'access_control_list'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + try: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + except NoIdError as e: + abort(404, str(e)) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. 
Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # type-specific hook + r_class = self.get_resource_class('security-group') + if r_class: + (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn) + if not ok: + (code, msg) = put_result + self.config_object_error(id, None, 'security_group', 'http_put', msg) + abort(code, msg) + callable = getattr(r_class, 'http_put_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn])) + + obj_ids = {'uuid': id} + try: + (ok, result) = db_conn.dbe_update('security-group', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'security_group', 'http_put', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('security-group', id) + + try: + self._extension_mgrs['resourceApi'].map_method('post_security_group_update', id, obj_dict) + except Exception as e: + pass + + return {'security-group': rsp_body} + #end security_group_http_put + + def security_group_http_delete(self, id): + db_conn = self._db_conn + # if obj doesn't exist return early + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'security_group': + abort(404, 'No security-group object found for id %s' %(id)) + _ = db_conn.uuid_to_fq_name(id) + except NoIdError: + abort(404, 'ID %s does not exist' %(id)) + + try: + self._extension_mgrs['resourceApi'].map_method('pre_security_group_delete', id) + except Exception as e: + pass + + # read in obj from db (accepting error) to get details of it + obj_ids = {'uuid': id} + back_references = [u'project_back_refs', 'virtual_machine_interface_back_refs'] + children = [u'access_control_lists'] + obj_fields = children + back_references + (read_ok, read_result) = db_conn.dbe_read('security-group', 
obj_ids, obj_fields) + if not read_ok: + if read_result.startswith('Unknown id:'): + abort(404, 'ID %s does not exist' %(id)) + else: + self.config_object_error(id, None, 'security_group', 'http_delete', read_result) + # proceed down to delete the resource + + # common handling for all resource delete + parent_type = read_result.get('parent_type', None) + (ok, del_result) = self._delete_common(request, 'security_group', id, parent_type) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'security_group', 'http_delete', msg) + abort(code, msg) + + fq_name = read_result['fq_name'] + ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('security-group', fq_name) + obj_ids['imid'] = ifmap_id + if parent_type: + parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1]) + obj_ids['parent_imid'] = parent_imid + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + + # type-specific hook + r_class = self.get_resource_class('security-group') + if r_class: + if read_ok: + # fail if non-default children or backrefs exist + virtual_machine_interface_back_refs = read_result.get('virtual_machine_interface_back_refs', None) + if virtual_machine_interface_back_refs: + back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_machine_interface_back_refs']] + back_ref_str = ', '.join(back_ref_urls) + err_msg = 'Back-References from ' + back_ref_str + ' still exist' + self.config_object_error(id, None, 'security_group', 'http_delete', err_msg) + abort(409, err_msg) + + + # Delete default children first + self._security_group_delete_default_children(read_result) + + (ok, del_result) = r_class.http_delete(id, read_result, db_conn) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'security_group', 'http_delete', msg) + abort(code, msg) + callable = getattr(r_class, 'http_delete_fail', None) + if callable: + 
cleanup_on_failure.append((callable, [id, read_result, db_conn])) + #end if read_ok + + try: + (ok, del_result) = db_conn.dbe_delete('security-group', obj_ids, read_result) + except Exception as e: + ok = False + del_result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'security_group', 'http_delete', del_result) + abort(409, del_result) + + try: + self._extension_mgrs['resourceApi'].map_method('post_security_group_delete', id, read_result) + except Exception as e: + pass + + #end security_group_http_delete + + def security_groups_http_post(self): + key = 'security-group' + obj_dict = request.json[key] + self._post_validate(key, obj_dict=obj_dict) + fq_name = obj_dict['fq_name'] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_security_group_create', obj_dict) + except Exception as e: + pass + + prop_dict = obj_dict.get('security_group_entries') + if prop_dict: + buf = cStringIO.StringIO() + xx_security_group_entries = PolicyEntriesType(**prop_dict) + xx_security_group_entries.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_security_group_entries = PolicyEntriesType() + try: + xx_security_group_entries.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource create + (ok, result) = self._post_common(request, 'security-group', obj_dict) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict.get('fq_name', [])) + self.config_object_error(None, fq_name_str, 'security_group', 'http_post', msg) + abort(code, msg) + + name = obj_dict['fq_name'][-1] + fq_name = obj_dict['fq_name'] + + 
db_conn = self._db_conn + + # if client gave parent_type of config-root, ignore and remove + if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root': + del obj_dict['parent_type'] + + if 'parent_type' in obj_dict: + # non config-root child, verify parent exists + parent_type = obj_dict['parent_type'] + parent_fq_name = obj_dict['fq_name'][:-1] + try: + parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name) + (ok, status) = self._permissions.check_perms_write(request, parent_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + self._permissions.set_user_role(request, obj_dict) + except NoIdError: + err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist' + fq_name_str = ':'.join(parent_fq_name) + self.config_object_error(None, fq_name_str, 'security_group', 'http_post', err_msg) + abort(400, err_msg) + + # Validate perms + objtype_list = [u'access_control_list'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # Alloc and Store id-mappings before creating entry on pubsub store. 
+ # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('security-group', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'security_group', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['security_group', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('security-group') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'security_group', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('security-group', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'security_group', 'http_post', result) + abort(404, result) + + rsp_body = {} + rsp_body['name'] = name + rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = self.generate_url('security-group', obj_ids['uuid']) + if 'parent_type' in obj_dict: + # non config-root child, send back parent uuid/href + rsp_body['parent_uuid'] = parent_uuid + rsp_body['parent_href'] = 
self.generate_url(parent_type, parent_uuid) + + try: + self._extension_mgrs['resourceApi'].map_method('post_security_group_create', obj_dict) + except Exception as e: + pass + + return {'security-group': rsp_body} + #end security_groups_http_post + + def security_groups_http_get(self): + # gather list of uuids using 1. any specified anchors + # 2. any specified filters + # if not 'detail' return list with any specified 'fields' + # if 'detail' return list with props+refs + any specified 'fields' + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + parent_uuids = None + back_ref_uuids = None + obj_uuids = None + if (('parent_fq_name_str' in request.query) and + ('parent_type' in request.query)): + parent_fq_name = request.query.parent_fq_name_str.split(':') + parent_type = request.query.parent_type + parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)] + elif 'parent_id' in request.query: + parent_ids = request.query.parent_id.split(',') + parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids] + if 'back_ref_id' in request.query: + back_ref_ids = request.query.back_ref_id.split(',') + back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids] + if 'obj_uuids' in request.query: + obj_uuids = request.query.obj_uuids.split(',') + + # common handling for all resource get + (ok, result) = self._get_common(request, parent_uuids) + if not ok: + (code, msg) = result + self.config_object_error(None, None, 'security_groups', 'http_get_collection', msg) + abort(code, msg) + + if 'count' in request.query: + count = 'true' in request.query.count.lower() + else: + count = False + + filter_params = request.query.filters + if filter_params: + try: + ff_key_vals = filter_params.split(',') + ff_names = [ff.split('==')[0] for ff in ff_key_vals] + ff_values = [ff.split('==')[1] for ff in ff_key_vals] + filters = {'field_names': ff_names, 'field_values': ff_values} + except Exception as e: + 
abort(400, 'Invalid filter ' + filter_params) + else: + filters = None + db_conn = self._db_conn + (ok, result) = \ + db_conn.dbe_list('security-group', parent_uuids, back_ref_uuids, obj_uuids, count, filters) + if not ok: + self.config_object_error(None, None, 'security_groups', 'http_get_collection', result) + abort(404, result) + + # If only counting, return early + if count: + return {'security-groups': {'count': result}} + + if 'detail' in request.query: + detail = 'true' in request.query.detail.lower() + else: + detail = False + + fq_names_uuids = result + obj_dicts = [] + if not detail: + if not self.is_admin_request(): + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms'] + (ok, result) = db_conn.dbe_read_multi('security-group', obj_ids_list, obj_fields) + if not ok: + result = [] + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + if obj_result['id_perms'].get('user_visible', True): + obj_dict = {} + obj_dict['uuid'] = obj_result['uuid'] + obj_dict['href'] = self.generate_url('security-group', obj_result['uuid']) + obj_dict['fq_name'] = obj_result['fq_name'] + obj_dicts.append(obj_dict) + else: + for fq_name, obj_uuid in fq_names_uuids: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['uuid'] = obj_uuid + obj_dict['href'] = self.generate_url('security-group', obj_uuid) + obj_dict['fq_name'] = fq_name + obj_dicts.append(obj_dict) + else: #detail + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'security_group_id', u'configured_security_group_id', u'security_group_entries', u'id_perms', u'display_name'] + [] + if 'fields' in request.query: + obj_fields.extend(request.query.fields.split(',')) + (ok, result) = db_conn.dbe_read_multi('security-group', obj_ids_list, obj_fields) + + if not ok: + result = [] + + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + 
obj_dict['name'] = obj_result['fq_name'][-1] + obj_dict['href'] = self.generate_url('security-group', obj_result['uuid']) + obj_dict.update(obj_result) + if (obj_dict['id_perms'].get('user_visible', True) or + self.is_admin_request()): + obj_dicts.append({'security-group': obj_dict}) + + return {'security-groups': obj_dicts} + #end security_groups_http_get + + def _security_group_create_default_children(self, parent_obj): + pass + #end _security_group_create_default_children + + def _security_group_delete_default_children(self, parent_dict): + # Delete a default child only if provisioned for + r_class = self.get_resource_class('virtual-machine-interface') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('access_control_lists') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-access-control-list': + default_child_id = has_info['href'].split('/')[-1] + self.access_control_list_http_delete(default_child_id) + break + + pass + #end _security_group_delete_default_children + + def provider_attachment_http_get(self, id): + try: + self._extension_mgrs['resourceApi'].map_method('pre_provider_attachment_read', id) + except Exception as e: + pass + + # TODO get vals from request out of the global ASAP + etag = request.headers.get('If-None-Match') + try: + obj_type = self._db_conn.uuid_to_obj_type(id) + except NoIdError: + obj_type = None + if obj_type != 'provider_attachment': + abort(404, 'No provider-attachment object found for id %s' %(id)) + # common handling for all resource get + (ok, result) = self._get_common(request, id) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'provider_attachment', 'http_get', msg) + abort(code, msg) + + # type-specific hook + r_class = self.get_resource_class('provider-attachment') + if r_class: + r_class.http_get(id) + + db_conn = self._db_conn + if etag: + obj_ids = {'uuid': id} + (ok, result) = 
db_conn.dbe_is_latest(obj_ids, etag.replace('"', '')) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'provider_attachment', 'http_get', result) + abort(404, result) + + is_latest = result + if is_latest: + # send Not-Modified, caches use this for read optimization + response.status = 304 + return + #end if etag + + obj_ids = {'uuid': id} + + # Generate field list for db layer + properties = [u'id_perms', u'display_name'] + references = ['virtual_router_refs'] + back_references = ['customer_attachment_back_refs'] + children = [] + if 'fields' in request.query: + obj_fields = request.query.fields.split(',') + else: # default props + children + refs + backrefs + obj_fields = properties + references + if 'exclude_back_refs' not in request.query: + obj_fields = obj_fields + back_references + if 'exclude_children' not in request.query: + obj_fields = obj_fields + children + + (ok, result) = db_conn.dbe_read('provider-attachment', obj_ids, obj_fields) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'provider_attachment', 'http_get', result) + abort(404, result) + + # check visibility + if (not result['id_perms'].get('user_visible', True) and + not self.is_admin_request()): + result = 'This object is not visible by users: %s' % id + self.config_object_error(id, None, 'provider_attachment', 'http_get', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('provider-attachment', id) + rsp_body['name'] = result['fq_name'][-1] + rsp_body.update(result) + id_perms = result['id_perms'] + response.set_header('ETag', '"' + id_perms['last_modified'] + '"') + try: + self._extension_mgrs['resourceApi'].map_method('post_provider_attachment_read', id, rsp_body) + except Exception as e: + pass + + return {'provider-attachment': rsp_body} + #end provider_attachment_http_get + + def provider_attachment_http_put(self, id): + key = 'provider-attachment' + obj_dict = request.json[key] + + 
try: + self._extension_mgrs['resourceApi'].map_method('pre_provider_attachment_update', id, obj_dict) + except Exception as e: + pass + + db_conn = self._db_conn + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'provider_attachment': + abort(404, 'No provider-attachment object found for id %s' %(id)) + fq_name = db_conn.uuid_to_fq_name(id) + except NoIdError as e: + abort(404, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource put + (ok, result) = self._put_common(request, 'provider_attachment', id, fq_name, obj_dict) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'provider_attachment', 'http_put', msg) + abort(code, msg) + + # Validate perms + objtype_list = ['virtual_router'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + try: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + except NoIdError as e: + abort(404, str(e)) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. 
Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # type-specific hook + r_class = self.get_resource_class('provider-attachment') + if r_class: + (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn) + if not ok: + (code, msg) = put_result + self.config_object_error(id, None, 'provider_attachment', 'http_put', msg) + abort(code, msg) + callable = getattr(r_class, 'http_put_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn])) + + obj_ids = {'uuid': id} + try: + (ok, result) = db_conn.dbe_update('provider-attachment', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'provider_attachment', 'http_put', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('provider-attachment', id) + + try: + self._extension_mgrs['resourceApi'].map_method('post_provider_attachment_update', id, obj_dict) + except Exception as e: + pass + + return {'provider-attachment': rsp_body} + #end provider_attachment_http_put + + def provider_attachment_http_delete(self, id): + db_conn = self._db_conn + # if obj doesn't exist return early + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'provider_attachment': + abort(404, 'No provider-attachment object found for id %s' %(id)) + _ = db_conn.uuid_to_fq_name(id) + except NoIdError: + abort(404, 'ID %s does not exist' %(id)) + + try: + self._extension_mgrs['resourceApi'].map_method('pre_provider_attachment_delete', id) + except Exception as e: + pass + + # read in obj from db (accepting error) to get details of it + obj_ids = {'uuid': id} + back_references = ['customer_attachment_back_refs'] + children = [] + obj_fields = children + back_references + (read_ok, read_result) = 
db_conn.dbe_read('provider-attachment', obj_ids, obj_fields) + if not read_ok: + if read_result.startswith('Unknown id:'): + abort(404, 'ID %s does not exist' %(id)) + else: + self.config_object_error(id, None, 'provider_attachment', 'http_delete', read_result) + # proceed down to delete the resource + + # common handling for all resource delete + parent_type = read_result.get('parent_type', None) + (ok, del_result) = self._delete_common(request, 'provider_attachment', id, parent_type) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'provider_attachment', 'http_delete', msg) + abort(code, msg) + + fq_name = read_result['fq_name'] + ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('provider-attachment', fq_name) + obj_ids['imid'] = ifmap_id + if parent_type: + parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1]) + obj_ids['parent_imid'] = parent_imid + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + + # type-specific hook + r_class = self.get_resource_class('provider-attachment') + if r_class: + if read_ok: + # fail if non-default children or backrefs exist + + # Delete default children first + self._provider_attachment_delete_default_children(read_result) + + (ok, del_result) = r_class.http_delete(id, read_result, db_conn) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'provider_attachment', 'http_delete', msg) + abort(code, msg) + callable = getattr(r_class, 'http_delete_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, read_result, db_conn])) + #end if read_ok + + try: + (ok, del_result) = db_conn.dbe_delete('provider-attachment', obj_ids, read_result) + except Exception as e: + ok = False + del_result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 
'provider_attachment', 'http_delete', del_result) + abort(409, del_result) + + try: + self._extension_mgrs['resourceApi'].map_method('post_provider_attachment_delete', id, read_result) + except Exception as e: + pass + + #end provider_attachment_http_delete + + def provider_attachments_http_post(self): + key = 'provider-attachment' + obj_dict = request.json[key] + self._post_validate(key, obj_dict=obj_dict) + fq_name = obj_dict['fq_name'] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_provider_attachment_create', obj_dict) + except Exception as e: + pass + + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource create + (ok, result) = self._post_common(request, 'provider-attachment', obj_dict) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict.get('fq_name', [])) + self.config_object_error(None, fq_name_str, 'provider_attachment', 'http_post', msg) + abort(code, msg) + + name = obj_dict['fq_name'][-1] + fq_name = obj_dict['fq_name'] + + db_conn = self._db_conn + + # Validate perms + objtype_list = ['virtual_router'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # Alloc and Store id-mappings before creating entry on pubsub store. 
+ # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('provider-attachment', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'provider_attachment', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['provider_attachment', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('provider-attachment') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'provider_attachment', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('provider-attachment', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'provider_attachment', 'http_post', result) + abort(404, result) + + rsp_body = {} + rsp_body['name'] = name + rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = self.generate_url('provider-attachment', obj_ids['uuid']) + + try: + self._extension_mgrs['resourceApi'].map_method('post_provider_attachment_create', obj_dict) + except Exception as e: + pass + 
+ return {'provider-attachment': rsp_body} + #end provider_attachments_http_post + + def provider_attachments_http_get(self): + # gather list of uuids using 1. any specified anchors + # 2. any specified filters + # if not 'detail' return list with any specified 'fields' + # if 'detail' return list with props+refs + any specified 'fields' + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + parent_uuids = None + back_ref_uuids = None + obj_uuids = None + if (('parent_fq_name_str' in request.query) and + ('parent_type' in request.query)): + parent_fq_name = request.query.parent_fq_name_str.split(':') + parent_type = request.query.parent_type + parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)] + elif 'parent_id' in request.query: + parent_ids = request.query.parent_id.split(',') + parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids] + if 'back_ref_id' in request.query: + back_ref_ids = request.query.back_ref_id.split(',') + back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids] + if 'obj_uuids' in request.query: + obj_uuids = request.query.obj_uuids.split(',') + + # common handling for all resource get + (ok, result) = self._get_common(request, parent_uuids) + if not ok: + (code, msg) = result + self.config_object_error(None, None, 'provider_attachments', 'http_get_collection', msg) + abort(code, msg) + + if 'count' in request.query: + count = 'true' in request.query.count.lower() + else: + count = False + + filter_params = request.query.filters + if filter_params: + try: + ff_key_vals = filter_params.split(',') + ff_names = [ff.split('==')[0] for ff in ff_key_vals] + ff_values = [ff.split('==')[1] for ff in ff_key_vals] + filters = {'field_names': ff_names, 'field_values': ff_values} + except Exception as e: + abort(400, 'Invalid filter ' + filter_params) + else: + filters = None + db_conn = self._db_conn + (ok, result) = \ + db_conn.dbe_list('provider-attachment', 
parent_uuids, back_ref_uuids, obj_uuids, count, filters) + if not ok: + self.config_object_error(None, None, 'provider_attachments', 'http_get_collection', result) + abort(404, result) + + # If only counting, return early + if count: + return {'provider-attachments': {'count': result}} + + if 'detail' in request.query: + detail = 'true' in request.query.detail.lower() + else: + detail = False + + fq_names_uuids = result + obj_dicts = [] + if not detail: + if not self.is_admin_request(): + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms'] + (ok, result) = db_conn.dbe_read_multi('provider-attachment', obj_ids_list, obj_fields) + if not ok: + result = [] + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + if obj_result['id_perms'].get('user_visible', True): + obj_dict = {} + obj_dict['uuid'] = obj_result['uuid'] + obj_dict['href'] = self.generate_url('provider-attachment', obj_result['uuid']) + obj_dict['fq_name'] = obj_result['fq_name'] + obj_dicts.append(obj_dict) + else: + for fq_name, obj_uuid in fq_names_uuids: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['uuid'] = obj_uuid + obj_dict['href'] = self.generate_url('provider-attachment', obj_uuid) + obj_dict['fq_name'] = fq_name + obj_dicts.append(obj_dict) + else: #detail + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms', u'display_name'] + ['virtual_router_refs'] + if 'fields' in request.query: + obj_fields.extend(request.query.fields.split(',')) + (ok, result) = db_conn.dbe_read_multi('provider-attachment', obj_ids_list, obj_fields) + + if not ok: + result = [] + + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['name'] = obj_result['fq_name'][-1] + obj_dict['href'] = self.generate_url('provider-attachment', obj_result['uuid']) + obj_dict.update(obj_result) + if 
(obj_dict['id_perms'].get('user_visible', True) or + self.is_admin_request()): + obj_dicts.append({'provider-attachment': obj_dict}) + + return {'provider-attachments': obj_dicts} + #end provider_attachments_http_get + + def _provider_attachment_create_default_children(self, parent_obj): + pass + #end _provider_attachment_create_default_children + + def _provider_attachment_delete_default_children(self, parent_dict): + pass + #end _provider_attachment_delete_default_children + + def virtual_machine_interface_http_get(self, id): + try: + self._extension_mgrs['resourceApi'].map_method('pre_virtual_machine_interface_read', id) + except Exception as e: + pass + + # TODO get vals from request out of the global ASAP + etag = request.headers.get('If-None-Match') + try: + obj_type = self._db_conn.uuid_to_obj_type(id) + except NoIdError: + obj_type = None + if obj_type != 'virtual_machine_interface': + abort(404, 'No virtual-machine-interface object found for id %s' %(id)) + # common handling for all resource get + (ok, result) = self._get_common(request, id) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'virtual_machine_interface', 'http_get', msg) + abort(code, msg) + + # type-specific hook + r_class = self.get_resource_class('virtual-machine-interface') + if r_class: + r_class.http_get(id) + + db_conn = self._db_conn + if etag: + obj_ids = {'uuid': id} + (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', '')) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'virtual_machine_interface', 'http_get', result) + abort(404, result) + + is_latest = result + if is_latest: + # send Not-Modified, caches use this for read optimization + response.status = 304 + return + #end if etag + + obj_ids = {'uuid': id} + + # Generate field list for db layer + properties = [u'virtual_machine_interface_mac_addresses', u'virtual_machine_interface_dhcp_option_list', u'virtual_machine_interface_host_routes', 
u'virtual_machine_interface_allowed_address_pairs', u'vrf_assign_table', u'virtual_machine_interface_device_owner', u'virtual_machine_interface_properties', u'id_perms', u'display_name'] + references = [u'qos_forwarding_class_refs', u'security_group_refs', 'virtual_machine_interface_refs', u'virtual_machine_refs', u'virtual_network_refs', 'routing_instance_refs', u'interface_route_table_refs'] + back_references = ['virtual_machine_interface_back_refs', u'virtual_machine_back_refs', u'project_back_refs', u'instance_ip_back_refs', u'subnet_back_refs', u'floating_ip_back_refs', u'logical_interface_back_refs', 'customer_attachment_back_refs', u'logical_router_back_refs', u'loadbalancer_pool_back_refs', u'virtual_ip_back_refs'] + children = [] + if 'fields' in request.query: + obj_fields = request.query.fields.split(',') + else: # default props + children + refs + backrefs + obj_fields = properties + references + if 'exclude_back_refs' not in request.query: + obj_fields = obj_fields + back_references + if 'exclude_children' not in request.query: + obj_fields = obj_fields + children + + (ok, result) = db_conn.dbe_read('virtual-machine-interface', obj_ids, obj_fields) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'virtual_machine_interface', 'http_get', result) + abort(404, result) + + # check visibility + if (not result['id_perms'].get('user_visible', True) and + not self.is_admin_request()): + result = 'This object is not visible by users: %s' % id + self.config_object_error(id, None, 'virtual_machine_interface', 'http_get', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('virtual-machine-interface', id) + rsp_body['name'] = result['fq_name'][-1] + rsp_body.update(result) + id_perms = result['id_perms'] + response.set_header('ETag', '"' + id_perms['last_modified'] + '"') + try: + self._extension_mgrs['resourceApi'].map_method('post_virtual_machine_interface_read', id, rsp_body) + 
except Exception as e: + pass + + return {'virtual-machine-interface': rsp_body} + #end virtual_machine_interface_http_get + + def virtual_machine_interface_http_put(self, id): + key = 'virtual-machine-interface' + obj_dict = request.json[key] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_virtual_machine_interface_update', id, obj_dict) + except Exception as e: + pass + + db_conn = self._db_conn + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'virtual_machine_interface': + abort(404, 'No virtual-machine-interface object found for id %s' %(id)) + fq_name = db_conn.uuid_to_fq_name(id) + except NoIdError as e: + abort(404, str(e)) + prop_dict = obj_dict.get('virtual_machine_interface_mac_addresses') + if prop_dict: + buf = cStringIO.StringIO() + xx_virtual_machine_interface_mac_addresses = MacAddressesType(**prop_dict) + xx_virtual_machine_interface_mac_addresses.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_virtual_machine_interface_mac_addresses = MacAddressesType() + try: + xx_virtual_machine_interface_mac_addresses.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('virtual_machine_interface_dhcp_option_list') + if prop_dict: + buf = cStringIO.StringIO() + xx_virtual_machine_interface_dhcp_option_list = DhcpOptionsListType(**prop_dict) + xx_virtual_machine_interface_dhcp_option_list.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_virtual_machine_interface_dhcp_option_list = DhcpOptionsListType() + try: + xx_virtual_machine_interface_dhcp_option_list.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('virtual_machine_interface_host_routes') + if prop_dict: + buf = cStringIO.StringIO() + xx_virtual_machine_interface_host_routes = RouteTableType(**prop_dict) + xx_virtual_machine_interface_host_routes.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_virtual_machine_interface_host_routes = RouteTableType() + try: + 
xx_virtual_machine_interface_host_routes.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('virtual_machine_interface_allowed_address_pairs') + if prop_dict: + buf = cStringIO.StringIO() + xx_virtual_machine_interface_allowed_address_pairs = AllowedAddressPairs(**prop_dict) + xx_virtual_machine_interface_allowed_address_pairs.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_virtual_machine_interface_allowed_address_pairs = AllowedAddressPairs() + try: + xx_virtual_machine_interface_allowed_address_pairs.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('vrf_assign_table') + if prop_dict: + buf = cStringIO.StringIO() + xx_vrf_assign_table = VrfAssignTableType(**prop_dict) + xx_vrf_assign_table.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_vrf_assign_table = VrfAssignTableType() + try: + xx_vrf_assign_table.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('virtual_machine_interface_properties') + if prop_dict: + buf = cStringIO.StringIO() + xx_virtual_machine_interface_properties = VirtualMachineInterfacePropertiesType(**prop_dict) + xx_virtual_machine_interface_properties.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_virtual_machine_interface_properties = VirtualMachineInterfacePropertiesType() + try: + xx_virtual_machine_interface_properties.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + for ref_dict in obj_dict.get('routing_instance_refs') or []: + buf = cStringIO.StringIO() + xx_routing_instance = PolicyBasedForwardingRuleType(**ref_dict['attr']) + xx_routing_instance.export(buf) + node = 
etree.fromstring(buf.getvalue()) + try: + xx_routing_instance.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource put + (ok, result) = self._put_common(request, 'virtual_machine_interface', id, fq_name, obj_dict) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'virtual_machine_interface', 'http_put', msg) + abort(code, msg) + + # Validate perms + objtype_list = [u'qos_forwarding_class', u'security_group', 'virtual_machine_interface', u'virtual_machine', u'virtual_network', 'routing_instance', u'interface_route_table'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + try: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + except NoIdError as e: + abort(404, str(e)) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # type-specific hook + r_class = self.get_resource_class('virtual-machine-interface') + if r_class: + (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn) + if not ok: + (code, msg) = put_result + self.config_object_error(id, None, 'virtual_machine_interface', 'http_put', msg) + abort(code, msg) + callable = getattr(r_class, 'http_put_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn])) + + obj_ids = {'uuid': id} + try: + (ok, result) = db_conn.dbe_update('virtual-machine-interface', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'virtual_machine_interface', 'http_put', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = 
self.generate_url('virtual-machine-interface', id) + + try: + self._extension_mgrs['resourceApi'].map_method('post_virtual_machine_interface_update', id, obj_dict) + except Exception as e: + pass + + return {'virtual-machine-interface': rsp_body} + #end virtual_machine_interface_http_put + + def virtual_machine_interface_http_delete(self, id): + db_conn = self._db_conn + # if obj doesn't exist return early + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'virtual_machine_interface': + abort(404, 'No virtual-machine-interface object found for id %s' %(id)) + _ = db_conn.uuid_to_fq_name(id) + except NoIdError: + abort(404, 'ID %s does not exist' %(id)) + + try: + self._extension_mgrs['resourceApi'].map_method('pre_virtual_machine_interface_delete', id) + except Exception as e: + pass + + # read in obj from db (accepting error) to get details of it + obj_ids = {'uuid': id} + back_references = ['virtual_machine_interface_back_refs', u'virtual_machine_back_refs', u'project_back_refs', u'instance_ip_back_refs', u'subnet_back_refs', u'floating_ip_back_refs', u'logical_interface_back_refs', 'customer_attachment_back_refs', u'logical_router_back_refs', u'loadbalancer_pool_back_refs', u'virtual_ip_back_refs'] + children = [] + obj_fields = children + back_references + (read_ok, read_result) = db_conn.dbe_read('virtual-machine-interface', obj_ids, obj_fields) + if not read_ok: + if read_result.startswith('Unknown id:'): + abort(404, 'ID %s does not exist' %(id)) + else: + self.config_object_error(id, None, 'virtual_machine_interface', 'http_delete', read_result) + # proceed down to delete the resource + + # common handling for all resource delete + parent_type = read_result.get('parent_type', None) + (ok, del_result) = self._delete_common(request, 'virtual_machine_interface', id, parent_type) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'virtual_machine_interface', 'http_delete', msg) + abort(code, msg) + + fq_name = 
read_result['fq_name'] + ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('virtual-machine-interface', fq_name) + obj_ids['imid'] = ifmap_id + if parent_type: + parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1]) + obj_ids['parent_imid'] = parent_imid + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + + # type-specific hook + r_class = self.get_resource_class('virtual-machine-interface') + if r_class: + if read_ok: + # fail if non-default children or backrefs exist + virtual_machine_interface_back_refs = read_result.get('virtual_machine_interface_back_refs', None) + if virtual_machine_interface_back_refs: + back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_machine_interface_back_refs']] + back_ref_str = ', '.join(back_ref_urls) + err_msg = 'Back-References from ' + back_ref_str + ' still exist' + self.config_object_error(id, None, 'virtual_machine_interface', 'http_delete', err_msg) + abort(409, err_msg) + + instance_ip_back_refs = read_result.get('instance_ip_back_refs', None) + if instance_ip_back_refs: + back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['instance_ip_back_refs']] + back_ref_str = ', '.join(back_ref_urls) + err_msg = 'Back-References from ' + back_ref_str + ' still exist' + self.config_object_error(id, None, 'virtual_machine_interface', 'http_delete', err_msg) + abort(409, err_msg) + + subnet_back_refs = read_result.get('subnet_back_refs', None) + if subnet_back_refs: + back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['subnet_back_refs']] + back_ref_str = ', '.join(back_ref_urls) + err_msg = 'Back-References from ' + back_ref_str + ' still exist' + self.config_object_error(id, None, 'virtual_machine_interface', 'http_delete', err_msg) + abort(409, err_msg) + + floating_ip_back_refs = read_result.get('floating_ip_back_refs', None) + if floating_ip_back_refs: + 
back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['floating_ip_back_refs']] + back_ref_str = ', '.join(back_ref_urls) + err_msg = 'Back-References from ' + back_ref_str + ' still exist' + self.config_object_error(id, None, 'virtual_machine_interface', 'http_delete', err_msg) + abort(409, err_msg) + + logical_interface_back_refs = read_result.get('logical_interface_back_refs', None) + if logical_interface_back_refs: + back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['logical_interface_back_refs']] + back_ref_str = ', '.join(back_ref_urls) + err_msg = 'Back-References from ' + back_ref_str + ' still exist' + self.config_object_error(id, None, 'virtual_machine_interface', 'http_delete', err_msg) + abort(409, err_msg) + + customer_attachment_back_refs = read_result.get('customer_attachment_back_refs', None) + if customer_attachment_back_refs: + back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['customer_attachment_back_refs']] + back_ref_str = ', '.join(back_ref_urls) + err_msg = 'Back-References from ' + back_ref_str + ' still exist' + self.config_object_error(id, None, 'virtual_machine_interface', 'http_delete', err_msg) + abort(409, err_msg) + + logical_router_back_refs = read_result.get('logical_router_back_refs', None) + if logical_router_back_refs: + back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['logical_router_back_refs']] + back_ref_str = ', '.join(back_ref_urls) + err_msg = 'Back-References from ' + back_ref_str + ' still exist' + self.config_object_error(id, None, 'virtual_machine_interface', 'http_delete', err_msg) + abort(409, err_msg) + + loadbalancer_pool_back_refs = read_result.get('loadbalancer_pool_back_refs', None) + if loadbalancer_pool_back_refs: + back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['loadbalancer_pool_back_refs']] + back_ref_str = ', '.join(back_ref_urls) + err_msg = 'Back-References from ' + back_ref_str + ' still exist' 
+ self.config_object_error(id, None, 'virtual_machine_interface', 'http_delete', err_msg) + abort(409, err_msg) + + virtual_ip_back_refs = read_result.get('virtual_ip_back_refs', None) + if virtual_ip_back_refs: + back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_ip_back_refs']] + back_ref_str = ', '.join(back_ref_urls) + err_msg = 'Back-References from ' + back_ref_str + ' still exist' + self.config_object_error(id, None, 'virtual_machine_interface', 'http_delete', err_msg) + abort(409, err_msg) + + + # Delete default children first + self._virtual_machine_interface_delete_default_children(read_result) + + (ok, del_result) = r_class.http_delete(id, read_result, db_conn) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'virtual_machine_interface', 'http_delete', msg) + abort(code, msg) + callable = getattr(r_class, 'http_delete_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, read_result, db_conn])) + #end if read_ok + + try: + (ok, del_result) = db_conn.dbe_delete('virtual-machine-interface', obj_ids, read_result) + except Exception as e: + ok = False + del_result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'virtual_machine_interface', 'http_delete', del_result) + abort(409, del_result) + + try: + self._extension_mgrs['resourceApi'].map_method('post_virtual_machine_interface_delete', id, read_result) + except Exception as e: + pass + + #end virtual_machine_interface_http_delete + + def virtual_machine_interfaces_http_post(self): + key = 'virtual-machine-interface' + obj_dict = request.json[key] + self._post_validate(key, obj_dict=obj_dict) + fq_name = obj_dict['fq_name'] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_virtual_machine_interface_create', obj_dict) + except Exception as e: + pass + + prop_dict = 
obj_dict.get('virtual_machine_interface_mac_addresses') + if prop_dict: + buf = cStringIO.StringIO() + xx_virtual_machine_interface_mac_addresses = MacAddressesType(**prop_dict) + xx_virtual_machine_interface_mac_addresses.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_virtual_machine_interface_mac_addresses = MacAddressesType() + try: + xx_virtual_machine_interface_mac_addresses.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('virtual_machine_interface_dhcp_option_list') + if prop_dict: + buf = cStringIO.StringIO() + xx_virtual_machine_interface_dhcp_option_list = DhcpOptionsListType(**prop_dict) + xx_virtual_machine_interface_dhcp_option_list.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_virtual_machine_interface_dhcp_option_list = DhcpOptionsListType() + try: + xx_virtual_machine_interface_dhcp_option_list.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('virtual_machine_interface_host_routes') + if prop_dict: + buf = cStringIO.StringIO() + xx_virtual_machine_interface_host_routes = RouteTableType(**prop_dict) + xx_virtual_machine_interface_host_routes.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_virtual_machine_interface_host_routes = RouteTableType() + try: + xx_virtual_machine_interface_host_routes.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('virtual_machine_interface_allowed_address_pairs') + if prop_dict: + buf = cStringIO.StringIO() + xx_virtual_machine_interface_allowed_address_pairs = AllowedAddressPairs(**prop_dict) + xx_virtual_machine_interface_allowed_address_pairs.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_virtual_machine_interface_allowed_address_pairs = AllowedAddressPairs() + try: + xx_virtual_machine_interface_allowed_address_pairs.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('vrf_assign_table') + if prop_dict: + buf = cStringIO.StringIO() 
+ xx_vrf_assign_table = VrfAssignTableType(**prop_dict) + xx_vrf_assign_table.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_vrf_assign_table = VrfAssignTableType() + try: + xx_vrf_assign_table.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('virtual_machine_interface_properties') + if prop_dict: + buf = cStringIO.StringIO() + xx_virtual_machine_interface_properties = VirtualMachineInterfacePropertiesType(**prop_dict) + xx_virtual_machine_interface_properties.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_virtual_machine_interface_properties = VirtualMachineInterfacePropertiesType() + try: + xx_virtual_machine_interface_properties.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + for ref_dict in obj_dict.get('routing_instance_refs') or []: + buf = cStringIO.StringIO() + xx_routing_instance = PolicyBasedForwardingRuleType(**ref_dict['attr']) + xx_routing_instance.export(buf) + node = etree.fromstring(buf.getvalue()) + try: + xx_routing_instance.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource create + (ok, result) = self._post_common(request, 'virtual-machine-interface', obj_dict) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict.get('fq_name', [])) + self.config_object_error(None, fq_name_str, 'virtual_machine_interface', 'http_post', msg) + abort(code, msg) + + name = obj_dict['fq_name'][-1] + fq_name = obj_dict['fq_name'] + + db_conn = self._db_conn + + # if client gave parent_type of config-root, ignore and remove + if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root': + del obj_dict['parent_type'] + + if 
'parent_type' in obj_dict: + # non config-root child, verify parent exists + parent_type = obj_dict['parent_type'] + parent_fq_name = obj_dict['fq_name'][:-1] + try: + parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name) + (ok, status) = self._permissions.check_perms_write(request, parent_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + self._permissions.set_user_role(request, obj_dict) + except NoIdError: + err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist' + fq_name_str = ':'.join(parent_fq_name) + self.config_object_error(None, fq_name_str, 'virtual_machine_interface', 'http_post', err_msg) + abort(400, err_msg) + + # Validate perms + objtype_list = [u'qos_forwarding_class', u'security_group', 'virtual_machine_interface', u'virtual_machine', u'virtual_network', 'routing_instance', u'interface_route_table'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # Alloc and Store id-mappings before creating entry on pubsub store. 
+ # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('virtual-machine-interface', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'virtual_machine_interface', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['virtual_machine_interface', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('virtual-machine-interface') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'virtual_machine_interface', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('virtual-machine-interface', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'virtual_machine_interface', 'http_post', result) + abort(404, result) + + rsp_body = {} + rsp_body['name'] = name + rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = self.generate_url('virtual-machine-interface', obj_ids['uuid']) + if 'parent_type' in obj_dict: + # non config-root child, send back parent uuid/href + 
rsp_body['parent_uuid'] = parent_uuid + rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid) + + try: + self._extension_mgrs['resourceApi'].map_method('post_virtual_machine_interface_create', obj_dict) + except Exception as e: + pass + + return {'virtual-machine-interface': rsp_body} + #end virtual_machine_interfaces_http_post + + def virtual_machine_interfaces_http_get(self): + # gather list of uuids using 1. any specified anchors + # 2. any specified filters + # if not 'detail' return list with any specified 'fields' + # if 'detail' return list with props+refs + any specified 'fields' + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + parent_uuids = None + back_ref_uuids = None + obj_uuids = None + if (('parent_fq_name_str' in request.query) and + ('parent_type' in request.query)): + parent_fq_name = request.query.parent_fq_name_str.split(':') + parent_type = request.query.parent_type + parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)] + elif 'parent_id' in request.query: + parent_ids = request.query.parent_id.split(',') + parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids] + if 'back_ref_id' in request.query: + back_ref_ids = request.query.back_ref_id.split(',') + back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids] + if 'obj_uuids' in request.query: + obj_uuids = request.query.obj_uuids.split(',') + + # common handling for all resource get + (ok, result) = self._get_common(request, parent_uuids) + if not ok: + (code, msg) = result + self.config_object_error(None, None, 'virtual_machine_interfaces', 'http_get_collection', msg) + abort(code, msg) + + if 'count' in request.query: + count = 'true' in request.query.count.lower() + else: + count = False + + filter_params = request.query.filters + if filter_params: + try: + ff_key_vals = filter_params.split(',') + ff_names = [ff.split('==')[0] for ff in ff_key_vals] + ff_values = [ff.split('==')[1] 
for ff in ff_key_vals] + filters = {'field_names': ff_names, 'field_values': ff_values} + except Exception as e: + abort(400, 'Invalid filter ' + filter_params) + else: + filters = None + db_conn = self._db_conn + (ok, result) = \ + db_conn.dbe_list('virtual-machine-interface', parent_uuids, back_ref_uuids, obj_uuids, count, filters) + if not ok: + self.config_object_error(None, None, 'virtual_machine_interfaces', 'http_get_collection', result) + abort(404, result) + + # If only counting, return early + if count: + return {'virtual-machine-interfaces': {'count': result}} + + if 'detail' in request.query: + detail = 'true' in request.query.detail.lower() + else: + detail = False + + fq_names_uuids = result + obj_dicts = [] + if not detail: + if not self.is_admin_request(): + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms'] + (ok, result) = db_conn.dbe_read_multi('virtual-machine-interface', obj_ids_list, obj_fields) + if not ok: + result = [] + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + if obj_result['id_perms'].get('user_visible', True): + obj_dict = {} + obj_dict['uuid'] = obj_result['uuid'] + obj_dict['href'] = self.generate_url('virtual-machine-interface', obj_result['uuid']) + obj_dict['fq_name'] = obj_result['fq_name'] + obj_dicts.append(obj_dict) + else: + for fq_name, obj_uuid in fq_names_uuids: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['uuid'] = obj_uuid + obj_dict['href'] = self.generate_url('virtual-machine-interface', obj_uuid) + obj_dict['fq_name'] = fq_name + obj_dicts.append(obj_dict) + else: #detail + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'virtual_machine_interface_mac_addresses', u'virtual_machine_interface_dhcp_option_list', u'virtual_machine_interface_host_routes', u'virtual_machine_interface_allowed_address_pairs', u'vrf_assign_table', 
u'virtual_machine_interface_device_owner', u'virtual_machine_interface_properties', u'id_perms', u'display_name'] + [u'qos_forwarding_class_refs', u'security_group_refs', 'virtual_machine_interface_refs', u'virtual_machine_refs', u'virtual_network_refs', 'routing_instance_refs', u'interface_route_table_refs'] + if 'fields' in request.query: + obj_fields.extend(request.query.fields.split(',')) + (ok, result) = db_conn.dbe_read_multi('virtual-machine-interface', obj_ids_list, obj_fields) + + if not ok: + result = [] + + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['name'] = obj_result['fq_name'][-1] + obj_dict['href'] = self.generate_url('virtual-machine-interface', obj_result['uuid']) + obj_dict.update(obj_result) + if (obj_dict['id_perms'].get('user_visible', True) or + self.is_admin_request()): + obj_dicts.append({'virtual-machine-interface': obj_dict}) + + return {'virtual-machine-interfaces': obj_dicts} + #end virtual_machine_interfaces_http_get + + def _virtual_machine_interface_create_default_children(self, parent_obj): + pass + #end _virtual_machine_interface_create_default_children + + def _virtual_machine_interface_delete_default_children(self, parent_dict): + pass + #end _virtual_machine_interface_delete_default_children + + def loadbalancer_healthmonitor_http_get(self, id): + try: + self._extension_mgrs['resourceApi'].map_method('pre_loadbalancer_healthmonitor_read', id) + except Exception as e: + pass + + # TODO get vals from request out of the global ASAP + etag = request.headers.get('If-None-Match') + try: + obj_type = self._db_conn.uuid_to_obj_type(id) + except NoIdError: + obj_type = None + if obj_type != 'loadbalancer_healthmonitor': + abort(404, 'No loadbalancer-healthmonitor object found for id %s' %(id)) + # common handling for all resource get + (ok, result) = self._get_common(request, id) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 
    def loadbalancer_healthmonitor_http_get(self, id):
        """GET handler for a single loadbalancer-healthmonitor resource.

        Validates the uuid's type, honors If-None-Match (ETag) for a 304
        short-circuit, reads the requested fields from the DB and returns
        {'loadbalancer-healthmonitor': <obj>}. Aborts 404 on unknown id or
        when the object is hidden from non-admin users.
        """
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_loadbalancer_healthmonitor_read', id)
        except Exception as e:
            # extension hooks are best-effort; failures are deliberately ignored
            pass

        # TODO get vals from request out of the global ASAP
        etag = request.headers.get('If-None-Match')
        try:
            obj_type = self._db_conn.uuid_to_obj_type(id)
        except NoIdError:
            obj_type = None
        if obj_type != 'loadbalancer_healthmonitor':
            abort(404, 'No loadbalancer-healthmonitor object found for id %s' %(id))
        # common handling for all resource get
        (ok, result) = self._get_common(request, id)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None,
                'loadbalancer_healthmonitor', 'http_get', msg)
            abort(code, msg)

        # type-specific hook
        r_class = self.get_resource_class('loadbalancer-healthmonitor')
        if r_class:
            r_class.http_get(id)

        db_conn = self._db_conn
        if etag:
            obj_ids = {'uuid': id}
            # strip the surrounding quotes the client echoes back in If-None-Match
            (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
            if not ok:
                # Not present in DB
                self.config_object_error(id, None, 'loadbalancer_healthmonitor', 'http_get', result)
                abort(404, result)

            is_latest = result
            if is_latest:
                # send Not-Modified, caches use this for read optimization
                response.status = 304
                return
        #end if etag

        obj_ids = {'uuid': id}

        # Generate field list for db layer
        properties = [u'loadbalancer_healthmonitor_properties', u'id_perms', u'display_name']
        references = []
        back_references = [u'project_back_refs', u'loadbalancer_pool_back_refs']
        children = []
        if 'fields' in request.query:
            obj_fields = request.query.fields.split(',')
        else: # default props + children + refs + backrefs
            obj_fields = properties + references
            if 'exclude_back_refs' not in request.query:
                obj_fields = obj_fields + back_references
            if 'exclude_children' not in request.query:
                obj_fields = obj_fields + children

        (ok, result) = db_conn.dbe_read('loadbalancer-healthmonitor', obj_ids, obj_fields)
        if not ok:
            # Not present in DB
            self.config_object_error(id, None, 'loadbalancer_healthmonitor', 'http_get', result)
            abort(404, result)

        # check visibility
        if (not result['id_perms'].get('user_visible', True) and
            not self.is_admin_request()):
            result = 'This object is not visible by users: %s' % id
            self.config_object_error(id, None, 'loadbalancer_healthmonitor', 'http_get', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('loadbalancer-healthmonitor', id)
        rsp_body['name'] = result['fq_name'][-1]
        rsp_body.update(result)
        id_perms = result['id_perms']
        # ETag is the last-modified timestamp, quoted per HTTP convention
        response.set_header('ETag', '"' +
            id_perms['last_modified'] + '"')
        try:
            self._extension_mgrs['resourceApi'].map_method('post_loadbalancer_healthmonitor_read', id, rsp_body)
        except Exception as e:
            pass

        return {'loadbalancer-healthmonitor': rsp_body}
    #end loadbalancer_healthmonitor_http_get
    def loadbalancer_healthmonitor_http_put(self, id):
        """PUT handler for a single loadbalancer-healthmonitor resource.

        Validates property dicts by round-tripping them through their
        generated XSD types, runs common/type-specific put hooks, updates
        the DB entry and returns {'loadbalancer-healthmonitor': {uuid, href}}.
        On any failure after state modification begins, registered cleanup
        callables are invoked before aborting.
        """
        key = 'loadbalancer-healthmonitor'
        obj_dict = request.json[key]

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_loadbalancer_healthmonitor_update', id, obj_dict)
        except Exception as e:
            # extension hooks are best-effort; failures are deliberately ignored
            pass

        db_conn = self._db_conn
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'loadbalancer_healthmonitor':
                abort(404, 'No loadbalancer-healthmonitor object found for id %s' %(id))
            fq_name = db_conn.uuid_to_fq_name(id)
        except NoIdError as e:
            abort(404, str(e))
        # Validate 'loadbalancer_healthmonitor_properties' by exporting the
        # dict to XML and rebuilding it; build() raises on schema violations.
        prop_dict = obj_dict.get('loadbalancer_healthmonitor_properties')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_loadbalancer_healthmonitor_properties = LoadbalancerHealthmonitorType(**prop_dict)
            xx_loadbalancer_healthmonitor_properties.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_loadbalancer_healthmonitor_properties = LoadbalancerHealthmonitorType()
            try:
                xx_loadbalancer_healthmonitor_properties.build(node)
            except Exception as e:
                abort(400, str(e))
        # Same XML round-trip validation for 'id_perms'.
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource put
        (ok, result) = self._put_common(request, 'loadbalancer_healthmonitor', id, fq_name, obj_dict)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'loadbalancer_healthmonitor', 'http_put', msg)
            abort(code, msg)


        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # type-specific hook
        r_class = self.get_resource_class('loadbalancer-healthmonitor')
        if r_class:
            (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
            if not ok:
                (code, msg) = put_result
                self.config_object_error(id, None, 'loadbalancer_healthmonitor', 'http_put', msg)
                abort(code, msg)
            # NOTE(review): local name 'callable' shadows the builtin (generated code)
            callable = getattr(r_class, 'http_put_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))

        obj_ids = {'uuid': id}
        try:
            (ok, result) = db_conn.dbe_update('loadbalancer-healthmonitor', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)
        if not ok:
            # roll back type-specific hook side effects before reporting failure
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'loadbalancer_healthmonitor', 'http_put', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('loadbalancer-healthmonitor', id)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_loadbalancer_healthmonitor_update', id, obj_dict)
        except Exception as e:
            pass

        return {'loadbalancer-healthmonitor': rsp_body}
    #end loadbalancer_healthmonitor_http_put
    def loadbalancer_healthmonitor_http_delete(self, id):
        """DELETE handler for a single loadbalancer-healthmonitor resource.

        Verifies the id/type, reads the object to discover back-references,
        refuses deletion (409) while loadbalancer-pool back-refs exist,
        deletes default children, then removes the DB entry. Returns None
        (empty body) on success.
        """
        db_conn = self._db_conn
        # if obj doesn't exist return early
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'loadbalancer_healthmonitor':
                abort(404, 'No loadbalancer-healthmonitor object found for id %s' %(id))
            _ = db_conn.uuid_to_fq_name(id)
        except NoIdError:
            abort(404, 'ID %s does not exist' %(id))

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_loadbalancer_healthmonitor_delete', id)
        except Exception as e:
            # extension hooks are best-effort; failures are deliberately ignored
            pass

        # read in obj from db (accepting error) to get details of it
        obj_ids = {'uuid': id}
        back_references = [u'project_back_refs', u'loadbalancer_pool_back_refs']
        children = []
        obj_fields = children + back_references
        (read_ok, read_result) = db_conn.dbe_read('loadbalancer-healthmonitor', obj_ids, obj_fields)
        if not read_ok:
            if read_result.startswith('Unknown id:'):
                abort(404, 'ID %s does not exist' %(id))
            else:
                self.config_object_error(id, None, 'loadbalancer_healthmonitor', 'http_delete', read_result)
            # proceed down to delete the resource
            # NOTE(review): when read failed for another reason, read_result is an
            # error string here; the .get()/['fq_name'] accesses below would raise
            # on it — presumably dbe_read only fails with 'Unknown id:'; verify.

        # common handling for all resource delete
        parent_type = read_result.get('parent_type', None)
        (ok, del_result) = self._delete_common(request, 'loadbalancer_healthmonitor', id, parent_type)
        if not ok:
            (code, msg) = del_result
            self.config_object_error(id, None, 'loadbalancer_healthmonitor', 'http_delete', msg)
            abort(code, msg)

        fq_name = read_result['fq_name']
        # IF-MAP identifiers for the object (and its parent) being removed
        ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('loadbalancer-healthmonitor', fq_name)
        obj_ids['imid'] = ifmap_id
        if parent_type:
            parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
            obj_ids['parent_imid'] = parent_imid

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []

        # type-specific hook
        r_class = self.get_resource_class('loadbalancer-healthmonitor')
        if r_class:
            if read_ok:
                # fail if non-default children or backrefs exist
                loadbalancer_pool_back_refs = read_result.get('loadbalancer_pool_back_refs', None)
                if loadbalancer_pool_back_refs:
                    back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['loadbalancer_pool_back_refs']]
                    back_ref_str = ', '.join(back_ref_urls)
                    err_msg = 'Back-References from ' + back_ref_str + ' still exist'
                    self.config_object_error(id, None, 'loadbalancer_healthmonitor', 'http_delete', err_msg)
                    abort(409, err_msg)


                # Delete default children first
                self._loadbalancer_healthmonitor_delete_default_children(read_result)

                (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
                if not ok:
                    (code, msg) = del_result
                    self.config_object_error(id, None, 'loadbalancer_healthmonitor', 'http_delete', msg)
                    abort(code, msg)
                # NOTE(review): local name 'callable' shadows the builtin (generated code)
                callable = getattr(r_class, 'http_delete_fail', None)
                if callable:
                    cleanup_on_failure.append((callable, [id, read_result, db_conn]))
            #end if read_ok

        try:
            (ok, del_result) = db_conn.dbe_delete('loadbalancer-healthmonitor', obj_ids, read_result)
        except Exception as e:
            ok = False
            del_result = str(e)
        if not ok:
            # roll back type-specific hook side effects before reporting failure
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'loadbalancer_healthmonitor', 'http_delete', del_result)
            abort(409, del_result)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_loadbalancer_healthmonitor_delete', id, read_result)
        except Exception as e:
            pass

    #end loadbalancer_healthmonitor_http_delete
    def loadbalancer_healthmonitors_http_post(self):
        """POST handler creating a loadbalancer-healthmonitor.

        Validates the posted dict (XSD round-trip of property fields),
        verifies/permission-checks the parent, allocates ids, runs the
        type-specific create hook and stores the object. Returns
        {'loadbalancer-healthmonitor': {name, fq_name, uuid, href, ...}}.
        Cleanup callables registered along the way are run on failure.
        """
        key = 'loadbalancer-healthmonitor'
        obj_dict = request.json[key]
        self._post_validate(key, obj_dict=obj_dict)
        fq_name = obj_dict['fq_name']

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_loadbalancer_healthmonitor_create', obj_dict)
        except Exception as e:
            # extension hooks are best-effort; failures are deliberately ignored
            pass

        # Validate property dicts by exporting to XML and rebuilding through
        # the generated XSD types; build() raises on schema violations.
        prop_dict = obj_dict.get('loadbalancer_healthmonitor_properties')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_loadbalancer_healthmonitor_properties = LoadbalancerHealthmonitorType(**prop_dict)
            xx_loadbalancer_healthmonitor_properties.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_loadbalancer_healthmonitor_properties = LoadbalancerHealthmonitorType()
            try:
                xx_loadbalancer_healthmonitor_properties.build(node)
            except Exception as e:
                abort(400, str(e))
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource create
        (ok, result) = self._post_common(request, 'loadbalancer-healthmonitor', obj_dict)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict.get('fq_name', []))
            self.config_object_error(None, fq_name_str, 'loadbalancer_healthmonitor', 'http_post', msg)
            abort(code, msg)

        name = obj_dict['fq_name'][-1]
        fq_name = obj_dict['fq_name']

        db_conn = self._db_conn

        # if client gave parent_type of config-root, ignore and remove
        if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
            del obj_dict['parent_type']

        if 'parent_type' in obj_dict:
            # non config-root child, verify parent exists
            parent_type = obj_dict['parent_type']
            parent_fq_name = obj_dict['fq_name'][:-1]
            try:
                parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
                (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
                if not ok:
                    (code, err_msg) = status
                    abort(code, err_msg)
                self._permissions.set_user_role(request, obj_dict)
            except NoIdError:
                err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
                fq_name_str = ':'.join(parent_fq_name)
                self.config_object_error(None, fq_name_str, 'loadbalancer_healthmonitor', 'http_post', err_msg)
                abort(400, err_msg)


        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # Alloc and Store id-mappings before creating entry on pubsub store.
        # Else a subscriber can ask for an id mapping before we have stored it
        # (result still holds the uuid _post_common returned)
        uuid_requested = result
        (ok, result) = db_conn.dbe_alloc('loadbalancer-healthmonitor', obj_dict, uuid_requested)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict['fq_name'])
            self.config_object_error(None, fq_name_str, 'loadbalancer_healthmonitor', 'http_post', result)
            abort(code, msg)
        cleanup_on_failure.append((db_conn.dbe_release, ['loadbalancer_healthmonitor', fq_name]))

        obj_ids = result

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')


        # type-specific hook
        r_class = self.get_resource_class('loadbalancer-healthmonitor')
        if r_class:
            try:
                (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
            except Exception as e:
                ok = False
                result = (500, str(e))
            if not ok:
                for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                    fail_cleanup_callable(*cleanup_args)
                (code, msg) = result
                fq_name_str = ':'.join(fq_name)
                self.config_object_error(None, fq_name_str, 'loadbalancer_healthmonitor', 'http_post', msg)
                abort(code, msg)
            # NOTE(review): local name 'callable' shadows the builtin (generated code)
            callable = getattr(r_class, 'http_post_collection_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))

        try:
            (ok, result) = \
                db_conn.dbe_create('loadbalancer-healthmonitor', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)

        if not ok:
            # roll back id allocation and hook side effects before failing
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            fq_name_str = ':'.join(fq_name)
            self.config_object_error(None, fq_name_str, 'loadbalancer_healthmonitor', 'http_post', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['name'] = name
        rsp_body['fq_name'] = fq_name
        rsp_body['uuid'] = obj_ids['uuid']
        rsp_body['href'] = self.generate_url('loadbalancer-healthmonitor', obj_ids['uuid'])
        if 'parent_type' in obj_dict:
            # non config-root child, send back parent uuid/href
            rsp_body['parent_uuid'] = parent_uuid
            rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_loadbalancer_healthmonitor_create', obj_dict)
        except Exception as e:
            pass

        return {'loadbalancer-healthmonitor': rsp_body}
    #end loadbalancer_healthmonitors_http_post
    def loadbalancer_healthmonitors_http_get(self):
        """GET handler for the loadbalancer-healthmonitor collection.

        Anchors the listing by parent fq-name/id, back-ref ids or explicit
        uuids taken from query parameters; supports 'count', '=='-style
        'filters', 'detail' and extra 'fields'. Returns
        {'loadbalancer-healthmonitors': [...]} (or {'count': N}).
        """
        # gather list of uuids using 1. any specified anchors
        # 2. any specified filters
        # if not 'detail' return list with any specified 'fields'
        # if 'detail' return list with props+refs + any specified 'fields'

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')
        parent_uuids = None
        back_ref_uuids = None
        obj_uuids = None
        if (('parent_fq_name_str' in request.query) and
            ('parent_type' in request.query)):
            parent_fq_name = request.query.parent_fq_name_str.split(':')
            parent_type = request.query.parent_type
            parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)]
        elif 'parent_id' in request.query:
            parent_ids = request.query.parent_id.split(',')
            # round-trip through uuid.UUID to normalize/validate the ids
            parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids]
        if 'back_ref_id' in request.query:
            back_ref_ids = request.query.back_ref_id.split(',')
            back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids]
        if 'obj_uuids' in request.query:
            obj_uuids = request.query.obj_uuids.split(',')

        # common handling for all resource get
        (ok, result) = self._get_common(request, parent_uuids)
        if not ok:
            (code, msg) = result
            self.config_object_error(None, None, 'loadbalancer_healthmonitors', 'http_get_collection', msg)
            abort(code, msg)

        if 'count' in request.query:
            count = 'true' in request.query.count.lower()
        else:
            count = False

        # filters are 'name==value' pairs, comma separated
        filter_params = request.query.filters
        if filter_params:
            try:
                ff_key_vals = filter_params.split(',')
                ff_names = [ff.split('==')[0] for ff in ff_key_vals]
                ff_values = [ff.split('==')[1] for ff in ff_key_vals]
                filters = {'field_names': ff_names, 'field_values': ff_values}
            except Exception as e:
                abort(400, 'Invalid filter ' + filter_params)
        else:
            filters = None
        db_conn = self._db_conn
        (ok, result) = \
            db_conn.dbe_list('loadbalancer-healthmonitor', parent_uuids, back_ref_uuids, obj_uuids, count, filters)
        if not ok:
            self.config_object_error(None, None, 'loadbalancer_healthmonitors', 'http_get_collection', result)
            abort(404, result)

        # If only counting, return early
        if count:
            return {'loadbalancer-healthmonitors': {'count': result}}

        if 'detail' in request.query:
            detail = 'true' in request.query.detail.lower()
        else:
            detail = False

        fq_names_uuids = result
        obj_dicts = []
        if not detail:
            if not self.is_admin_request():
                # non-admin: read id_perms to filter out user-invisible objects
                obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
                obj_fields = [u'id_perms']
                (ok, result) = db_conn.dbe_read_multi('loadbalancer-healthmonitor', obj_ids_list, obj_fields)
                if not ok:
                    result = []
                for obj_result in result:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    if obj_result['id_perms'].get('user_visible', True):
                        obj_dict = {}
                        obj_dict['uuid'] = obj_result['uuid']
                        obj_dict['href'] = self.generate_url('loadbalancer-healthmonitor', obj_result['uuid'])
                        obj_dict['fq_name'] = obj_result['fq_name']
                        obj_dicts.append(obj_dict)
            else:
                for fq_name, obj_uuid in fq_names_uuids:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    obj_dict = {}
                    obj_dict['uuid'] = obj_uuid
                    obj_dict['href'] = self.generate_url('loadbalancer-healthmonitor', obj_uuid)
                    obj_dict['fq_name'] = fq_name
                    obj_dicts.append(obj_dict)
        else: #detail
            obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids]
            obj_fields = [u'loadbalancer_healthmonitor_properties', u'id_perms', u'display_name'] + []
            if 'fields' in request.query:
                obj_fields.extend(request.query.fields.split(','))
            (ok, result) = db_conn.dbe_read_multi('loadbalancer-healthmonitor', obj_ids_list, obj_fields)

            if not ok:
                result = []

            for obj_result in result:
                # give chance for zk heartbeat/ping
                gevent.sleep(0)
                obj_dict = {}
                obj_dict['name'] = obj_result['fq_name'][-1]
                obj_dict['href'] = self.generate_url('loadbalancer-healthmonitor', obj_result['uuid'])
                obj_dict.update(obj_result)
                if (obj_dict['id_perms'].get('user_visible', True) or
                    self.is_admin_request()):
                    obj_dicts.append({'loadbalancer-healthmonitor': obj_dict})

        return {'loadbalancer-healthmonitors': obj_dicts}
    #end loadbalancer_healthmonitors_http_get
self.generate_url('loadbalancer-healthmonitor', obj_uuid) + obj_dict['fq_name'] = fq_name + obj_dicts.append(obj_dict) + else: #detail + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'loadbalancer_healthmonitor_properties', u'id_perms', u'display_name'] + [] + if 'fields' in request.query: + obj_fields.extend(request.query.fields.split(',')) + (ok, result) = db_conn.dbe_read_multi('loadbalancer-healthmonitor', obj_ids_list, obj_fields) + + if not ok: + result = [] + + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['name'] = obj_result['fq_name'][-1] + obj_dict['href'] = self.generate_url('loadbalancer-healthmonitor', obj_result['uuid']) + obj_dict.update(obj_result) + if (obj_dict['id_perms'].get('user_visible', True) or + self.is_admin_request()): + obj_dicts.append({'loadbalancer-healthmonitor': obj_dict}) + + return {'loadbalancer-healthmonitors': obj_dicts} + #end loadbalancer_healthmonitors_http_get + + def _loadbalancer_healthmonitor_create_default_children(self, parent_obj): + pass + #end _loadbalancer_healthmonitor_create_default_children + + def _loadbalancer_healthmonitor_delete_default_children(self, parent_dict): + pass + #end _loadbalancer_healthmonitor_delete_default_children + + def virtual_network_http_get(self, id): + try: + self._extension_mgrs['resourceApi'].map_method('pre_virtual_network_read', id) + except Exception as e: + pass + + # TODO get vals from request out of the global ASAP + etag = request.headers.get('If-None-Match') + try: + obj_type = self._db_conn.uuid_to_obj_type(id) + except NoIdError: + obj_type = None + if obj_type != 'virtual_network': + abort(404, 'No virtual-network object found for id %s' %(id)) + # common handling for all resource get + (ok, result) = self._get_common(request, id) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'virtual_network', 'http_get', msg) + abort(code, msg) + + # 
    def virtual_network_http_get(self, id):
        """GET handler for a single virtual-network resource.

        Validates the uuid's type, honors If-None-Match (ETag) for a 304
        short-circuit, reads the requested fields (props, refs, back-refs,
        children) from the DB and returns {'virtual-network': <obj>}.
        Aborts 404 on unknown id or when hidden from non-admin users.
        """
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_virtual_network_read', id)
        except Exception as e:
            # extension hooks are best-effort; failures are deliberately ignored
            pass

        # TODO get vals from request out of the global ASAP
        etag = request.headers.get('If-None-Match')
        try:
            obj_type = self._db_conn.uuid_to_obj_type(id)
        except NoIdError:
            obj_type = None
        if obj_type != 'virtual_network':
            abort(404, 'No virtual-network object found for id %s' %(id))
        # common handling for all resource get
        (ok, result) = self._get_common(request, id)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'virtual_network', 'http_get', msg)
            abort(code, msg)

        # type-specific hook
        r_class = self.get_resource_class('virtual-network')
        if r_class:
            r_class.http_get(id)

        db_conn = self._db_conn
        if etag:
            obj_ids = {'uuid': id}
            # strip the surrounding quotes the client echoes back in If-None-Match
            (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
            if not ok:
                # Not present in DB
                self.config_object_error(id, None, 'virtual_network', 'http_get', result)
                abort(404, result)

            is_latest = result
            if is_latest:
                # send Not-Modified, caches use this for read optimization
                response.status = 304
                return
        #end if etag

        obj_ids = {'uuid': id}

        # Generate field list for db layer
        properties = [u'virtual_network_properties', u'virtual_network_network_id', u'route_target_list', u'router_external', u'is_shared', u'external_ipam', u'flood_unknown_unicast', u'id_perms', u'display_name']
        references = [u'qos_forwarding_class_refs', u'network_ipam_refs', u'network_policy_refs', u'route_table_refs']
        back_references = [u'project_back_refs', 'virtual_machine_interface_back_refs', u'instance_ip_back_refs', u'physical_router_back_refs', u'logical_router_back_refs']
        children = [u'access_control_lists', u'floating_ip_pools', 'routing_instances']
        if 'fields' in request.query:
            obj_fields = request.query.fields.split(',')
        else: # default props + children + refs + backrefs
            obj_fields = properties + references
            if 'exclude_back_refs' not in request.query:
                obj_fields = obj_fields + back_references
            if 'exclude_children' not in request.query:
                obj_fields = obj_fields + children

        (ok, result) = db_conn.dbe_read('virtual-network', obj_ids, obj_fields)
        if not ok:
            # Not present in DB
            self.config_object_error(id, None, 'virtual_network', 'http_get', result)
            abort(404, result)

        # check visibility
        if (not result['id_perms'].get('user_visible', True) and
            not self.is_admin_request()):
            result = 'This object is not visible by users: %s' % id
            self.config_object_error(id, None, 'virtual_network', 'http_get', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('virtual-network', id)
        rsp_body['name'] = result['fq_name'][-1]
        rsp_body.update(result)
        id_perms = result['id_perms']
        # ETag is the last-modified timestamp, quoted per HTTP convention
        response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
        try:
            self._extension_mgrs['resourceApi'].map_method('post_virtual_network_read', id, rsp_body)
        except Exception as e:
            pass

        return {'virtual-network': rsp_body}
    #end virtual_network_http_get
    def virtual_network_http_put(self, id):
        """PUT handler for a single virtual-network resource.

        Validates property and reference-attribute dicts by round-tripping
        them through their generated XSD types, checks link permissions for
        each referenced object, runs common/type-specific put hooks and
        updates the DB entry. Returns {'virtual-network': {uuid, href}}.
        """
        key = 'virtual-network'
        obj_dict = request.json[key]

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_virtual_network_update', id, obj_dict)
        except Exception as e:
            # extension hooks are best-effort; failures are deliberately ignored
            pass

        db_conn = self._db_conn
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'virtual_network':
                abort(404, 'No virtual-network object found for id %s' %(id))
            fq_name = db_conn.uuid_to_fq_name(id)
        except NoIdError as e:
            abort(404, str(e))
        # Validate property dicts by exporting to XML and rebuilding through
        # the generated XSD types; build() raises on schema violations.
        prop_dict = obj_dict.get('virtual_network_properties')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_virtual_network_properties = VirtualNetworkType(**prop_dict)
            xx_virtual_network_properties.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_virtual_network_properties = VirtualNetworkType()
            try:
                xx_virtual_network_properties.build(node)
            except Exception as e:
                abort(400, str(e))
        prop_dict = obj_dict.get('route_target_list')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_route_target_list = RouteTargetList(**prop_dict)
            xx_route_target_list.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_route_target_list = RouteTargetList()
            try:
                xx_route_target_list.build(node)
            except Exception as e:
                abort(400, str(e))
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # Validate the 'attr' payload of each reference the same way.
        for ref_dict in obj_dict.get('network_ipam_refs') or []:
            buf = cStringIO.StringIO()
            xx_network_ipam = VnSubnetsType(**ref_dict['attr'])
            xx_network_ipam.export(buf)
            node = etree.fromstring(buf.getvalue())
            try:
                xx_network_ipam.build(node)
            except Exception as e:
                abort(400, str(e))
        for ref_dict in obj_dict.get('network_policy_refs') or []:
            buf = cStringIO.StringIO()
            xx_network_policy = VirtualNetworkPolicyType(**ref_dict['attr'])
            xx_network_policy.export(buf)
            node = etree.fromstring(buf.getvalue())
            try:
                xx_network_policy.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource put
        (ok, result) = self._put_common(request, 'virtual_network', id, fq_name, obj_dict)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'virtual_network', 'http_put', msg)
            abort(code, msg)

        # Validate perms
        objtype_list = [u'qos_forwarding_class', u'network_ipam', u'network_policy', u'access_control_list', u'floating_ip_pool', 'routing_instance', u'route_table']
        for obj_type in objtype_list:
            refs = obj_dict.get('%s_refs'%(obj_type), None)
            if refs:
                for ref in refs:
                    try:
                        ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
                    except NoIdError as e:
                        abort(404, str(e))
                    (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
                    if not ok:
                        (code, err_msg) = status
                        abort(code, err_msg)

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # type-specific hook
        r_class = self.get_resource_class('virtual-network')
        if r_class:
            (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn)
            if not ok:
                (code, msg) = put_result
                self.config_object_error(id, None, 'virtual_network', 'http_put', msg)
                abort(code, msg)
            # NOTE(review): local name 'callable' shadows the builtin (generated code)
            callable = getattr(r_class, 'http_put_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn]))

        obj_ids = {'uuid': id}
        try:
            (ok, result) = db_conn.dbe_update('virtual-network', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)
        if not ok:
            # roll back type-specific hook side effects before reporting failure
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'virtual_network', 'http_put', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('virtual-network', id)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_virtual_network_update', id, obj_dict)
        except Exception as e:
            pass

        return {'virtual-network': rsp_body}
    #end virtual_network_http_put
    def virtual_network_http_delete(self, id):
        """DELETE handler for a single virtual-network resource.

        Verifies the id/type, reads the object to discover children and
        back-references, refuses deletion (409) while non-default children
        (anything besides 'default-floating-ip-pool') or VMI/instance-ip/
        physical-router/logical-router back-refs exist, deletes default
        children, then removes the DB entry. Returns None on success.
        """
        db_conn = self._db_conn
        # if obj doesn't exist return early
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'virtual_network':
                abort(404, 'No virtual-network object found for id %s' %(id))
            _ = db_conn.uuid_to_fq_name(id)
        except NoIdError:
            abort(404, 'ID %s does not exist' %(id))

        try:
            self._extension_mgrs['resourceApi'].map_method('pre_virtual_network_delete', id)
        except Exception as e:
            # extension hooks are best-effort; failures are deliberately ignored
            pass

        # read in obj from db (accepting error) to get details of it
        obj_ids = {'uuid': id}
        back_references = [u'project_back_refs', 'virtual_machine_interface_back_refs', u'instance_ip_back_refs', u'physical_router_back_refs', u'logical_router_back_refs']
        children = [u'access_control_lists', u'floating_ip_pools', 'routing_instances']
        obj_fields = children + back_references
        (read_ok, read_result) = db_conn.dbe_read('virtual-network', obj_ids, obj_fields)
        if not read_ok:
            if read_result.startswith('Unknown id:'):
                abort(404, 'ID %s does not exist' %(id))
            else:
                self.config_object_error(id, None, 'virtual_network', 'http_delete', read_result)
            # proceed down to delete the resource
            # NOTE(review): when read failed for another reason, read_result is an
            # error string here; the .get()/['fq_name'] accesses below would raise
            # on it — presumably dbe_read only fails with 'Unknown id:'; verify.

        # common handling for all resource delete
        parent_type = read_result.get('parent_type', None)
        (ok, del_result) = self._delete_common(request, 'virtual_network', id, parent_type)
        if not ok:
            (code, msg) = del_result
            self.config_object_error(id, None, 'virtual_network', 'http_delete', msg)
            abort(code, msg)

        fq_name = read_result['fq_name']
        # IF-MAP identifiers for the object (and its parent) being removed
        ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('virtual-network', fq_name)
        obj_ids['imid'] = ifmap_id
        if parent_type:
            parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
            obj_ids['parent_imid'] = parent_imid

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []

        # type-specific hook
        r_class = self.get_resource_class('virtual-network')
        if r_class:
            if read_ok:
                # fail if non-default children or backrefs exist
                floating_ip_pools = read_result.get('floating_ip_pools', None)
                if floating_ip_pools:
                    has_infos = read_result['floating_ip_pools']
                    # the auto-created 'default-floating-ip-pool' child alone
                    # does not block deletion
                    if ((len(has_infos) > 1) or
                        (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-floating-ip-pool')):
                        has_urls = [has_info['href'] for has_info in has_infos]
                        has_str = ', '.join(has_urls)
                        err_msg = 'Children ' + has_str + ' still exist'
                        self.config_object_error(id, None, 'virtual_network', 'http_delete', err_msg)
                        abort(409, err_msg)

                virtual_machine_interface_back_refs = read_result.get('virtual_machine_interface_back_refs', None)
                if virtual_machine_interface_back_refs:
                    back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_machine_interface_back_refs']]
                    back_ref_str = ', '.join(back_ref_urls)
                    err_msg = 'Back-References from ' + back_ref_str + ' still exist'
                    self.config_object_error(id, None, 'virtual_network', 'http_delete', err_msg)
                    abort(409, err_msg)

                instance_ip_back_refs = read_result.get('instance_ip_back_refs', None)
                if instance_ip_back_refs:
                    back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['instance_ip_back_refs']]
                    back_ref_str = ', '.join(back_ref_urls)
                    err_msg = 'Back-References from ' + back_ref_str + ' still exist'
                    self.config_object_error(id, None, 'virtual_network', 'http_delete', err_msg)
                    abort(409, err_msg)

                physical_router_back_refs = read_result.get('physical_router_back_refs', None)
                if physical_router_back_refs:
                    back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['physical_router_back_refs']]
                    back_ref_str = ', '.join(back_ref_urls)
                    err_msg = 'Back-References from ' + back_ref_str + ' still exist'
                    self.config_object_error(id, None, 'virtual_network', 'http_delete', err_msg)
                    abort(409, err_msg)

                logical_router_back_refs = read_result.get('logical_router_back_refs', None)
                if logical_router_back_refs:
                    back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['logical_router_back_refs']]
                    back_ref_str = ', '.join(back_ref_urls)
                    err_msg = 'Back-References from ' + back_ref_str + ' still exist'
                    self.config_object_error(id, None, 'virtual_network', 'http_delete', err_msg)
                    abort(409, err_msg)


                # Delete default children first
                self._virtual_network_delete_default_children(read_result)

                (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
                if not ok:
                    (code, msg) = del_result
                    self.config_object_error(id, None, 'virtual_network', 'http_delete', msg)
                    abort(code, msg)
                # NOTE(review): local name 'callable' shadows the builtin (generated code)
                callable = getattr(r_class, 'http_delete_fail', None)
                if callable:
                    cleanup_on_failure.append((callable, [id, read_result, db_conn]))
            #end if read_ok

        try:
            (ok, del_result) = db_conn.dbe_delete('virtual-network', obj_ids, read_result)
        except Exception as e:
            ok = False
            del_result = str(e)
        if not ok:
            # roll back type-specific hook side effects before reporting failure
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'virtual_network', 'http_delete', del_result)
            abort(409, del_result)

        try:
            self._extension_mgrs['resourceApi'].map_method('post_virtual_network_delete', id, read_result)
        except Exception as e:
            pass

    #end virtual_network_http_delete
xx_virtual_network_properties.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_virtual_network_properties = VirtualNetworkType() + try: + xx_virtual_network_properties.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('route_target_list') + if prop_dict: + buf = cStringIO.StringIO() + xx_route_target_list = RouteTargetList(**prop_dict) + xx_route_target_list.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_route_target_list = RouteTargetList() + try: + xx_route_target_list.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + for ref_dict in obj_dict.get('network_ipam_refs') or []: + buf = cStringIO.StringIO() + xx_network_ipam = VnSubnetsType(**ref_dict['attr']) + xx_network_ipam.export(buf) + node = etree.fromstring(buf.getvalue()) + try: + xx_network_ipam.build(node) + except Exception as e: + abort(400, str(e)) + for ref_dict in obj_dict.get('network_policy_refs') or []: + buf = cStringIO.StringIO() + xx_network_policy = VirtualNetworkPolicyType(**ref_dict['attr']) + xx_network_policy.export(buf) + node = etree.fromstring(buf.getvalue()) + try: + xx_network_policy.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource create + (ok, result) = self._post_common(request, 'virtual-network', obj_dict) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict.get('fq_name', [])) + self.config_object_error(None, fq_name_str, 'virtual_network', 'http_post', msg) + abort(code, msg) + + name = obj_dict['fq_name'][-1] + fq_name = obj_dict['fq_name'] + + db_conn = self._db_conn + + # if client gave parent_type of config-root, ignore and remove + if 'parent_type' in 
obj_dict and obj_dict['parent_type'] == 'config-root': + del obj_dict['parent_type'] + + if 'parent_type' in obj_dict: + # non config-root child, verify parent exists + parent_type = obj_dict['parent_type'] + parent_fq_name = obj_dict['fq_name'][:-1] + try: + parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name) + (ok, status) = self._permissions.check_perms_write(request, parent_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + self._permissions.set_user_role(request, obj_dict) + except NoIdError: + err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist' + fq_name_str = ':'.join(parent_fq_name) + self.config_object_error(None, fq_name_str, 'virtual_network', 'http_post', err_msg) + abort(400, err_msg) + + # Validate perms + objtype_list = [u'qos_forwarding_class', u'network_ipam', u'network_policy', u'access_control_list', u'floating_ip_pool', 'routing_instance', u'route_table'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # Alloc and Store id-mappings before creating entry on pubsub store. 
+ # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('virtual-network', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'virtual_network', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['virtual_network', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('virtual-network') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'virtual_network', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('virtual-network', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'virtual_network', 'http_post', result) + abort(404, result) + + rsp_body = {} + rsp_body['name'] = name + rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = self.generate_url('virtual-network', obj_ids['uuid']) + if 'parent_type' in obj_dict: + # non config-root child, send back parent uuid/href + rsp_body['parent_uuid'] = parent_uuid + rsp_body['parent_href'] = 
self.generate_url(parent_type, parent_uuid) + + try: + self._extension_mgrs['resourceApi'].map_method('post_virtual_network_create', obj_dict) + except Exception as e: + pass + + return {'virtual-network': rsp_body} + #end virtual_networks_http_post + + def virtual_networks_http_get(self): + # gather list of uuids using 1. any specified anchors + # 2. any specified filters + # if not 'detail' return list with any specified 'fields' + # if 'detail' return list with props+refs + any specified 'fields' + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + parent_uuids = None + back_ref_uuids = None + obj_uuids = None + if (('parent_fq_name_str' in request.query) and + ('parent_type' in request.query)): + parent_fq_name = request.query.parent_fq_name_str.split(':') + parent_type = request.query.parent_type + parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)] + elif 'parent_id' in request.query: + parent_ids = request.query.parent_id.split(',') + parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids] + if 'back_ref_id' in request.query: + back_ref_ids = request.query.back_ref_id.split(',') + back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids] + if 'obj_uuids' in request.query: + obj_uuids = request.query.obj_uuids.split(',') + + # common handling for all resource get + (ok, result) = self._get_common(request, parent_uuids) + if not ok: + (code, msg) = result + self.config_object_error(None, None, 'virtual_networks', 'http_get_collection', msg) + abort(code, msg) + + if 'count' in request.query: + count = 'true' in request.query.count.lower() + else: + count = False + + filter_params = request.query.filters + if filter_params: + try: + ff_key_vals = filter_params.split(',') + ff_names = [ff.split('==')[0] for ff in ff_key_vals] + ff_values = [ff.split('==')[1] for ff in ff_key_vals] + filters = {'field_names': ff_names, 'field_values': ff_values} + except Exception as e: + 
abort(400, 'Invalid filter ' + filter_params) + else: + filters = None + db_conn = self._db_conn + (ok, result) = \ + db_conn.dbe_list('virtual-network', parent_uuids, back_ref_uuids, obj_uuids, count, filters) + if not ok: + self.config_object_error(None, None, 'virtual_networks', 'http_get_collection', result) + abort(404, result) + + # If only counting, return early + if count: + return {'virtual-networks': {'count': result}} + + if 'detail' in request.query: + detail = 'true' in request.query.detail.lower() + else: + detail = False + + fq_names_uuids = result + obj_dicts = [] + if not detail: + if not self.is_admin_request(): + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms'] + (ok, result) = db_conn.dbe_read_multi('virtual-network', obj_ids_list, obj_fields) + if not ok: + result = [] + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + if obj_result['id_perms'].get('user_visible', True): + obj_dict = {} + obj_dict['uuid'] = obj_result['uuid'] + obj_dict['href'] = self.generate_url('virtual-network', obj_result['uuid']) + obj_dict['fq_name'] = obj_result['fq_name'] + obj_dicts.append(obj_dict) + else: + for fq_name, obj_uuid in fq_names_uuids: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['uuid'] = obj_uuid + obj_dict['href'] = self.generate_url('virtual-network', obj_uuid) + obj_dict['fq_name'] = fq_name + obj_dicts.append(obj_dict) + else: #detail + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'virtual_network_properties', u'virtual_network_network_id', u'route_target_list', u'router_external', u'is_shared', u'external_ipam', u'flood_unknown_unicast', u'id_perms', u'display_name'] + [u'qos_forwarding_class_refs', u'network_ipam_refs', u'network_policy_refs', u'route_table_refs'] + if 'fields' in request.query: + obj_fields.extend(request.query.fields.split(',')) + (ok, result) = 
db_conn.dbe_read_multi('virtual-network', obj_ids_list, obj_fields) + + if not ok: + result = [] + + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['name'] = obj_result['fq_name'][-1] + obj_dict['href'] = self.generate_url('virtual-network', obj_result['uuid']) + obj_dict.update(obj_result) + if (obj_dict['id_perms'].get('user_visible', True) or + self.is_admin_request()): + obj_dicts.append({'virtual-network': obj_dict}) + + return {'virtual-networks': obj_dicts} + #end virtual_networks_http_get + + def _virtual_network_create_default_children(self, parent_obj): + # Create a default child only if provisioned for + r_class = self.get_resource_class('floating-ip-pool') + if r_class and r_class.generate_default_instance: + child_obj = FloatingIpPool(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('floating-ip-pool') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('floating-ip-pool', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('floating-ip-pool', obj_ids, child_dict) + self._floating_ip_pool_create_default_children(child_obj) + + pass + #end _virtual_network_create_default_children + + def _virtual_network_delete_default_children(self, parent_dict): + # Delete a default child only if provisioned for + r_class = self.get_resource_class('floating-ip-pool') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('access_control_lists') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-access-control-list': + default_child_id = has_info['href'].split('/')[-1] + self.access_control_list_http_delete(default_child_id) + break + + # Delete a default child only if provisioned for + r_class = self.get_resource_class('floating-ip-pool') + if r_class and 
r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('floating_ip_pools') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-floating-ip-pool': + default_child_id = has_info['href'].split('/')[-1] + self.floating_ip_pool_http_delete(default_child_id) + break + + # Delete a default child only if provisioned for + r_class = self.get_resource_class('floating-ip-pool') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('routing_instances') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-routing-instance': + default_child_id = has_info['href'].split('/')[-1] + self.routing_instance_http_delete(default_child_id) + break + + pass + #end _virtual_network_delete_default_children + + def project_http_get(self, id): + try: + self._extension_mgrs['resourceApi'].map_method('pre_project_read', id) + except Exception as e: + pass + + # TODO get vals from request out of the global ASAP + etag = request.headers.get('If-None-Match') + try: + obj_type = self._db_conn.uuid_to_obj_type(id) + except NoIdError: + obj_type = None + if obj_type != 'project': + abort(404, 'No project object found for id %s' %(id)) + # common handling for all resource get + (ok, result) = self._get_common(request, id) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'project', 'http_get', msg) + abort(code, msg) + + # type-specific hook + r_class = self.get_resource_class('project') + if r_class: + r_class.http_get(id) + + db_conn = self._db_conn + if etag: + obj_ids = {'uuid': id} + (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', '')) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'project', 'http_get', result) + abort(404, result) + + is_latest = result + if is_latest: + # send Not-Modified, caches use this for read optimization + response.status = 
304 + return + #end if etag + + obj_ids = {'uuid': id} + + # Generate field list for db layer + properties = [u'quota', u'id_perms', u'display_name'] + references = [u'namespace_refs', u'floating_ip_pool_refs'] + back_references = [u'domain_back_refs', u'floating_ip_back_refs'] + children = [u'security_groups', u'virtual_networks', u'qos_queues', u'qos_forwarding_classs', u'network_ipams', u'network_policys', 'virtual_machine_interfaces', u'service_instances', u'route_tables', u'interface_route_tables', u'logical_routers', u'loadbalancer_pools', u'loadbalancer_healthmonitors', u'virtual_ips'] + if 'fields' in request.query: + obj_fields = request.query.fields.split(',') + else: # default props + children + refs + backrefs + obj_fields = properties + references + if 'exclude_back_refs' not in request.query: + obj_fields = obj_fields + back_references + if 'exclude_children' not in request.query: + obj_fields = obj_fields + children + + (ok, result) = db_conn.dbe_read('project', obj_ids, obj_fields) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'project', 'http_get', result) + abort(404, result) + + # check visibility + if (not result['id_perms'].get('user_visible', True) and + not self.is_admin_request()): + result = 'This object is not visible by users: %s' % id + self.config_object_error(id, None, 'project', 'http_get', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('project', id) + rsp_body['name'] = result['fq_name'][-1] + rsp_body.update(result) + id_perms = result['id_perms'] + response.set_header('ETag', '"' + id_perms['last_modified'] + '"') + try: + self._extension_mgrs['resourceApi'].map_method('post_project_read', id, rsp_body) + except Exception as e: + pass + + return {'project': rsp_body} + #end project_http_get + + def project_http_put(self, id): + key = 'project' + obj_dict = request.json[key] + + try: + 
self._extension_mgrs['resourceApi'].map_method('pre_project_update', id, obj_dict) + except Exception as e: + pass + + db_conn = self._db_conn + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'project': + abort(404, 'No project object found for id %s' %(id)) + fq_name = db_conn.uuid_to_fq_name(id) + except NoIdError as e: + abort(404, str(e)) + prop_dict = obj_dict.get('quota') + if prop_dict: + buf = cStringIO.StringIO() + xx_quota = QuotaType(**prop_dict) + xx_quota.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_quota = QuotaType() + try: + xx_quota.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + for ref_dict in obj_dict.get('namespace_refs') or []: + buf = cStringIO.StringIO() + xx_namespace = SubnetType(**ref_dict['attr']) + xx_namespace.export(buf) + node = etree.fromstring(buf.getvalue()) + try: + xx_namespace.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource put + (ok, result) = self._put_common(request, 'project', id, fq_name, obj_dict) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'project', 'http_put', msg) + abort(code, msg) + + # Validate perms + objtype_list = [u'namespace', u'security_group', u'virtual_network', u'qos_queue', u'qos_forwarding_class', u'network_ipam', u'network_policy', 'virtual_machine_interface', u'floating_ip_pool', u'service_instance', u'route_table', u'interface_route_table', u'logical_router', u'loadbalancer_pool', u'loadbalancer_healthmonitor', u'virtual_ip'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + try: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, 
ref['to']) + except NoIdError as e: + abort(404, str(e)) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # type-specific hook + r_class = self.get_resource_class('project') + if r_class: + (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn) + if not ok: + (code, msg) = put_result + self.config_object_error(id, None, 'project', 'http_put', msg) + abort(code, msg) + callable = getattr(r_class, 'http_put_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn])) + + obj_ids = {'uuid': id} + try: + (ok, result) = db_conn.dbe_update('project', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'project', 'http_put', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('project', id) + + try: + self._extension_mgrs['resourceApi'].map_method('post_project_update', id, obj_dict) + except Exception as e: + pass + + return {'project': rsp_body} + #end project_http_put + + def project_http_delete(self, id): + db_conn = self._db_conn + # if obj doesn't exist return early + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'project': + abort(404, 'No project object found for id %s' %(id)) + _ = db_conn.uuid_to_fq_name(id) + except NoIdError: + abort(404, 'ID %s does not exist' %(id)) + + try: + self._extension_mgrs['resourceApi'].map_method('pre_project_delete', id) + except Exception as e: + pass + + # read in obj from db (accepting error) to get details of it + obj_ids = {'uuid': id} + back_references = [u'domain_back_refs', u'floating_ip_back_refs'] + 
children = [u'security_groups', u'virtual_networks', u'qos_queues', u'qos_forwarding_classs', u'network_ipams', u'network_policys', 'virtual_machine_interfaces', u'service_instances', u'route_tables', u'interface_route_tables', u'logical_routers', u'loadbalancer_pools', u'loadbalancer_healthmonitors', u'virtual_ips'] + obj_fields = children + back_references + (read_ok, read_result) = db_conn.dbe_read('project', obj_ids, obj_fields) + if not read_ok: + if read_result.startswith('Unknown id:'): + abort(404, 'ID %s does not exist' %(id)) + else: + self.config_object_error(id, None, 'project', 'http_delete', read_result) + # proceed down to delete the resource + + # common handling for all resource delete + parent_type = read_result.get('parent_type', None) + (ok, del_result) = self._delete_common(request, 'project', id, parent_type) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'project', 'http_delete', msg) + abort(code, msg) + + fq_name = read_result['fq_name'] + ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('project', fq_name) + obj_ids['imid'] = ifmap_id + if parent_type: + parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1]) + obj_ids['parent_imid'] = parent_imid + + # State modification starts from here. 
Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + + # type-specific hook + r_class = self.get_resource_class('project') + if r_class: + if read_ok: + # fail if non-default children or backrefs exist + security_groups = read_result.get('security_groups', None) + if security_groups: + has_infos = read_result['security_groups'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-security-group')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'project', 'http_delete', err_msg) + abort(409, err_msg) + + virtual_networks = read_result.get('virtual_networks', None) + if virtual_networks: + has_infos = read_result['virtual_networks'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-virtual-network')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'project', 'http_delete', err_msg) + abort(409, err_msg) + + qos_queues = read_result.get('qos_queues', None) + if qos_queues: + has_infos = read_result['qos_queues'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-qos-queue')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'project', 'http_delete', err_msg) + abort(409, err_msg) + + qos_forwarding_classs = read_result.get('qos_forwarding_classs', None) + if qos_forwarding_classs: + has_infos = read_result['qos_forwarding_classs'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-qos-forwarding-class')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + 
err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'project', 'http_delete', err_msg) + abort(409, err_msg) + + network_ipams = read_result.get('network_ipams', None) + if network_ipams: + has_infos = read_result['network_ipams'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-network-ipam')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'project', 'http_delete', err_msg) + abort(409, err_msg) + + network_policys = read_result.get('network_policys', None) + if network_policys: + has_infos = read_result['network_policys'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-network-policy')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'project', 'http_delete', err_msg) + abort(409, err_msg) + + virtual_machine_interfaces = read_result.get('virtual_machine_interfaces', None) + if virtual_machine_interfaces: + has_infos = read_result['virtual_machine_interfaces'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-virtual-machine-interface')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'project', 'http_delete', err_msg) + abort(409, err_msg) + + service_instances = read_result.get('service_instances', None) + if service_instances: + has_infos = read_result['service_instances'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-service-instance')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' 
still exist' + self.config_object_error(id, None, 'project', 'http_delete', err_msg) + abort(409, err_msg) + + route_tables = read_result.get('route_tables', None) + if route_tables: + has_infos = read_result['route_tables'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-route-table')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'project', 'http_delete', err_msg) + abort(409, err_msg) + + interface_route_tables = read_result.get('interface_route_tables', None) + if interface_route_tables: + has_infos = read_result['interface_route_tables'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-interface-route-table')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'project', 'http_delete', err_msg) + abort(409, err_msg) + + logical_routers = read_result.get('logical_routers', None) + if logical_routers: + has_infos = read_result['logical_routers'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-logical-router')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'project', 'http_delete', err_msg) + abort(409, err_msg) + + loadbalancer_pools = read_result.get('loadbalancer_pools', None) + if loadbalancer_pools: + has_infos = read_result['loadbalancer_pools'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-loadbalancer-pool')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'project', 
'http_delete', err_msg) + abort(409, err_msg) + + loadbalancer_healthmonitors = read_result.get('loadbalancer_healthmonitors', None) + if loadbalancer_healthmonitors: + has_infos = read_result['loadbalancer_healthmonitors'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-loadbalancer-healthmonitor')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'project', 'http_delete', err_msg) + abort(409, err_msg) + + virtual_ips = read_result.get('virtual_ips', None) + if virtual_ips: + has_infos = read_result['virtual_ips'] + if ((len(has_infos) > 1) or + (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-virtual-ip')): + has_urls = [has_info['href'] for has_info in has_infos] + has_str = ', '.join(has_urls) + err_msg = 'Children ' + has_str + ' still exist' + self.config_object_error(id, None, 'project', 'http_delete', err_msg) + abort(409, err_msg) + + floating_ip_back_refs = read_result.get('floating_ip_back_refs', None) + if floating_ip_back_refs: + back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['floating_ip_back_refs']] + back_ref_str = ', '.join(back_ref_urls) + err_msg = 'Back-References from ' + back_ref_str + ' still exist' + self.config_object_error(id, None, 'project', 'http_delete', err_msg) + abort(409, err_msg) + + + # Delete default children first + self._project_delete_default_children(read_result) + + (ok, del_result) = r_class.http_delete(id, read_result, db_conn) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'project', 'http_delete', msg) + abort(code, msg) + callable = getattr(r_class, 'http_delete_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, read_result, db_conn])) + #end if read_ok + + try: + (ok, del_result) = db_conn.dbe_delete('project', obj_ids, read_result) + except Exception as e: + 
ok = False + del_result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'project', 'http_delete', del_result) + abort(409, del_result) + + try: + self._extension_mgrs['resourceApi'].map_method('post_project_delete', id, read_result) + except Exception as e: + pass + + #end project_http_delete + + def projects_http_post(self): + key = 'project' + obj_dict = request.json[key] + self._post_validate(key, obj_dict=obj_dict) + fq_name = obj_dict['fq_name'] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_project_create', obj_dict) + except Exception as e: + pass + + prop_dict = obj_dict.get('quota') + if prop_dict: + buf = cStringIO.StringIO() + xx_quota = QuotaType(**prop_dict) + xx_quota.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_quota = QuotaType() + try: + xx_quota.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + for ref_dict in obj_dict.get('namespace_refs') or []: + buf = cStringIO.StringIO() + xx_namespace = SubnetType(**ref_dict['attr']) + xx_namespace.export(buf) + node = etree.fromstring(buf.getvalue()) + try: + xx_namespace.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource create + (ok, result) = self._post_common(request, 'project', obj_dict) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict.get('fq_name', [])) + self.config_object_error(None, fq_name_str, 'project', 'http_post', msg) + abort(code, msg) + + name = obj_dict['fq_name'][-1] + fq_name = obj_dict['fq_name'] + + db_conn = self._db_conn + + # if client gave parent_type of config-root, 
ignore and remove + if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root': + del obj_dict['parent_type'] + + if 'parent_type' in obj_dict: + # non config-root child, verify parent exists + parent_type = obj_dict['parent_type'] + parent_fq_name = obj_dict['fq_name'][:-1] + try: + parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name) + (ok, status) = self._permissions.check_perms_write(request, parent_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + self._permissions.set_user_role(request, obj_dict) + except NoIdError: + err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist' + fq_name_str = ':'.join(parent_fq_name) + self.config_object_error(None, fq_name_str, 'project', 'http_post', err_msg) + abort(400, err_msg) + + # Validate perms + objtype_list = [u'namespace', u'security_group', u'virtual_network', u'qos_queue', u'qos_forwarding_class', u'network_ipam', u'network_policy', 'virtual_machine_interface', u'floating_ip_pool', u'service_instance', u'route_table', u'interface_route_table', u'logical_router', u'loadbalancer_pool', u'loadbalancer_healthmonitor', u'virtual_ip'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # Alloc and Store id-mappings before creating entry on pubsub store. 
+ # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('project', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'project', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['project', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('project') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'project', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('project', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'project', 'http_post', result) + abort(404, result) + + rsp_body = {} + rsp_body['name'] = name + rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = self.generate_url('project', obj_ids['uuid']) + if 'parent_type' in obj_dict: + # non config-root child, send back parent uuid/href + rsp_body['parent_uuid'] = parent_uuid + rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid) + + try: + 
self._extension_mgrs['resourceApi'].map_method('post_project_create', obj_dict) + except Exception as e: + pass + + return {'project': rsp_body} + #end projects_http_post + + def projects_http_get(self): + # gather list of uuids using 1. any specified anchors + # 2. any specified filters + # if not 'detail' return list with any specified 'fields' + # if 'detail' return list with props+refs + any specified 'fields' + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + parent_uuids = None + back_ref_uuids = None + obj_uuids = None + if (('parent_fq_name_str' in request.query) and + ('parent_type' in request.query)): + parent_fq_name = request.query.parent_fq_name_str.split(':') + parent_type = request.query.parent_type + parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)] + elif 'parent_id' in request.query: + parent_ids = request.query.parent_id.split(',') + parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids] + if 'back_ref_id' in request.query: + back_ref_ids = request.query.back_ref_id.split(',') + back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids] + if 'obj_uuids' in request.query: + obj_uuids = request.query.obj_uuids.split(',') + + # common handling for all resource get + (ok, result) = self._get_common(request, parent_uuids) + if not ok: + (code, msg) = result + self.config_object_error(None, None, 'projects', 'http_get_collection', msg) + abort(code, msg) + + if 'count' in request.query: + count = 'true' in request.query.count.lower() + else: + count = False + + filter_params = request.query.filters + if filter_params: + try: + ff_key_vals = filter_params.split(',') + ff_names = [ff.split('==')[0] for ff in ff_key_vals] + ff_values = [ff.split('==')[1] for ff in ff_key_vals] + filters = {'field_names': ff_names, 'field_values': ff_values} + except Exception as e: + abort(400, 'Invalid filter ' + filter_params) + else: + filters = None + db_conn = self._db_conn 
+ (ok, result) = \ + db_conn.dbe_list('project', parent_uuids, back_ref_uuids, obj_uuids, count, filters) + if not ok: + self.config_object_error(None, None, 'projects', 'http_get_collection', result) + abort(404, result) + + # If only counting, return early + if count: + return {'projects': {'count': result}} + + if 'detail' in request.query: + detail = 'true' in request.query.detail.lower() + else: + detail = False + + fq_names_uuids = result + obj_dicts = [] + if not detail: + if not self.is_admin_request(): + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms'] + (ok, result) = db_conn.dbe_read_multi('project', obj_ids_list, obj_fields) + if not ok: + result = [] + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + if obj_result['id_perms'].get('user_visible', True): + obj_dict = {} + obj_dict['uuid'] = obj_result['uuid'] + obj_dict['href'] = self.generate_url('project', obj_result['uuid']) + obj_dict['fq_name'] = obj_result['fq_name'] + obj_dicts.append(obj_dict) + else: + for fq_name, obj_uuid in fq_names_uuids: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['uuid'] = obj_uuid + obj_dict['href'] = self.generate_url('project', obj_uuid) + obj_dict['fq_name'] = fq_name + obj_dicts.append(obj_dict) + else: #detail + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'quota', u'id_perms', u'display_name'] + [u'namespace_refs', u'floating_ip_pool_refs'] + if 'fields' in request.query: + obj_fields.extend(request.query.fields.split(',')) + (ok, result) = db_conn.dbe_read_multi('project', obj_ids_list, obj_fields) + + if not ok: + result = [] + + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['name'] = obj_result['fq_name'][-1] + obj_dict['href'] = self.generate_url('project', obj_result['uuid']) + obj_dict.update(obj_result) + if 
(obj_dict['id_perms'].get('user_visible', True) or + self.is_admin_request()): + obj_dicts.append({'project': obj_dict}) + + return {'projects': obj_dicts} + #end projects_http_get + + def _project_create_default_children(self, parent_obj): + # Create a default child only if provisioned for + r_class = self.get_resource_class('security-group') + if r_class and r_class.generate_default_instance: + child_obj = SecurityGroup(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('security-group') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('security-group', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('security-group', obj_ids, child_dict) + self._security_group_create_default_children(child_obj) + + # Create a default child only if provisioned for + r_class = self.get_resource_class('virtual-network') + if r_class and r_class.generate_default_instance: + child_obj = VirtualNetwork(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('virtual-network') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('virtual-network', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('virtual-network', obj_ids, child_dict) + self._virtual_network_create_default_children(child_obj) + + # Create a default child only if provisioned for + r_class = self.get_resource_class('qos-queue') + if r_class and r_class.generate_default_instance: + child_obj = QosQueue(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('qos-queue') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('qos-queue', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('qos-queue', 
obj_ids, child_dict) + self._qos_queue_create_default_children(child_obj) + + # Create a default child only if provisioned for + r_class = self.get_resource_class('qos-forwarding-class') + if r_class and r_class.generate_default_instance: + child_obj = QosForwardingClass(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('qos-forwarding-class') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('qos-forwarding-class', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('qos-forwarding-class', obj_ids, child_dict) + self._qos_forwarding_class_create_default_children(child_obj) + + # Create a default child only if provisioned for + r_class = self.get_resource_class('network-ipam') + if r_class and r_class.generate_default_instance: + child_obj = NetworkIpam(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('network-ipam') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('network-ipam', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('network-ipam', obj_ids, child_dict) + self._network_ipam_create_default_children(child_obj) + + # Create a default child only if provisioned for + r_class = self.get_resource_class('network-policy') + if r_class and r_class.generate_default_instance: + child_obj = NetworkPolicy(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('network-policy') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('network-policy', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('network-policy', obj_ids, child_dict) + self._network_policy_create_default_children(child_obj) + + # Create a default child only if 
provisioned for + r_class = self.get_resource_class('virtual-machine-interface') + if r_class and r_class.generate_default_instance: + child_obj = VirtualMachineInterface(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('virtual-machine-interface') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('virtual-machine-interface', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('virtual-machine-interface', obj_ids, child_dict) + self._virtual_machine_interface_create_default_children(child_obj) + + # Create a default child only if provisioned for + r_class = self.get_resource_class('service-instance') + if r_class and r_class.generate_default_instance: + child_obj = ServiceInstance(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('service-instance') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('service-instance', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('service-instance', obj_ids, child_dict) + self._service_instance_create_default_children(child_obj) + + # Create a default child only if provisioned for + r_class = self.get_resource_class('route-table') + if r_class and r_class.generate_default_instance: + child_obj = RouteTable(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('route-table') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('route-table', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('route-table', obj_ids, child_dict) + self._route_table_create_default_children(child_obj) + + # Create a default child only if provisioned for + r_class = self.get_resource_class('interface-route-table') + if 
r_class and r_class.generate_default_instance: + child_obj = InterfaceRouteTable(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('interface-route-table') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('interface-route-table', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('interface-route-table', obj_ids, child_dict) + self._interface_route_table_create_default_children(child_obj) + + # Create a default child only if provisioned for + r_class = self.get_resource_class('logical-router') + if r_class and r_class.generate_default_instance: + child_obj = LogicalRouter(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('logical-router') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('logical-router', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('logical-router', obj_ids, child_dict) + self._logical_router_create_default_children(child_obj) + + # Create a default child only if provisioned for + r_class = self.get_resource_class('loadbalancer-pool') + if r_class and r_class.generate_default_instance: + child_obj = LoadbalancerPool(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('loadbalancer-pool') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('loadbalancer-pool', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('loadbalancer-pool', obj_ids, child_dict) + self._loadbalancer_pool_create_default_children(child_obj) + + # Create a default child only if provisioned for + r_class = self.get_resource_class('loadbalancer-healthmonitor') + if r_class and r_class.generate_default_instance: + child_obj = 
LoadbalancerHealthmonitor(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('loadbalancer-healthmonitor') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('loadbalancer-healthmonitor', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('loadbalancer-healthmonitor', obj_ids, child_dict) + self._loadbalancer_healthmonitor_create_default_children(child_obj) + + # Create a default child only if provisioned for + r_class = self.get_resource_class('virtual-ip') + if r_class and r_class.generate_default_instance: + child_obj = VirtualIp(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('virtual-ip') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('virtual-ip', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('virtual-ip', obj_ids, child_dict) + self._virtual_ip_create_default_children(child_obj) + + pass + #end _project_create_default_children + + def _project_delete_default_children(self, parent_dict): + # Delete a default child only if provisioned for + r_class = self.get_resource_class('virtual-ip') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('security_groups') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-security-group': + default_child_id = has_info['href'].split('/')[-1] + self.security_group_http_delete(default_child_id) + break + + # Delete a default child only if provisioned for + r_class = self.get_resource_class('virtual-ip') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('virtual_networks') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 
'default-virtual-network': + default_child_id = has_info['href'].split('/')[-1] + self.virtual_network_http_delete(default_child_id) + break + + # Delete a default child only if provisioned for + r_class = self.get_resource_class('virtual-ip') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('qos_queues') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-qos-queue': + default_child_id = has_info['href'].split('/')[-1] + self.qos_queue_http_delete(default_child_id) + break + + # Delete a default child only if provisioned for + r_class = self.get_resource_class('virtual-ip') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('qos_forwarding_classs') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-qos-forwarding-class': + default_child_id = has_info['href'].split('/')[-1] + self.qos_forwarding_class_http_delete(default_child_id) + break + + # Delete a default child only if provisioned for + r_class = self.get_resource_class('virtual-ip') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('network_ipams') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-network-ipam': + default_child_id = has_info['href'].split('/')[-1] + self.network_ipam_http_delete(default_child_id) + break + + # Delete a default child only if provisioned for + r_class = self.get_resource_class('virtual-ip') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('network_policys') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-network-policy': + default_child_id = has_info['href'].split('/')[-1] + self.network_policy_http_delete(default_child_id) + break + + # Delete a 
default child only if provisioned for + r_class = self.get_resource_class('virtual-ip') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('virtual_machine_interfaces') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-virtual-machine-interface': + default_child_id = has_info['href'].split('/')[-1] + self.virtual_machine_interface_http_delete(default_child_id) + break + + # Delete a default child only if provisioned for + r_class = self.get_resource_class('virtual-ip') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('service_instances') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-service-instance': + default_child_id = has_info['href'].split('/')[-1] + self.service_instance_http_delete(default_child_id) + break + + # Delete a default child only if provisioned for + r_class = self.get_resource_class('virtual-ip') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('route_tables') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-route-table': + default_child_id = has_info['href'].split('/')[-1] + self.route_table_http_delete(default_child_id) + break + + # Delete a default child only if provisioned for + r_class = self.get_resource_class('virtual-ip') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('interface_route_tables') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-interface-route-table': + default_child_id = has_info['href'].split('/')[-1] + self.interface_route_table_http_delete(default_child_id) + break + + # Delete a default child only if provisioned for + r_class = self.get_resource_class('virtual-ip') + if r_class and 
r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('logical_routers') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-logical-router': + default_child_id = has_info['href'].split('/')[-1] + self.logical_router_http_delete(default_child_id) + break + + # Delete a default child only if provisioned for + r_class = self.get_resource_class('virtual-ip') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('loadbalancer_pools') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-loadbalancer-pool': + default_child_id = has_info['href'].split('/')[-1] + self.loadbalancer_pool_http_delete(default_child_id) + break + + # Delete a default child only if provisioned for + r_class = self.get_resource_class('virtual-ip') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('loadbalancer_healthmonitors') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-loadbalancer-healthmonitor': + default_child_id = has_info['href'].split('/')[-1] + self.loadbalancer_healthmonitor_http_delete(default_child_id) + break + + # Delete a default child only if provisioned for + r_class = self.get_resource_class('virtual-ip') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('virtual_ips') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-virtual-ip': + default_child_id = has_info['href'].split('/')[-1] + self.virtual_ip_http_delete(default_child_id) + break + + pass + #end _project_delete_default_children + + def qos_forwarding_class_http_get(self, id): + try: + self._extension_mgrs['resourceApi'].map_method('pre_qos_forwarding_class_read', id) + except Exception as e: + pass + + # TODO get vals 
from request out of the global ASAP + etag = request.headers.get('If-None-Match') + try: + obj_type = self._db_conn.uuid_to_obj_type(id) + except NoIdError: + obj_type = None + if obj_type != 'qos_forwarding_class': + abort(404, 'No qos-forwarding-class object found for id %s' %(id)) + # common handling for all resource get + (ok, result) = self._get_common(request, id) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'qos_forwarding_class', 'http_get', msg) + abort(code, msg) + + # type-specific hook + r_class = self.get_resource_class('qos-forwarding-class') + if r_class: + r_class.http_get(id) + + db_conn = self._db_conn + if etag: + obj_ids = {'uuid': id} + (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', '')) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'qos_forwarding_class', 'http_get', result) + abort(404, result) + + is_latest = result + if is_latest: + # send Not-Modified, caches use this for read optimization + response.status = 304 + return + #end if etag + + obj_ids = {'uuid': id} + + # Generate field list for db layer + properties = [u'dscp', u'trusted', u'id_perms', u'display_name'] + references = [u'qos_queue_refs'] + back_references = [u'project_back_refs', u'virtual_network_back_refs', 'virtual_machine_interface_back_refs'] + children = [] + if 'fields' in request.query: + obj_fields = request.query.fields.split(',') + else: # default props + children + refs + backrefs + obj_fields = properties + references + if 'exclude_back_refs' not in request.query: + obj_fields = obj_fields + back_references + if 'exclude_children' not in request.query: + obj_fields = obj_fields + children + + (ok, result) = db_conn.dbe_read('qos-forwarding-class', obj_ids, obj_fields) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'qos_forwarding_class', 'http_get', result) + abort(404, result) + + # check visibility + if (not result['id_perms'].get('user_visible', True) and + not 
self.is_admin_request()): + result = 'This object is not visible by users: %s' % id + self.config_object_error(id, None, 'qos_forwarding_class', 'http_get', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('qos-forwarding-class', id) + rsp_body['name'] = result['fq_name'][-1] + rsp_body.update(result) + id_perms = result['id_perms'] + response.set_header('ETag', '"' + id_perms['last_modified'] + '"') + try: + self._extension_mgrs['resourceApi'].map_method('post_qos_forwarding_class_read', id, rsp_body) + except Exception as e: + pass + + return {'qos-forwarding-class': rsp_body} + #end qos_forwarding_class_http_get + + def qos_forwarding_class_http_put(self, id): + key = 'qos-forwarding-class' + obj_dict = request.json[key] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_qos_forwarding_class_update', id, obj_dict) + except Exception as e: + pass + + db_conn = self._db_conn + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'qos_forwarding_class': + abort(404, 'No qos-forwarding-class object found for id %s' %(id)) + fq_name = db_conn.uuid_to_fq_name(id) + except NoIdError as e: + abort(404, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource put + (ok, result) = self._put_common(request, 'qos_forwarding_class', id, fq_name, obj_dict) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'qos_forwarding_class', 'http_put', msg) + abort(code, msg) + + # Validate perms + objtype_list = [u'qos_queue'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + try: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, 
ref['to']) + except NoIdError as e: + abort(404, str(e)) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # type-specific hook + r_class = self.get_resource_class('qos-forwarding-class') + if r_class: + (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn) + if not ok: + (code, msg) = put_result + self.config_object_error(id, None, 'qos_forwarding_class', 'http_put', msg) + abort(code, msg) + callable = getattr(r_class, 'http_put_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn])) + + obj_ids = {'uuid': id} + try: + (ok, result) = db_conn.dbe_update('qos-forwarding-class', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'qos_forwarding_class', 'http_put', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('qos-forwarding-class', id) + + try: + self._extension_mgrs['resourceApi'].map_method('post_qos_forwarding_class_update', id, obj_dict) + except Exception as e: + pass + + return {'qos-forwarding-class': rsp_body} + #end qos_forwarding_class_http_put + + def qos_forwarding_class_http_delete(self, id): + db_conn = self._db_conn + # if obj doesn't exist return early + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'qos_forwarding_class': + abort(404, 'No qos-forwarding-class object found for id %s' %(id)) + _ = db_conn.uuid_to_fq_name(id) + except NoIdError: + abort(404, 'ID %s does not exist' %(id)) + + try: + self._extension_mgrs['resourceApi'].map_method('pre_qos_forwarding_class_delete', id) + except Exception as e: + pass + + # read 
in obj from db (accepting error) to get details of it + obj_ids = {'uuid': id} + back_references = [u'project_back_refs', u'virtual_network_back_refs', 'virtual_machine_interface_back_refs'] + children = [] + obj_fields = children + back_references + (read_ok, read_result) = db_conn.dbe_read('qos-forwarding-class', obj_ids, obj_fields) + if not read_ok: + if read_result.startswith('Unknown id:'): + abort(404, 'ID %s does not exist' %(id)) + else: + self.config_object_error(id, None, 'qos_forwarding_class', 'http_delete', read_result) + # proceed down to delete the resource + + # common handling for all resource delete + parent_type = read_result.get('parent_type', None) + (ok, del_result) = self._delete_common(request, 'qos_forwarding_class', id, parent_type) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'qos_forwarding_class', 'http_delete', msg) + abort(code, msg) + + fq_name = read_result['fq_name'] + ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('qos-forwarding-class', fq_name) + obj_ids['imid'] = ifmap_id + if parent_type: + parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1]) + obj_ids['parent_imid'] = parent_imid + + # State modification starts from here. 
Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + + # type-specific hook + r_class = self.get_resource_class('qos-forwarding-class') + if r_class: + if read_ok: + # fail if non-default children or backrefs exist + virtual_network_back_refs = read_result.get('virtual_network_back_refs', None) + if virtual_network_back_refs: + back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_network_back_refs']] + back_ref_str = ', '.join(back_ref_urls) + err_msg = 'Back-References from ' + back_ref_str + ' still exist' + self.config_object_error(id, None, 'qos_forwarding_class', 'http_delete', err_msg) + abort(409, err_msg) + + virtual_machine_interface_back_refs = read_result.get('virtual_machine_interface_back_refs', None) + if virtual_machine_interface_back_refs: + back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_machine_interface_back_refs']] + back_ref_str = ', '.join(back_ref_urls) + err_msg = 'Back-References from ' + back_ref_str + ' still exist' + self.config_object_error(id, None, 'qos_forwarding_class', 'http_delete', err_msg) + abort(409, err_msg) + + + # Delete default children first + self._qos_forwarding_class_delete_default_children(read_result) + + (ok, del_result) = r_class.http_delete(id, read_result, db_conn) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'qos_forwarding_class', 'http_delete', msg) + abort(code, msg) + callable = getattr(r_class, 'http_delete_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, read_result, db_conn])) + #end if read_ok + + try: + (ok, del_result) = db_conn.dbe_delete('qos-forwarding-class', obj_ids, read_result) + except Exception as e: + ok = False + del_result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'qos_forwarding_class', 'http_delete', del_result) + abort(409, 
    def qos_forwarding_classs_http_post(self):
        """REST handler for creating a qos-forwarding-class (POST on collection).

        Flow: validate body -> pre-create extension hook -> validate id_perms
        property -> common create handling -> parent/ref permission checks ->
        allocate ids -> type-specific hook -> persist via db layer.
        Any failure after state modification starts is rolled back by running
        the callbacks accumulated in cleanup_on_failure.
        Returns {'qos-forwarding-class': {...name/fq_name/uuid/href...}}.
        """
        key = 'qos-forwarding-class'
        obj_dict = request.json[key]
        self._post_validate(key, obj_dict=obj_dict)
        fq_name = obj_dict['fq_name']

        # best-effort pre-create extension hook; errors are ignored
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_qos_forwarding_class_create', obj_dict)
        except Exception as e:
            pass

        # Validate the 'id_perms' property by round-tripping it through the
        # generated XML type (export then re-build); abort 400 if malformed.
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource create
        (ok, result) = self._post_common(request, 'qos-forwarding-class', obj_dict)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict.get('fq_name', []))
            self.config_object_error(None, fq_name_str, 'qos_forwarding_class', 'http_post', msg)
            abort(code, msg)

        name = obj_dict['fq_name'][-1]
        fq_name = obj_dict['fq_name']

        db_conn = self._db_conn

        # if client gave parent_type of config-root, ignore and remove
        if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
            del obj_dict['parent_type']

        if 'parent_type' in obj_dict:
            # non config-root child, verify parent exists
            parent_type = obj_dict['parent_type']
            parent_fq_name = obj_dict['fq_name'][:-1]
            try:
                parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
                (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
                if not ok:
                    (code, err_msg) = status
                    abort(code, err_msg)
                self._permissions.set_user_role(request, obj_dict)
            except NoIdError:
                err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
                fq_name_str = ':'.join(parent_fq_name)
                self.config_object_error(None, fq_name_str, 'qos_forwarding_class', 'http_post', err_msg)
                abort(400, err_msg)

        # Validate perms
        # Check link permission on every referenced object of each ref type.
        objtype_list = [u'qos_queue']
        for obj_type in objtype_list:
            refs = obj_dict.get('%s_refs'%(obj_type), None)
            if refs:
                for ref in refs:
                    ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to'])
                    (ok, status) = self._permissions.check_perms_link(request, ref_uuid)
                    if not ok:
                        (code, err_msg) = status
                        abort(code, err_msg)

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # Alloc and Store id-mappings before creating entry on pubsub store.
        # Else a subscriber can ask for an id mapping before we have stored it
        # NOTE(review): 'result' is the success payload of _post_common above,
        # presumably a client-requested uuid (or None) -- confirm in _post_common
        uuid_requested = result
        (ok, result) = db_conn.dbe_alloc('qos-forwarding-class', obj_dict, uuid_requested)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict['fq_name'])
            self.config_object_error(None, fq_name_str, 'qos_forwarding_class', 'http_post', result)
            abort(code, msg)
        # from here on a failure must release the allocated id mapping
        cleanup_on_failure.append((db_conn.dbe_release, ['qos_forwarding_class', fq_name]))

        obj_ids = result

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')


        # type-specific hook
        r_class = self.get_resource_class('qos-forwarding-class')
        if r_class:
            try:
                (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
            except Exception as e:
                ok = False
                result = (500, str(e))
            if not ok:
                for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                    fail_cleanup_callable(*cleanup_args)
                (code, msg) = result
                fq_name_str = ':'.join(fq_name)
                self.config_object_error(None, fq_name_str, 'qos_forwarding_class', 'http_post', msg)
                abort(code, msg)
            callable = getattr(r_class, 'http_post_collection_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))

        try:
            (ok, result) = \
                db_conn.dbe_create('qos-forwarding-class', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)

        if not ok:
            # undo id alloc and any type-specific state in registration order
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            fq_name_str = ':'.join(fq_name)
            self.config_object_error(None, fq_name_str, 'qos_forwarding_class', 'http_post', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['name'] = name
        rsp_body['fq_name'] = fq_name
        rsp_body['uuid'] = obj_ids['uuid']
        rsp_body['href'] = self.generate_url('qos-forwarding-class', obj_ids['uuid'])
        if 'parent_type' in obj_dict:
            # non config-root child, send back parent uuid/href
            rsp_body['parent_uuid'] = parent_uuid
            rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)

        # best-effort post-create extension hook; errors are ignored
        try:
            self._extension_mgrs['resourceApi'].map_method('post_qos_forwarding_class_create', obj_dict)
        except Exception as e:
            pass

        return {'qos-forwarding-class': rsp_body}
    #end qos_forwarding_classs_http_post
any specified filters + # if not 'detail' return list with any specified 'fields' + # if 'detail' return list with props+refs + any specified 'fields' + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + parent_uuids = None + back_ref_uuids = None + obj_uuids = None + if (('parent_fq_name_str' in request.query) and + ('parent_type' in request.query)): + parent_fq_name = request.query.parent_fq_name_str.split(':') + parent_type = request.query.parent_type + parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)] + elif 'parent_id' in request.query: + parent_ids = request.query.parent_id.split(',') + parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids] + if 'back_ref_id' in request.query: + back_ref_ids = request.query.back_ref_id.split(',') + back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids] + if 'obj_uuids' in request.query: + obj_uuids = request.query.obj_uuids.split(',') + + # common handling for all resource get + (ok, result) = self._get_common(request, parent_uuids) + if not ok: + (code, msg) = result + self.config_object_error(None, None, 'qos_forwarding_classs', 'http_get_collection', msg) + abort(code, msg) + + if 'count' in request.query: + count = 'true' in request.query.count.lower() + else: + count = False + + filter_params = request.query.filters + if filter_params: + try: + ff_key_vals = filter_params.split(',') + ff_names = [ff.split('==')[0] for ff in ff_key_vals] + ff_values = [ff.split('==')[1] for ff in ff_key_vals] + filters = {'field_names': ff_names, 'field_values': ff_values} + except Exception as e: + abort(400, 'Invalid filter ' + filter_params) + else: + filters = None + db_conn = self._db_conn + (ok, result) = \ + db_conn.dbe_list('qos-forwarding-class', parent_uuids, back_ref_uuids, obj_uuids, count, filters) + if not ok: + self.config_object_error(None, None, 'qos_forwarding_classs', 'http_get_collection', result) + abort(404, result) + + # 
If only counting, return early + if count: + return {'qos-forwarding-classs': {'count': result}} + + if 'detail' in request.query: + detail = 'true' in request.query.detail.lower() + else: + detail = False + + fq_names_uuids = result + obj_dicts = [] + if not detail: + if not self.is_admin_request(): + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms'] + (ok, result) = db_conn.dbe_read_multi('qos-forwarding-class', obj_ids_list, obj_fields) + if not ok: + result = [] + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + if obj_result['id_perms'].get('user_visible', True): + obj_dict = {} + obj_dict['uuid'] = obj_result['uuid'] + obj_dict['href'] = self.generate_url('qos-forwarding-class', obj_result['uuid']) + obj_dict['fq_name'] = obj_result['fq_name'] + obj_dicts.append(obj_dict) + else: + for fq_name, obj_uuid in fq_names_uuids: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['uuid'] = obj_uuid + obj_dict['href'] = self.generate_url('qos-forwarding-class', obj_uuid) + obj_dict['fq_name'] = fq_name + obj_dicts.append(obj_dict) + else: #detail + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'dscp', u'trusted', u'id_perms', u'display_name'] + [u'qos_queue_refs'] + if 'fields' in request.query: + obj_fields.extend(request.query.fields.split(',')) + (ok, result) = db_conn.dbe_read_multi('qos-forwarding-class', obj_ids_list, obj_fields) + + if not ok: + result = [] + + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['name'] = obj_result['fq_name'][-1] + obj_dict['href'] = self.generate_url('qos-forwarding-class', obj_result['uuid']) + obj_dict.update(obj_result) + if (obj_dict['id_perms'].get('user_visible', True) or + self.is_admin_request()): + obj_dicts.append({'qos-forwarding-class': obj_dict}) + + return {'qos-forwarding-classs': obj_dicts} + #end 
    def database_node_http_get(self, id):
        """REST handler for reading a single database-node by uuid (GET).

        Honours the If-None-Match ETag (replies 304 when unchanged), applies
        the 'fields' / 'exclude_back_refs' / 'exclude_children' query params
        to select which fields the db layer reads, hides objects whose
        id_perms mark them not user-visible from non-admins, and wraps the
        result as {'database-node': {...}} with uuid/href/name filled in.
        """
        # best-effort pre-read extension hook; errors are ignored
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_database_node_read', id)
        except Exception as e:
            pass

        # TODO get vals from request out of the global ASAP
        etag = request.headers.get('If-None-Match')
        try:
            obj_type = self._db_conn.uuid_to_obj_type(id)
        except NoIdError:
            obj_type = None
        if obj_type != 'database_node':
            abort(404, 'No database-node object found for id %s' %(id))
        # common handling for all resource get
        (ok, result) = self._get_common(request, id)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'database_node', 'http_get', msg)
            abort(code, msg)

        # type-specific hook
        r_class = self.get_resource_class('database-node')
        if r_class:
            r_class.http_get(id)

        db_conn = self._db_conn
        if etag:
            # strip the quotes clients send around the ETag value
            obj_ids = {'uuid': id}
            (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
            if not ok:
                # Not present in DB
                self.config_object_error(id, None, 'database_node', 'http_get', result)
                abort(404, result)

            is_latest = result
            if is_latest:
                # send Not-Modified, caches use this for read optimization
                response.status = 304
                return
        #end if etag

        obj_ids = {'uuid': id}

        # Generate field list for db layer
        properties = [u'database_node_ip_address', u'id_perms', u'display_name']
        references = []
        back_references = [u'global_system_config_back_refs']
        children = []
        if 'fields' in request.query:
            obj_fields = request.query.fields.split(',')
        else: # default props + children + refs + backrefs
            obj_fields = properties + references
            if 'exclude_back_refs' not in request.query:
                obj_fields = obj_fields + back_references
            if 'exclude_children' not in request.query:
                obj_fields = obj_fields + children

        (ok, result) = db_conn.dbe_read('database-node', obj_ids, obj_fields)
        if not ok:
            # Not present in DB
            self.config_object_error(id, None, 'database_node', 'http_get', result)
            abort(404, result)

        # check visibility
        if (not result['id_perms'].get('user_visible', True) and
            not self.is_admin_request()):
            result = 'This object is not visible by users: %s' % id
            self.config_object_error(id, None, 'database_node', 'http_get', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('database-node', id)
        rsp_body['name'] = result['fq_name'][-1]
        rsp_body.update(result)
        id_perms = result['id_perms']
        # expose last_modified as the ETag for subsequent conditional GETs
        response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
        # best-effort post-read extension hook; errors are ignored
        try:
            self._extension_mgrs['resourceApi'].map_method('post_database_node_read', id, rsp_body)
        except Exception as e:
            pass

        return {'database-node': rsp_body}
    #end database_node_http_get
None, 'database_node', 'http_put', msg) + abort(code, msg) + + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # type-specific hook + r_class = self.get_resource_class('database-node') + if r_class: + (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn) + if not ok: + (code, msg) = put_result + self.config_object_error(id, None, 'database_node', 'http_put', msg) + abort(code, msg) + callable = getattr(r_class, 'http_put_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn])) + + obj_ids = {'uuid': id} + try: + (ok, result) = db_conn.dbe_update('database-node', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'database_node', 'http_put', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('database-node', id) + + try: + self._extension_mgrs['resourceApi'].map_method('post_database_node_update', id, obj_dict) + except Exception as e: + pass + + return {'database-node': rsp_body} + #end database_node_http_put + + def database_node_http_delete(self, id): + db_conn = self._db_conn + # if obj doesn't exist return early + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'database_node': + abort(404, 'No database-node object found for id %s' %(id)) + _ = db_conn.uuid_to_fq_name(id) + except NoIdError: + abort(404, 'ID %s does not exist' %(id)) + + try: + self._extension_mgrs['resourceApi'].map_method('pre_database_node_delete', id) + except Exception as e: + pass + + # read in obj from db (accepting error) to get details of it + obj_ids = {'uuid': id} + back_references = [u'global_system_config_back_refs'] + children = [] + obj_fields = children + back_references + (read_ok, 
read_result) = db_conn.dbe_read('database-node', obj_ids, obj_fields) + if not read_ok: + if read_result.startswith('Unknown id:'): + abort(404, 'ID %s does not exist' %(id)) + else: + self.config_object_error(id, None, 'database_node', 'http_delete', read_result) + # proceed down to delete the resource + + # common handling for all resource delete + parent_type = read_result.get('parent_type', None) + (ok, del_result) = self._delete_common(request, 'database_node', id, parent_type) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'database_node', 'http_delete', msg) + abort(code, msg) + + fq_name = read_result['fq_name'] + ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('database-node', fq_name) + obj_ids['imid'] = ifmap_id + if parent_type: + parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1]) + obj_ids['parent_imid'] = parent_imid + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + + # type-specific hook + r_class = self.get_resource_class('database-node') + if r_class: + if read_ok: + # fail if non-default children or backrefs exist + + # Delete default children first + self._database_node_delete_default_children(read_result) + + (ok, del_result) = r_class.http_delete(id, read_result, db_conn) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'database_node', 'http_delete', msg) + abort(code, msg) + callable = getattr(r_class, 'http_delete_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, read_result, db_conn])) + #end if read_ok + + try: + (ok, del_result) = db_conn.dbe_delete('database-node', obj_ids, read_result) + except Exception as e: + ok = False + del_result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'database_node', 'http_delete', del_result) + 
    def database_nodes_http_post(self):
        """REST handler for creating a database-node (POST on collection).

        Flow: validate body -> pre-create extension hook -> validate id_perms
        property -> common create handling -> parent permission checks ->
        allocate ids -> type-specific hook -> persist via db layer.
        Any failure after state modification starts is rolled back by running
        the callbacks accumulated in cleanup_on_failure.
        Returns {'database-node': {...name/fq_name/uuid/href...}}.
        """
        key = 'database-node'
        obj_dict = request.json[key]
        self._post_validate(key, obj_dict=obj_dict)
        fq_name = obj_dict['fq_name']

        # best-effort pre-create extension hook; errors are ignored
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_database_node_create', obj_dict)
        except Exception as e:
            pass

        # Validate the 'id_perms' property by round-tripping it through the
        # generated XML type (export then re-build); abort 400 if malformed.
        prop_dict = obj_dict.get('id_perms')
        if prop_dict:
            buf = cStringIO.StringIO()
            xx_id_perms = IdPermsType(**prop_dict)
            xx_id_perms.export(buf)
            node = etree.fromstring(buf.getvalue())
            xx_id_perms = IdPermsType()
            try:
                xx_id_perms.build(node)
            except Exception as e:
                abort(400, str(e))
        # common handling for all resource create
        (ok, result) = self._post_common(request, 'database-node', obj_dict)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict.get('fq_name', []))
            self.config_object_error(None, fq_name_str, 'database_node', 'http_post', msg)
            abort(code, msg)

        name = obj_dict['fq_name'][-1]
        fq_name = obj_dict['fq_name']

        db_conn = self._db_conn

        # if client gave parent_type of config-root, ignore and remove
        if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
            del obj_dict['parent_type']

        if 'parent_type' in obj_dict:
            # non config-root child, verify parent exists
            parent_type = obj_dict['parent_type']
            parent_fq_name = obj_dict['fq_name'][:-1]
            try:
                parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)
                (ok, status) = self._permissions.check_perms_write(request, parent_uuid)
                if not ok:
                    (code, err_msg) = status
                    abort(code, err_msg)
                self._permissions.set_user_role(request, obj_dict)
            except NoIdError:
                err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist'
                fq_name_str = ':'.join(parent_fq_name)
                self.config_object_error(None, fq_name_str, 'database_node', 'http_post', err_msg)
                abort(400, err_msg)


        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []
        # Alloc and Store id-mappings before creating entry on pubsub store.
        # Else a subscriber can ask for an id mapping before we have stored it
        # NOTE(review): 'result' is the success payload of _post_common above,
        # presumably a client-requested uuid (or None) -- confirm in _post_common
        uuid_requested = result
        (ok, result) = db_conn.dbe_alloc('database-node', obj_dict, uuid_requested)
        if not ok:
            (code, msg) = result
            fq_name_str = ':'.join(obj_dict['fq_name'])
            self.config_object_error(None, fq_name_str, 'database_node', 'http_post', result)
            abort(code, msg)
        # from here on a failure must release the allocated id mapping
        cleanup_on_failure.append((db_conn.dbe_release, ['database_node', fq_name]))

        obj_ids = result

        env = request.headers.environ
        tenant_name = env.get(hdr_server_tenant(), 'default-project')


        # type-specific hook
        r_class = self.get_resource_class('database-node')
        if r_class:
            try:
                (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn)
            except Exception as e:
                ok = False
                result = (500, str(e))
            if not ok:
                for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                    fail_cleanup_callable(*cleanup_args)
                (code, msg) = result
                fq_name_str = ':'.join(fq_name)
                self.config_object_error(None, fq_name_str, 'database_node', 'http_post', msg)
                abort(code, msg)
            callable = getattr(r_class, 'http_post_collection_fail', None)
            if callable:
                cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))

        try:
            (ok, result) = \
                db_conn.dbe_create('database-node', obj_ids, obj_dict)
        except Exception as e:
            ok = False
            result = str(e)

        if not ok:
            # undo id alloc and any type-specific state in registration order
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            fq_name_str = ':'.join(fq_name)
            self.config_object_error(None, fq_name_str, 'database_node', 'http_post', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['name'] = name
        rsp_body['fq_name'] = fq_name
        rsp_body['uuid'] = obj_ids['uuid']
        rsp_body['href'] = self.generate_url('database-node', obj_ids['uuid'])
        if 'parent_type' in obj_dict:
            # non config-root child, send back parent uuid/href
            rsp_body['parent_uuid'] = parent_uuid
            rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid)

        # best-effort post-create extension hook; errors are ignored
        try:
            self._extension_mgrs['resourceApi'].map_method('post_database_node_create', obj_dict)
        except Exception as e:
            pass

        return {'database-node': rsp_body}
    #end database_nodes_http_post
ff_key_vals = filter_params.split(',') + ff_names = [ff.split('==')[0] for ff in ff_key_vals] + ff_values = [ff.split('==')[1] for ff in ff_key_vals] + filters = {'field_names': ff_names, 'field_values': ff_values} + except Exception as e: + abort(400, 'Invalid filter ' + filter_params) + else: + filters = None + db_conn = self._db_conn + (ok, result) = \ + db_conn.dbe_list('database-node', parent_uuids, back_ref_uuids, obj_uuids, count, filters) + if not ok: + self.config_object_error(None, None, 'database_nodes', 'http_get_collection', result) + abort(404, result) + + # If only counting, return early + if count: + return {'database-nodes': {'count': result}} + + if 'detail' in request.query: + detail = 'true' in request.query.detail.lower() + else: + detail = False + + fq_names_uuids = result + obj_dicts = [] + if not detail: + if not self.is_admin_request(): + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms'] + (ok, result) = db_conn.dbe_read_multi('database-node', obj_ids_list, obj_fields) + if not ok: + result = [] + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + if obj_result['id_perms'].get('user_visible', True): + obj_dict = {} + obj_dict['uuid'] = obj_result['uuid'] + obj_dict['href'] = self.generate_url('database-node', obj_result['uuid']) + obj_dict['fq_name'] = obj_result['fq_name'] + obj_dicts.append(obj_dict) + else: + for fq_name, obj_uuid in fq_names_uuids: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['uuid'] = obj_uuid + obj_dict['href'] = self.generate_url('database-node', obj_uuid) + obj_dict['fq_name'] = fq_name + obj_dicts.append(obj_dict) + else: #detail + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'database_node_ip_address', u'id_perms', u'display_name'] + [] + if 'fields' in request.query: + obj_fields.extend(request.query.fields.split(',')) + (ok, result) = 
    def routing_instance_http_get(self, id):
        """REST handler for reading a single routing-instance by uuid (GET).

        Honours the If-None-Match ETag (replies 304 when unchanged), applies
        the 'fields' / 'exclude_back_refs' / 'exclude_children' query params
        to select which fields the db layer reads, hides objects whose
        id_perms mark them not user-visible from non-admins, and wraps the
        result as {'routing-instance': {...}} with uuid/href/name filled in.
        """
        # best-effort pre-read extension hook; errors are ignored
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_routing_instance_read', id)
        except Exception as e:
            pass

        # TODO get vals from request out of the global ASAP
        etag = request.headers.get('If-None-Match')
        try:
            obj_type = self._db_conn.uuid_to_obj_type(id)
        except NoIdError:
            obj_type = None
        if obj_type != 'routing_instance':
            abort(404, 'No routing-instance object found for id %s' %(id))
        # common handling for all resource get
        (ok, result) = self._get_common(request, id)
        if not ok:
            (code, msg) = result
            self.config_object_error(id, None, 'routing_instance', 'http_get', msg)
            abort(code, msg)

        # type-specific hook
        r_class = self.get_resource_class('routing-instance')
        if r_class:
            r_class.http_get(id)

        db_conn = self._db_conn
        if etag:
            # strip the quotes clients send around the ETag value
            obj_ids = {'uuid': id}
            (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', ''))
            if not ok:
                # Not present in DB
                self.config_object_error(id, None, 'routing_instance', 'http_get', result)
                abort(404, result)

            is_latest = result
            if is_latest:
                # send Not-Modified, caches use this for read optimization
                response.status = 304
                return
        #end if etag

        obj_ids = {'uuid': id}

        # Generate field list for db layer
        properties = [u'service_chain_information', u'routing_instance_is_default', u'static_route_entries', u'default_ce_protocol', u'id_perms', u'display_name']
        references = ['routing_instance_refs', 'route_target_refs']
        back_references = ['virtual_machine_interface_back_refs', u'virtual_network_back_refs', 'routing_instance_back_refs', 'customer_attachment_back_refs']
        children = ['bgp_routers']
        if 'fields' in request.query:
            obj_fields = request.query.fields.split(',')
        else: # default props + children + refs + backrefs
            obj_fields = properties + references
            if 'exclude_back_refs' not in request.query:
                obj_fields = obj_fields + back_references
            if 'exclude_children' not in request.query:
                obj_fields = obj_fields + children

        (ok, result) = db_conn.dbe_read('routing-instance', obj_ids, obj_fields)
        if not ok:
            # Not present in DB
            self.config_object_error(id, None, 'routing_instance', 'http_get', result)
            abort(404, result)

        # check visibility
        if (not result['id_perms'].get('user_visible', True) and
            not self.is_admin_request()):
            result = 'This object is not visible by users: %s' % id
            self.config_object_error(id, None, 'routing_instance', 'http_get', result)
            abort(404, result)

        rsp_body = {}
        rsp_body['uuid'] = id
        rsp_body['href'] = self.generate_url('routing-instance', id)
        rsp_body['name'] = result['fq_name'][-1]
        rsp_body.update(result)
        id_perms = result['id_perms']
        # expose last_modified as the ETag for subsequent conditional GETs
        response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
        # best-effort post-read extension hook; errors are ignored
        try:
            self._extension_mgrs['resourceApi'].map_method('post_routing_instance_read', id, rsp_body)
        except Exception as e:
            pass

        return {'routing-instance': rsp_body}
    #end routing_instance_http_get
Exception as e: + pass + + db_conn = self._db_conn + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'routing_instance': + abort(404, 'No routing-instance object found for id %s' %(id)) + fq_name = db_conn.uuid_to_fq_name(id) + except NoIdError as e: + abort(404, str(e)) + prop_dict = obj_dict.get('service_chain_information') + if prop_dict: + buf = cStringIO.StringIO() + xx_service_chain_information = ServiceChainInfo(**prop_dict) + xx_service_chain_information.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_service_chain_information = ServiceChainInfo() + try: + xx_service_chain_information.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('static_route_entries') + if prop_dict: + buf = cStringIO.StringIO() + xx_static_route_entries = StaticRouteEntriesType(**prop_dict) + xx_static_route_entries.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_static_route_entries = StaticRouteEntriesType() + try: + xx_static_route_entries.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('default_ce_protocol') + if prop_dict: + buf = cStringIO.StringIO() + xx_default_ce_protocol = DefaultProtocolType(**prop_dict) + xx_default_ce_protocol.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_default_ce_protocol = DefaultProtocolType() + try: + xx_default_ce_protocol.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + for ref_dict in obj_dict.get('routing_instance_refs') or []: + if fq_name == ref_dict['to']: + abort(404, 'Cannot add reference to self') + buf = cStringIO.StringIO() + xx_routing_instance = ConnectionType(**ref_dict['attr']) + 
xx_routing_instance.export(buf) + node = etree.fromstring(buf.getvalue()) + try: + xx_routing_instance.build(node) + except Exception as e: + abort(400, str(e)) + for ref_dict in obj_dict.get('route_target_refs') or []: + buf = cStringIO.StringIO() + xx_route_target = InstanceTargetType(**ref_dict['attr']) + xx_route_target.export(buf) + node = etree.fromstring(buf.getvalue()) + try: + xx_route_target.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource put + (ok, result) = self._put_common(request, 'routing_instance', id, fq_name, obj_dict) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'routing_instance', 'http_put', msg) + abort(code, msg) + + # Validate perms + objtype_list = ['bgp_router', 'routing_instance', 'route_target'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + try: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + except NoIdError as e: + abort(404, str(e)) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. 
    def routing_instance_http_delete(self, id):
        """REST handler for deleting a routing-instance by uuid (DELETE).

        Refuses with 409 while non-default children (any bgp-router other
        than a single 'default-bgp-router') or back-references from
        virtual-machine-interfaces / other routing-instances still exist.
        Otherwise deletes default children, runs the type-specific hook and
        removes the object via the db layer, rolling back registered
        cleanup callbacks if the final delete fails. Returns no body.
        """
        db_conn = self._db_conn
        # if obj doesn't exist return early
        try:
            obj_type = db_conn.uuid_to_obj_type(id)
            if obj_type != 'routing_instance':
                abort(404, 'No routing-instance object found for id %s' %(id))
            _ = db_conn.uuid_to_fq_name(id)
        except NoIdError:
            abort(404, 'ID %s does not exist' %(id))

        # best-effort pre-delete extension hook; errors are ignored
        try:
            self._extension_mgrs['resourceApi'].map_method('pre_routing_instance_delete', id)
        except Exception as e:
            pass

        # read in obj from db (accepting error) to get details of it
        obj_ids = {'uuid': id}
        back_references = ['virtual_machine_interface_back_refs', u'virtual_network_back_refs', 'routing_instance_back_refs', 'customer_attachment_back_refs']
        children = ['bgp_routers']
        obj_fields = children + back_references
        (read_ok, read_result) = db_conn.dbe_read('routing-instance', obj_ids, obj_fields)
        if not read_ok:
            if read_result.startswith('Unknown id:'):
                abort(404, 'ID %s does not exist' %(id))
            else:
                self.config_object_error(id, None, 'routing_instance', 'http_delete', read_result)
                # proceed down to delete the resource

        # common handling for all resource delete
        parent_type = read_result.get('parent_type', None)
        (ok, del_result) = self._delete_common(request, 'routing_instance', id, parent_type)
        if not ok:
            (code, msg) = del_result
            self.config_object_error(id, None, 'routing_instance', 'http_delete', msg)
            abort(code, msg)

        # compute ifmap identifiers for the object (and its parent, if any)
        # so the db layer can remove the corresponding ifmap entries
        fq_name = read_result['fq_name']
        ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('routing-instance', fq_name)
        obj_ids['imid'] = ifmap_id
        if parent_type:
            parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1])
            obj_ids['parent_imid'] = parent_imid

        # State modification starts from here. Ensure that cleanup is done for all state changes
        cleanup_on_failure = []

        # type-specific hook
        r_class = self.get_resource_class('routing-instance')
        if r_class:
            if read_ok:
                # fail if non-default children or backrefs exist
                bgp_routers = read_result.get('bgp_routers', None)
                if bgp_routers:
                    has_infos = read_result['bgp_routers']
                    # a lone 'default-bgp-router' child is tolerated; anything
                    # else blocks the delete with 409 Conflict
                    if ((len(has_infos) > 1) or
                        (len(has_infos) == 1 and has_infos[0]['to'][-1] != 'default-bgp-router')):
                        has_urls = [has_info['href'] for has_info in has_infos]
                        has_str = ', '.join(has_urls)
                        err_msg = 'Children ' + has_str + ' still exist'
                        self.config_object_error(id, None, 'routing_instance', 'http_delete', err_msg)
                        abort(409, err_msg)

                virtual_machine_interface_back_refs = read_result.get('virtual_machine_interface_back_refs', None)
                if virtual_machine_interface_back_refs:
                    back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_machine_interface_back_refs']]
                    back_ref_str = ', '.join(back_ref_urls)
                    err_msg = 'Back-References from ' + back_ref_str + ' still exist'
                    self.config_object_error(id, None, 'routing_instance', 'http_delete', err_msg)
                    abort(409, err_msg)

                routing_instance_back_refs = read_result.get('routing_instance_back_refs', None)
                if routing_instance_back_refs:
                    back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['routing_instance_back_refs']]
                    back_ref_str = ', '.join(back_ref_urls)
                    err_msg = 'Back-References from ' + back_ref_str + ' still exist'
                    self.config_object_error(id, None, 'routing_instance', 'http_delete', err_msg)
                    abort(409, err_msg)


                # Delete default children first
                self._routing_instance_delete_default_children(read_result)

                (ok, del_result) = r_class.http_delete(id, read_result, db_conn)
                if not ok:
                    (code, msg) = del_result
                    self.config_object_error(id, None, 'routing_instance', 'http_delete', msg)
                    abort(code, msg)
                callable = getattr(r_class, 'http_delete_fail', None)
                if callable:
                    cleanup_on_failure.append((callable, [id, read_result, db_conn]))
            #end if read_ok

        try:
            (ok, del_result) = db_conn.dbe_delete('routing-instance', obj_ids, read_result)
        except Exception as e:
            ok = False
            del_result = str(e)
        if not ok:
            # undo type-specific state changes in registration order
            for fail_cleanup_callable, cleanup_args in cleanup_on_failure:
                fail_cleanup_callable(*cleanup_args)
            self.config_object_error(id, None, 'routing_instance', 'http_delete', del_result)
            abort(409, del_result)

        # best-effort post-delete extension hook; errors are ignored
        try:
            self._extension_mgrs['resourceApi'].map_method('post_routing_instance_delete', id, read_result)
        except Exception as e:
            pass

    #end routing_instance_http_delete
DefaultProtocolType() + try: + xx_default_ce_protocol.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + for ref_dict in obj_dict.get('routing_instance_refs') or []: + if fq_name == ref_dict['to']: + abort(404, 'Cannot add reference to self') + buf = cStringIO.StringIO() + xx_routing_instance = ConnectionType(**ref_dict['attr']) + xx_routing_instance.export(buf) + node = etree.fromstring(buf.getvalue()) + try: + xx_routing_instance.build(node) + except Exception as e: + abort(400, str(e)) + for ref_dict in obj_dict.get('route_target_refs') or []: + buf = cStringIO.StringIO() + xx_route_target = InstanceTargetType(**ref_dict['attr']) + xx_route_target.export(buf) + node = etree.fromstring(buf.getvalue()) + try: + xx_route_target.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource create + (ok, result) = self._post_common(request, 'routing-instance', obj_dict) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict.get('fq_name', [])) + self.config_object_error(None, fq_name_str, 'routing_instance', 'http_post', msg) + abort(code, msg) + + name = obj_dict['fq_name'][-1] + fq_name = obj_dict['fq_name'] + + db_conn = self._db_conn + + # if client gave parent_type of config-root, ignore and remove + if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root': + del obj_dict['parent_type'] + + if 'parent_type' in obj_dict: + # non config-root child, verify parent exists + parent_type = obj_dict['parent_type'] + parent_fq_name = obj_dict['fq_name'][:-1] + try: + parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name) + (ok, status) = self._permissions.check_perms_write(request, 
parent_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + self._permissions.set_user_role(request, obj_dict) + except NoIdError: + err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist' + fq_name_str = ':'.join(parent_fq_name) + self.config_object_error(None, fq_name_str, 'routing_instance', 'http_post', err_msg) + abort(400, err_msg) + + # Validate perms + objtype_list = ['bgp_router', 'routing_instance', 'route_target'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # Alloc and Store id-mappings before creating entry on pubsub store. + # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('routing-instance', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'routing_instance', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['routing_instance', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('routing-instance') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, 
fq_name_str, 'routing_instance', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('routing-instance', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'routing_instance', 'http_post', result) + abort(404, result) + + rsp_body = {} + rsp_body['name'] = name + rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = self.generate_url('routing-instance', obj_ids['uuid']) + if 'parent_type' in obj_dict: + # non config-root child, send back parent uuid/href + rsp_body['parent_uuid'] = parent_uuid + rsp_body['parent_href'] = self.generate_url(parent_type, parent_uuid) + + try: + self._extension_mgrs['resourceApi'].map_method('post_routing_instance_create', obj_dict) + except Exception as e: + pass + + return {'routing-instance': rsp_body} + #end routing_instances_http_post + + def routing_instances_http_get(self): + # gather list of uuids using 1. any specified anchors + # 2. 
any specified filters + # if not 'detail' return list with any specified 'fields' + # if 'detail' return list with props+refs + any specified 'fields' + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + parent_uuids = None + back_ref_uuids = None + obj_uuids = None + if (('parent_fq_name_str' in request.query) and + ('parent_type' in request.query)): + parent_fq_name = request.query.parent_fq_name_str.split(':') + parent_type = request.query.parent_type + parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)] + elif 'parent_id' in request.query: + parent_ids = request.query.parent_id.split(',') + parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids] + if 'back_ref_id' in request.query: + back_ref_ids = request.query.back_ref_id.split(',') + back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids] + if 'obj_uuids' in request.query: + obj_uuids = request.query.obj_uuids.split(',') + + # common handling for all resource get + (ok, result) = self._get_common(request, parent_uuids) + if not ok: + (code, msg) = result + self.config_object_error(None, None, 'routing_instances', 'http_get_collection', msg) + abort(code, msg) + + if 'count' in request.query: + count = 'true' in request.query.count.lower() + else: + count = False + + filter_params = request.query.filters + if filter_params: + try: + ff_key_vals = filter_params.split(',') + ff_names = [ff.split('==')[0] for ff in ff_key_vals] + ff_values = [ff.split('==')[1] for ff in ff_key_vals] + filters = {'field_names': ff_names, 'field_values': ff_values} + except Exception as e: + abort(400, 'Invalid filter ' + filter_params) + else: + filters = None + db_conn = self._db_conn + (ok, result) = \ + db_conn.dbe_list('routing-instance', parent_uuids, back_ref_uuids, obj_uuids, count, filters) + if not ok: + self.config_object_error(None, None, 'routing_instances', 'http_get_collection', result) + abort(404, result) + + # If only 
counting, return early + if count: + return {'routing-instances': {'count': result}} + + if 'detail' in request.query: + detail = 'true' in request.query.detail.lower() + else: + detail = False + + fq_names_uuids = result + obj_dicts = [] + if not detail: + if not self.is_admin_request(): + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms'] + (ok, result) = db_conn.dbe_read_multi('routing-instance', obj_ids_list, obj_fields) + if not ok: + result = [] + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + if obj_result['id_perms'].get('user_visible', True): + obj_dict = {} + obj_dict['uuid'] = obj_result['uuid'] + obj_dict['href'] = self.generate_url('routing-instance', obj_result['uuid']) + obj_dict['fq_name'] = obj_result['fq_name'] + obj_dicts.append(obj_dict) + else: + for fq_name, obj_uuid in fq_names_uuids: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['uuid'] = obj_uuid + obj_dict['href'] = self.generate_url('routing-instance', obj_uuid) + obj_dict['fq_name'] = fq_name + obj_dicts.append(obj_dict) + else: #detail + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'service_chain_information', u'routing_instance_is_default', u'static_route_entries', u'default_ce_protocol', u'id_perms', u'display_name'] + ['routing_instance_refs', 'route_target_refs'] + if 'fields' in request.query: + obj_fields.extend(request.query.fields.split(',')) + (ok, result) = db_conn.dbe_read_multi('routing-instance', obj_ids_list, obj_fields) + + if not ok: + result = [] + + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['name'] = obj_result['fq_name'][-1] + obj_dict['href'] = self.generate_url('routing-instance', obj_result['uuid']) + obj_dict.update(obj_result) + if (obj_dict['id_perms'].get('user_visible', True) or + self.is_admin_request()): + 
obj_dicts.append({'routing-instance': obj_dict}) + + return {'routing-instances': obj_dicts} + #end routing_instances_http_get + + def _routing_instance_create_default_children(self, parent_obj): + # Create a default child only if provisioned for + r_class = self.get_resource_class('bgp-router') + if r_class and r_class.generate_default_instance: + child_obj = BgpRouter(parent_obj = parent_obj) + child_dict = child_obj.__dict__ + fq_name = child_dict['fq_name'] + child_dict['id_perms'] = self._get_default_id_perms('bgp-router') + + db_conn = self._db_conn + (ok, result) = db_conn.dbe_alloc('bgp-router', child_dict) + if not ok: + return (ok, result) + + obj_ids = result + db_conn.dbe_create('bgp-router', obj_ids, child_dict) + self._bgp_router_create_default_children(child_obj) + + pass + #end _routing_instance_create_default_children + + def _routing_instance_delete_default_children(self, parent_dict): + # Delete a default child only if provisioned for + r_class = self.get_resource_class('bgp-router') + if r_class and r_class.generate_default_instance: + # first locate default child then delete it + has_infos = parent_dict.get('bgp_routers') + if has_infos: + for has_info in has_infos: + if has_info['to'][-1] == 'default-bgp-router': + default_child_id = has_info['href'].split('/')[-1] + self.bgp_router_http_delete(default_child_id) + break + + pass + #end _routing_instance_delete_default_children + + def network_ipam_http_get(self, id): + try: + self._extension_mgrs['resourceApi'].map_method('pre_network_ipam_read', id) + except Exception as e: + pass + + # TODO get vals from request out of the global ASAP + etag = request.headers.get('If-None-Match') + try: + obj_type = self._db_conn.uuid_to_obj_type(id) + except NoIdError: + obj_type = None + if obj_type != 'network_ipam': + abort(404, 'No network-ipam object found for id %s' %(id)) + # common handling for all resource get + (ok, result) = self._get_common(request, id) + if not ok: + (code, msg) = result + 
self.config_object_error(id, None, 'network_ipam', 'http_get', msg) + abort(code, msg) + + # type-specific hook + r_class = self.get_resource_class('network-ipam') + if r_class: + r_class.http_get(id) + + db_conn = self._db_conn + if etag: + obj_ids = {'uuid': id} + (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', '')) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'network_ipam', 'http_get', result) + abort(404, result) + + is_latest = result + if is_latest: + # send Not-Modified, caches use this for read optimization + response.status = 304 + return + #end if etag + + obj_ids = {'uuid': id} + + # Generate field list for db layer + properties = [u'network_ipam_mgmt', u'id_perms', u'display_name'] + references = [u'virtual_DNS_refs'] + back_references = [u'project_back_refs', u'virtual_network_back_refs'] + children = [] + if 'fields' in request.query: + obj_fields = request.query.fields.split(',') + else: # default props + children + refs + backrefs + obj_fields = properties + references + if 'exclude_back_refs' not in request.query: + obj_fields = obj_fields + back_references + if 'exclude_children' not in request.query: + obj_fields = obj_fields + children + + (ok, result) = db_conn.dbe_read('network-ipam', obj_ids, obj_fields) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'network_ipam', 'http_get', result) + abort(404, result) + + # check visibility + if (not result['id_perms'].get('user_visible', True) and + not self.is_admin_request()): + result = 'This object is not visible by users: %s' % id + self.config_object_error(id, None, 'network_ipam', 'http_get', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('network-ipam', id) + rsp_body['name'] = result['fq_name'][-1] + rsp_body.update(result) + id_perms = result['id_perms'] + response.set_header('ETag', '"' + id_perms['last_modified'] + '"') + try: + 
self._extension_mgrs['resourceApi'].map_method('post_network_ipam_read', id, rsp_body) + except Exception as e: + pass + + return {'network-ipam': rsp_body} + #end network_ipam_http_get + + def network_ipam_http_put(self, id): + key = 'network-ipam' + obj_dict = request.json[key] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_network_ipam_update', id, obj_dict) + except Exception as e: + pass + + db_conn = self._db_conn + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'network_ipam': + abort(404, 'No network-ipam object found for id %s' %(id)) + fq_name = db_conn.uuid_to_fq_name(id) + except NoIdError as e: + abort(404, str(e)) + prop_dict = obj_dict.get('network_ipam_mgmt') + if prop_dict: + buf = cStringIO.StringIO() + xx_network_ipam_mgmt = IpamType(**prop_dict) + xx_network_ipam_mgmt.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_network_ipam_mgmt = IpamType() + try: + xx_network_ipam_mgmt.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource put + (ok, result) = self._put_common(request, 'network_ipam', id, fq_name, obj_dict) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'network_ipam', 'http_put', msg) + abort(code, msg) + + # Validate perms + objtype_list = [u'virtual_DNS'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + try: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + except NoIdError as e: + abort(404, str(e)) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification 
starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # type-specific hook + r_class = self.get_resource_class('network-ipam') + if r_class: + (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn) + if not ok: + (code, msg) = put_result + self.config_object_error(id, None, 'network_ipam', 'http_put', msg) + abort(code, msg) + callable = getattr(r_class, 'http_put_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn])) + + obj_ids = {'uuid': id} + try: + (ok, result) = db_conn.dbe_update('network-ipam', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'network_ipam', 'http_put', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('network-ipam', id) + + try: + self._extension_mgrs['resourceApi'].map_method('post_network_ipam_update', id, obj_dict) + except Exception as e: + pass + + return {'network-ipam': rsp_body} + #end network_ipam_http_put + + def network_ipam_http_delete(self, id): + db_conn = self._db_conn + # if obj doesn't exist return early + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'network_ipam': + abort(404, 'No network-ipam object found for id %s' %(id)) + _ = db_conn.uuid_to_fq_name(id) + except NoIdError: + abort(404, 'ID %s does not exist' %(id)) + + try: + self._extension_mgrs['resourceApi'].map_method('pre_network_ipam_delete', id) + except Exception as e: + pass + + # read in obj from db (accepting error) to get details of it + obj_ids = {'uuid': id} + back_references = [u'project_back_refs', u'virtual_network_back_refs'] + children = [] + obj_fields = children + back_references + (read_ok, read_result) = db_conn.dbe_read('network-ipam', obj_ids, obj_fields) + if not read_ok: + 
if read_result.startswith('Unknown id:'): + abort(404, 'ID %s does not exist' %(id)) + else: + self.config_object_error(id, None, 'network_ipam', 'http_delete', read_result) + # proceed down to delete the resource + + # common handling for all resource delete + parent_type = read_result.get('parent_type', None) + (ok, del_result) = self._delete_common(request, 'network_ipam', id, parent_type) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'network_ipam', 'http_delete', msg) + abort(code, msg) + + fq_name = read_result['fq_name'] + ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('network-ipam', fq_name) + obj_ids['imid'] = ifmap_id + if parent_type: + parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1]) + obj_ids['parent_imid'] = parent_imid + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + + # type-specific hook + r_class = self.get_resource_class('network-ipam') + if r_class: + if read_ok: + # fail if non-default children or backrefs exist + virtual_network_back_refs = read_result.get('virtual_network_back_refs', None) + if virtual_network_back_refs: + back_ref_urls = [back_ref_info['href'] for back_ref_info in read_result['virtual_network_back_refs']] + back_ref_str = ', '.join(back_ref_urls) + err_msg = 'Back-References from ' + back_ref_str + ' still exist' + self.config_object_error(id, None, 'network_ipam', 'http_delete', err_msg) + abort(409, err_msg) + + + # Delete default children first + self._network_ipam_delete_default_children(read_result) + + (ok, del_result) = r_class.http_delete(id, read_result, db_conn) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'network_ipam', 'http_delete', msg) + abort(code, msg) + callable = getattr(r_class, 'http_delete_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, read_result, db_conn])) + #end if read_ok + + try: + (ok, 
del_result) = db_conn.dbe_delete('network-ipam', obj_ids, read_result) + except Exception as e: + ok = False + del_result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'network_ipam', 'http_delete', del_result) + abort(409, del_result) + + try: + self._extension_mgrs['resourceApi'].map_method('post_network_ipam_delete', id, read_result) + except Exception as e: + pass + + #end network_ipam_http_delete + + def network_ipams_http_post(self): + key = 'network-ipam' + obj_dict = request.json[key] + self._post_validate(key, obj_dict=obj_dict) + fq_name = obj_dict['fq_name'] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_network_ipam_create', obj_dict) + except Exception as e: + pass + + prop_dict = obj_dict.get('network_ipam_mgmt') + if prop_dict: + buf = cStringIO.StringIO() + xx_network_ipam_mgmt = IpamType(**prop_dict) + xx_network_ipam_mgmt.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_network_ipam_mgmt = IpamType() + try: + xx_network_ipam_mgmt.build(node) + except Exception as e: + abort(400, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource create + (ok, result) = self._post_common(request, 'network-ipam', obj_dict) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict.get('fq_name', [])) + self.config_object_error(None, fq_name_str, 'network_ipam', 'http_post', msg) + abort(code, msg) + + name = obj_dict['fq_name'][-1] + fq_name = obj_dict['fq_name'] + + db_conn = self._db_conn + + # if client gave parent_type of config-root, ignore and remove + if 'parent_type' in obj_dict and obj_dict['parent_type'] == 
'config-root': + del obj_dict['parent_type'] + + if 'parent_type' in obj_dict: + # non config-root child, verify parent exists + parent_type = obj_dict['parent_type'] + parent_fq_name = obj_dict['fq_name'][:-1] + try: + parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name) + (ok, status) = self._permissions.check_perms_write(request, parent_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + self._permissions.set_user_role(request, obj_dict) + except NoIdError: + err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist' + fq_name_str = ':'.join(parent_fq_name) + self.config_object_error(None, fq_name_str, 'network_ipam', 'http_post', err_msg) + abort(400, err_msg) + + # Validate perms + objtype_list = [u'virtual_DNS'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # Alloc and Store id-mappings before creating entry on pubsub store. 
+ # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('network-ipam', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'network_ipam', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['network_ipam', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('network-ipam') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'network_ipam', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('network-ipam', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'network_ipam', 'http_post', result) + abort(404, result) + + rsp_body = {} + rsp_body['name'] = name + rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = self.generate_url('network-ipam', obj_ids['uuid']) + if 'parent_type' in obj_dict: + # non config-root child, send back parent uuid/href + rsp_body['parent_uuid'] = parent_uuid + rsp_body['parent_href'] = self.generate_url(parent_type, 
parent_uuid) + + try: + self._extension_mgrs['resourceApi'].map_method('post_network_ipam_create', obj_dict) + except Exception as e: + pass + + return {'network-ipam': rsp_body} + #end network_ipams_http_post + + def network_ipams_http_get(self): + # gather list of uuids using 1. any specified anchors + # 2. any specified filters + # if not 'detail' return list with any specified 'fields' + # if 'detail' return list with props+refs + any specified 'fields' + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + parent_uuids = None + back_ref_uuids = None + obj_uuids = None + if (('parent_fq_name_str' in request.query) and + ('parent_type' in request.query)): + parent_fq_name = request.query.parent_fq_name_str.split(':') + parent_type = request.query.parent_type + parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)] + elif 'parent_id' in request.query: + parent_ids = request.query.parent_id.split(',') + parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids] + if 'back_ref_id' in request.query: + back_ref_ids = request.query.back_ref_id.split(',') + back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids] + if 'obj_uuids' in request.query: + obj_uuids = request.query.obj_uuids.split(',') + + # common handling for all resource get + (ok, result) = self._get_common(request, parent_uuids) + if not ok: + (code, msg) = result + self.config_object_error(None, None, 'network_ipams', 'http_get_collection', msg) + abort(code, msg) + + if 'count' in request.query: + count = 'true' in request.query.count.lower() + else: + count = False + + filter_params = request.query.filters + if filter_params: + try: + ff_key_vals = filter_params.split(',') + ff_names = [ff.split('==')[0] for ff in ff_key_vals] + ff_values = [ff.split('==')[1] for ff in ff_key_vals] + filters = {'field_names': ff_names, 'field_values': ff_values} + except Exception as e: + abort(400, 'Invalid filter ' + filter_params) + 
else: + filters = None + db_conn = self._db_conn + (ok, result) = \ + db_conn.dbe_list('network-ipam', parent_uuids, back_ref_uuids, obj_uuids, count, filters) + if not ok: + self.config_object_error(None, None, 'network_ipams', 'http_get_collection', result) + abort(404, result) + + # If only counting, return early + if count: + return {'network-ipams': {'count': result}} + + if 'detail' in request.query: + detail = 'true' in request.query.detail.lower() + else: + detail = False + + fq_names_uuids = result + obj_dicts = [] + if not detail: + if not self.is_admin_request(): + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms'] + (ok, result) = db_conn.dbe_read_multi('network-ipam', obj_ids_list, obj_fields) + if not ok: + result = [] + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + if obj_result['id_perms'].get('user_visible', True): + obj_dict = {} + obj_dict['uuid'] = obj_result['uuid'] + obj_dict['href'] = self.generate_url('network-ipam', obj_result['uuid']) + obj_dict['fq_name'] = obj_result['fq_name'] + obj_dicts.append(obj_dict) + else: + for fq_name, obj_uuid in fq_names_uuids: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['uuid'] = obj_uuid + obj_dict['href'] = self.generate_url('network-ipam', obj_uuid) + obj_dict['fq_name'] = fq_name + obj_dicts.append(obj_dict) + else: #detail + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'network_ipam_mgmt', u'id_perms', u'display_name'] + [u'virtual_DNS_refs'] + if 'fields' in request.query: + obj_fields.extend(request.query.fields.split(',')) + (ok, result) = db_conn.dbe_read_multi('network-ipam', obj_ids_list, obj_fields) + + if not ok: + result = [] + + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['name'] = obj_result['fq_name'][-1] + obj_dict['href'] = self.generate_url('network-ipam', 
obj_result['uuid']) + obj_dict.update(obj_result) + if (obj_dict['id_perms'].get('user_visible', True) or + self.is_admin_request()): + obj_dicts.append({'network-ipam': obj_dict}) + + return {'network-ipams': obj_dicts} + #end network_ipams_http_get + + def _network_ipam_create_default_children(self, parent_obj): + pass + #end _network_ipam_create_default_children + + def _network_ipam_delete_default_children(self, parent_dict): + pass + #end _network_ipam_delete_default_children + + def logical_router_http_get(self, id): + try: + self._extension_mgrs['resourceApi'].map_method('pre_logical_router_read', id) + except Exception as e: + pass + + # TODO get vals from request out of the global ASAP + etag = request.headers.get('If-None-Match') + try: + obj_type = self._db_conn.uuid_to_obj_type(id) + except NoIdError: + obj_type = None + if obj_type != 'logical_router': + abort(404, 'No logical-router object found for id %s' %(id)) + # common handling for all resource get + (ok, result) = self._get_common(request, id) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'logical_router', 'http_get', msg) + abort(code, msg) + + # type-specific hook + r_class = self.get_resource_class('logical-router') + if r_class: + r_class.http_get(id) + + db_conn = self._db_conn + if etag: + obj_ids = {'uuid': id} + (ok, result) = db_conn.dbe_is_latest(obj_ids, etag.replace('"', '')) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'logical_router', 'http_get', result) + abort(404, result) + + is_latest = result + if is_latest: + # send Not-Modified, caches use this for read optimization + response.status = 304 + return + #end if etag + + obj_ids = {'uuid': id} + + # Generate field list for db layer + properties = [u'id_perms', u'display_name'] + references = ['virtual_machine_interface_refs', 'route_target_refs', u'virtual_network_refs', u'service_instance_refs'] + back_references = [u'project_back_refs'] + children = [] + if 'fields' in 
request.query: + obj_fields = request.query.fields.split(',') + else: # default props + children + refs + backrefs + obj_fields = properties + references + if 'exclude_back_refs' not in request.query: + obj_fields = obj_fields + back_references + if 'exclude_children' not in request.query: + obj_fields = obj_fields + children + + (ok, result) = db_conn.dbe_read('logical-router', obj_ids, obj_fields) + if not ok: + # Not present in DB + self.config_object_error(id, None, 'logical_router', 'http_get', result) + abort(404, result) + + # check visibility + if (not result['id_perms'].get('user_visible', True) and + not self.is_admin_request()): + result = 'This object is not visible by users: %s' % id + self.config_object_error(id, None, 'logical_router', 'http_get', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('logical-router', id) + rsp_body['name'] = result['fq_name'][-1] + rsp_body.update(result) + id_perms = result['id_perms'] + response.set_header('ETag', '"' + id_perms['last_modified'] + '"') + try: + self._extension_mgrs['resourceApi'].map_method('post_logical_router_read', id, rsp_body) + except Exception as e: + pass + + return {'logical-router': rsp_body} + #end logical_router_http_get + + def logical_router_http_put(self, id): + key = 'logical-router' + obj_dict = request.json[key] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_logical_router_update', id, obj_dict) + except Exception as e: + pass + + db_conn = self._db_conn + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'logical_router': + abort(404, 'No logical-router object found for id %s' %(id)) + fq_name = db_conn.uuid_to_fq_name(id) + except NoIdError as e: + abort(404, str(e)) + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: 
+ xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource put + (ok, result) = self._put_common(request, 'logical_router', id, fq_name, obj_dict) + if not ok: + (code, msg) = result + self.config_object_error(id, None, 'logical_router', 'http_put', msg) + abort(code, msg) + + # Validate perms + objtype_list = ['virtual_machine_interface', 'route_target', u'virtual_network', u'service_instance'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + try: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + except NoIdError as e: + abort(404, str(e)) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # type-specific hook + r_class = self.get_resource_class('logical-router') + if r_class: + (ok, put_result) = r_class.http_put(id, fq_name, obj_dict, self._db_conn) + if not ok: + (code, msg) = put_result + self.config_object_error(id, None, 'logical_router', 'http_put', msg) + abort(code, msg) + callable = getattr(r_class, 'http_put_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, fq_name, obj_dict, self._db_conn])) + + obj_ids = {'uuid': id} + try: + (ok, result) = db_conn.dbe_update('logical-router', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'logical_router', 'http_put', result) + abort(404, result) + + rsp_body = {} + rsp_body['uuid'] = id + rsp_body['href'] = self.generate_url('logical-router', id) + + try: + self._extension_mgrs['resourceApi'].map_method('post_logical_router_update', id, obj_dict) + except Exception as e: + pass + + return 
{'logical-router': rsp_body} + #end logical_router_http_put + + def logical_router_http_delete(self, id): + db_conn = self._db_conn + # if obj doesn't exist return early + try: + obj_type = db_conn.uuid_to_obj_type(id) + if obj_type != 'logical_router': + abort(404, 'No logical-router object found for id %s' %(id)) + _ = db_conn.uuid_to_fq_name(id) + except NoIdError: + abort(404, 'ID %s does not exist' %(id)) + + try: + self._extension_mgrs['resourceApi'].map_method('pre_logical_router_delete', id) + except Exception as e: + pass + + # read in obj from db (accepting error) to get details of it + obj_ids = {'uuid': id} + back_references = [u'project_back_refs'] + children = [] + obj_fields = children + back_references + (read_ok, read_result) = db_conn.dbe_read('logical-router', obj_ids, obj_fields) + if not read_ok: + if read_result.startswith('Unknown id:'): + abort(404, 'ID %s does not exist' %(id)) + else: + self.config_object_error(id, None, 'logical_router', 'http_delete', read_result) + # proceed down to delete the resource + + # common handling for all resource delete + parent_type = read_result.get('parent_type', None) + (ok, del_result) = self._delete_common(request, 'logical_router', id, parent_type) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'logical_router', 'http_delete', msg) + abort(code, msg) + + fq_name = read_result['fq_name'] + ifmap_id = cfgm_common.imid.get_ifmap_id_from_fq_name('logical-router', fq_name) + obj_ids['imid'] = ifmap_id + if parent_type: + parent_imid = cfgm_common.imid.get_ifmap_id_from_fq_name(parent_type, fq_name[:-1]) + obj_ids['parent_imid'] = parent_imid + + # State modification starts from here. 
Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + + # type-specific hook + r_class = self.get_resource_class('logical-router') + if r_class: + if read_ok: + # fail if non-default children or backrefs exist + + # Delete default children first + self._logical_router_delete_default_children(read_result) + + (ok, del_result) = r_class.http_delete(id, read_result, db_conn) + if not ok: + (code, msg) = del_result + self.config_object_error(id, None, 'logical_router', 'http_delete', msg) + abort(code, msg) + callable = getattr(r_class, 'http_delete_fail', None) + if callable: + cleanup_on_failure.append((callable, [id, read_result, db_conn])) + #end if read_ok + + try: + (ok, del_result) = db_conn.dbe_delete('logical-router', obj_ids, read_result) + except Exception as e: + ok = False + del_result = str(e) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + self.config_object_error(id, None, 'logical_router', 'http_delete', del_result) + abort(409, del_result) + + try: + self._extension_mgrs['resourceApi'].map_method('post_logical_router_delete', id, read_result) + except Exception as e: + pass + + #end logical_router_http_delete + + def logical_routers_http_post(self): + key = 'logical-router' + obj_dict = request.json[key] + self._post_validate(key, obj_dict=obj_dict) + fq_name = obj_dict['fq_name'] + + try: + self._extension_mgrs['resourceApi'].map_method('pre_logical_router_create', obj_dict) + except Exception as e: + pass + + prop_dict = obj_dict.get('id_perms') + if prop_dict: + buf = cStringIO.StringIO() + xx_id_perms = IdPermsType(**prop_dict) + xx_id_perms.export(buf) + node = etree.fromstring(buf.getvalue()) + xx_id_perms = IdPermsType() + try: + xx_id_perms.build(node) + except Exception as e: + abort(400, str(e)) + # common handling for all resource create + (ok, result) = self._post_common(request, 'logical-router', obj_dict) + if not ok: + (code, msg) = result + 
fq_name_str = ':'.join(obj_dict.get('fq_name', [])) + self.config_object_error(None, fq_name_str, 'logical_router', 'http_post', msg) + abort(code, msg) + + name = obj_dict['fq_name'][-1] + fq_name = obj_dict['fq_name'] + + db_conn = self._db_conn + + # if client gave parent_type of config-root, ignore and remove + if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root': + del obj_dict['parent_type'] + + if 'parent_type' in obj_dict: + # non config-root child, verify parent exists + parent_type = obj_dict['parent_type'] + parent_fq_name = obj_dict['fq_name'][:-1] + try: + parent_uuid = self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name) + (ok, status) = self._permissions.check_perms_write(request, parent_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + self._permissions.set_user_role(request, obj_dict) + except NoIdError: + err_msg = 'Parent ' + pformat(parent_fq_name) + ' type ' + parent_type + ' does not exist' + fq_name_str = ':'.join(parent_fq_name) + self.config_object_error(None, fq_name_str, 'logical_router', 'http_post', err_msg) + abort(400, err_msg) + + # Validate perms + objtype_list = ['virtual_machine_interface', 'route_target', u'virtual_network', u'service_instance'] + for obj_type in objtype_list: + refs = obj_dict.get('%s_refs'%(obj_type), None) + if refs: + for ref in refs: + ref_uuid = db_conn.fq_name_to_uuid(obj_type, ref['to']) + (ok, status) = self._permissions.check_perms_link(request, ref_uuid) + if not ok: + (code, err_msg) = status + abort(code, err_msg) + + # State modification starts from here. Ensure that cleanup is done for all state changes + cleanup_on_failure = [] + # Alloc and Store id-mappings before creating entry on pubsub store. 
+ # Else a subscriber can ask for an id mapping before we have stored it + uuid_requested = result + (ok, result) = db_conn.dbe_alloc('logical-router', obj_dict, uuid_requested) + if not ok: + (code, msg) = result + fq_name_str = ':'.join(obj_dict['fq_name']) + self.config_object_error(None, fq_name_str, 'logical_router', 'http_post', result) + abort(code, msg) + cleanup_on_failure.append((db_conn.dbe_release, ['logical_router', fq_name])) + + obj_ids = result + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + + + # type-specific hook + r_class = self.get_resource_class('logical-router') + if r_class: + try: + (ok, result) = r_class.http_post_collection(tenant_name, obj_dict, db_conn) + except Exception as e: + ok = False + result = (500, str(e)) + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + (code, msg) = result + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'logical_router', 'http_post', msg) + abort(code, msg) + callable = getattr(r_class, 'http_post_collection_fail', None) + if callable: + cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn])) + + try: + (ok, result) = \ + db_conn.dbe_create('logical-router', obj_ids, obj_dict) + except Exception as e: + ok = False + result = str(e) + + if not ok: + for fail_cleanup_callable, cleanup_args in cleanup_on_failure: + fail_cleanup_callable(*cleanup_args) + fq_name_str = ':'.join(fq_name) + self.config_object_error(None, fq_name_str, 'logical_router', 'http_post', result) + abort(404, result) + + rsp_body = {} + rsp_body['name'] = name + rsp_body['fq_name'] = fq_name + rsp_body['uuid'] = obj_ids['uuid'] + rsp_body['href'] = self.generate_url('logical-router', obj_ids['uuid']) + if 'parent_type' in obj_dict: + # non config-root child, send back parent uuid/href + rsp_body['parent_uuid'] = parent_uuid + rsp_body['parent_href'] = 
self.generate_url(parent_type, parent_uuid) + + try: + self._extension_mgrs['resourceApi'].map_method('post_logical_router_create', obj_dict) + except Exception as e: + pass + + return {'logical-router': rsp_body} + #end logical_routers_http_post + + def logical_routers_http_get(self): + # gather list of uuids using 1. any specified anchors + # 2. any specified filters + # if not 'detail' return list with any specified 'fields' + # if 'detail' return list with props+refs + any specified 'fields' + + env = request.headers.environ + tenant_name = env.get(hdr_server_tenant(), 'default-project') + parent_uuids = None + back_ref_uuids = None + obj_uuids = None + if (('parent_fq_name_str' in request.query) and + ('parent_type' in request.query)): + parent_fq_name = request.query.parent_fq_name_str.split(':') + parent_type = request.query.parent_type + parent_uuids = [self._db_conn.fq_name_to_uuid(parent_type, parent_fq_name)] + elif 'parent_id' in request.query: + parent_ids = request.query.parent_id.split(',') + parent_uuids = [str(uuid.UUID(p_uuid)) for p_uuid in parent_ids] + if 'back_ref_id' in request.query: + back_ref_ids = request.query.back_ref_id.split(',') + back_ref_uuids = [str(uuid.UUID(b_uuid)) for b_uuid in back_ref_ids] + if 'obj_uuids' in request.query: + obj_uuids = request.query.obj_uuids.split(',') + + # common handling for all resource get + (ok, result) = self._get_common(request, parent_uuids) + if not ok: + (code, msg) = result + self.config_object_error(None, None, 'logical_routers', 'http_get_collection', msg) + abort(code, msg) + + if 'count' in request.query: + count = 'true' in request.query.count.lower() + else: + count = False + + filter_params = request.query.filters + if filter_params: + try: + ff_key_vals = filter_params.split(',') + ff_names = [ff.split('==')[0] for ff in ff_key_vals] + ff_values = [ff.split('==')[1] for ff in ff_key_vals] + filters = {'field_names': ff_names, 'field_values': ff_values} + except Exception as e: + 
abort(400, 'Invalid filter ' + filter_params) + else: + filters = None + db_conn = self._db_conn + (ok, result) = \ + db_conn.dbe_list('logical-router', parent_uuids, back_ref_uuids, obj_uuids, count, filters) + if not ok: + self.config_object_error(None, None, 'logical_routers', 'http_get_collection', result) + abort(404, result) + + # If only counting, return early + if count: + return {'logical-routers': {'count': result}} + + if 'detail' in request.query: + detail = 'true' in request.query.detail.lower() + else: + detail = False + + fq_names_uuids = result + obj_dicts = [] + if not detail: + if not self.is_admin_request(): + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms'] + (ok, result) = db_conn.dbe_read_multi('logical-router', obj_ids_list, obj_fields) + if not ok: + result = [] + for obj_result in result: + # give chance for zk heartbeat/ping + gevent.sleep(0) + if obj_result['id_perms'].get('user_visible', True): + obj_dict = {} + obj_dict['uuid'] = obj_result['uuid'] + obj_dict['href'] = self.generate_url('logical-router', obj_result['uuid']) + obj_dict['fq_name'] = obj_result['fq_name'] + obj_dicts.append(obj_dict) + else: + for fq_name, obj_uuid in fq_names_uuids: + # give chance for zk heartbeat/ping + gevent.sleep(0) + obj_dict = {} + obj_dict['uuid'] = obj_uuid + obj_dict['href'] = self.generate_url('logical-router', obj_uuid) + obj_dict['fq_name'] = fq_name + obj_dicts.append(obj_dict) + else: #detail + obj_ids_list = [{'uuid': obj_uuid} for _, obj_uuid in fq_names_uuids] + obj_fields = [u'id_perms', u'display_name'] + ['virtual_machine_interface_refs', 'route_target_refs', u'virtual_network_refs', u'service_instance_refs'] + if 'fields' in request.query: + obj_fields.extend(request.query.fields.split(',')) + (ok, result) = db_conn.dbe_read_multi('logical-router', obj_ids_list, obj_fields) + + if not ok: + result = [] + + for obj_result in result: + # give chance for zk heartbeat/ping + 
gevent.sleep(0) + obj_dict = {} + obj_dict['name'] = obj_result['fq_name'][-1] + obj_dict['href'] = self.generate_url('logical-router', obj_result['uuid']) + obj_dict.update(obj_result) + if (obj_dict['id_perms'].get('user_visible', True) or + self.is_admin_request()): + obj_dicts.append({'logical-router': obj_dict}) + + return {'logical-routers': obj_dicts} + #end logical_routers_http_get + + def _logical_router_create_default_children(self, parent_obj): + pass + #end _logical_router_create_default_children + + def _logical_router_delete_default_children(self, parent_dict): + pass + #end _logical_router_delete_default_children + +#end class VncApiServerGen + +class DefaultsGen(object): + def __init__(self): + self.perms = {} + default_perms = self._common_default_perms + id_perms = IdPermsType(default_perms, None, True, 0, 0) + self.perms['domain'] = id_perms + self.perms['global-vrouter-config'] = id_perms + self.perms['instance-ip'] = id_perms + self.perms['network-policy'] = id_perms + self.perms['loadbalancer-pool'] = id_perms + self.perms['virtual-DNS-record'] = id_perms + self.perms['route-target'] = id_perms + self.perms['floating-ip'] = id_perms + self.perms['floating-ip-pool'] = id_perms + self.perms['physical-router'] = id_perms + self.perms['bgp-router'] = id_perms + self.perms['virtual-router'] = id_perms + self.perms['config-root'] = id_perms + self.perms['subnet'] = id_perms + self.perms['global-system-config'] = id_perms + self.perms['service-appliance'] = id_perms + self.perms['service-instance'] = id_perms + self.perms['namespace'] = id_perms + self.perms['logical-interface'] = id_perms + self.perms['route-table'] = id_perms + self.perms['physical-interface'] = id_perms + self.perms['access-control-list'] = id_perms + self.perms['analytics-node'] = id_perms + self.perms['virtual-DNS'] = id_perms + self.perms['customer-attachment'] = id_perms + self.perms['service-appliance-set'] = id_perms + self.perms['config-node'] = id_perms + 
self.perms['qos-queue'] = id_perms + self.perms['virtual-machine'] = id_perms + self.perms['interface-route-table'] = id_perms + self.perms['service-template'] = id_perms + self.perms['virtual-ip'] = id_perms + self.perms['loadbalancer-member'] = id_perms + self.perms['security-group'] = id_perms + self.perms['provider-attachment'] = id_perms + self.perms['virtual-machine-interface'] = id_perms + self.perms['loadbalancer-healthmonitor'] = id_perms + self.perms['virtual-network'] = id_perms + self.perms['project'] = id_perms + self.perms['qos-forwarding-class'] = id_perms + self.perms['database-node'] = id_perms + self.perms['routing-instance'] = id_perms + self.perms['network-ipam'] = id_perms + self.perms['logical-router'] = id_perms + + self.resource = {} + + #end __init__ +#end class DefaultsGen |