-rw-r--r--  CONTRIBUTING.rst | 17
-rw-r--r--  HACKING.rst | 7
-rw-r--r--  INFO | 21
-rw-r--r--  LICENSE | 13
-rw-r--r--  MANIFEST.in | 6
-rw-r--r--  README.rst | 60
-rw-r--r--  __init__.py | 0
-rw-r--r--  babel.cfg | 2
-rw-r--r--  devstack/README.md | 31
-rw-r--r--  devstack/plugin.sh | 25
-rw-r--r--  devstack/settings | 8
-rw-r--r--  doc/source/alembic_migration.rst | 102
-rwxr-xr-x  doc/source/api.rst | 626
-rw-r--r--  doc/source/command_extensions.rst | 64
-rwxr-xr-x  doc/source/conf.py | 76
-rw-r--r--  doc/source/contribution.rst | 29
-rw-r--r--  doc/source/index.rst | 66
-rw-r--r--  doc/source/installation.rst | 37
-rw-r--r--  doc/source/ovs_driver_and_agent_workflow.rst | 311
-rw-r--r--  doc/source/system_design and_workflow.rst | 245
-rw-r--r--  doc/source/usage.rst | 32
-rw-r--r--  docs/release/userguide/feature.userguide.rst | 58
-rw-r--r--  etc/neutron/rootwrap.d/networking-sfc.filters | 9
-rw-r--r--  networking_sfc/__init__.py | 19
-rw-r--r--  networking_sfc/cli/__init__.py | 0
-rw-r--r--  networking_sfc/cli/flow_classifier.py | 202
-rw-r--r--  networking_sfc/cli/port_chain.py | 141
-rw-r--r--  networking_sfc/cli/port_pair.py | 124
-rw-r--r--  networking_sfc/cli/port_pair_group.py | 114
-rw-r--r--  networking_sfc/db/__init__.py | 0
-rw-r--r--  networking_sfc/db/flowclassifier_db.py | 211
-rw-r--r--  networking_sfc/db/migration/README | 3
-rw-r--r--  networking_sfc/db/migration/__init__.py | 0
-rw-r--r--  networking_sfc/db/migration/alembic_migrations/__init__.py | 0
-rw-r--r--  networking_sfc/db/migration/alembic_migrations/env.py | 88
-rw-r--r--  networking_sfc/db/migration/alembic_migrations/script.py.mako | 36
-rw-r--r--  networking_sfc/db/migration/alembic_migrations/versions/HEADS | 2
-rw-r--r--  networking_sfc/db/migration/alembic_migrations/versions/liberty/contract/48072cb59133_initial.py | 33
-rw-r--r--  networking_sfc/db/migration/alembic_migrations/versions/liberty/expand/24fc7241aa5_initial.py | 33
-rw-r--r--  networking_sfc/db/migration/alembic_migrations/versions/liberty/expand/5a475fc853e6_ovs_data_model.py | 87
-rw-r--r--  networking_sfc/db/migration/alembic_migrations/versions/liberty/expand/9768e6a66c9_flowclassifier_data_model.py | 61
-rw-r--r--  networking_sfc/db/migration/alembic_migrations/versions/liberty/expand/c3e178d4a985_sfc_data_model.py | 119
-rw-r--r--  networking_sfc/db/migration/alembic_migrations/versions/start_networking_sfc.py | 30
-rw-r--r--  networking_sfc/db/migration/models/__init__.py | 0
-rw-r--r--  networking_sfc/db/migration/models/head.py | 23
-rw-r--r--  networking_sfc/db/sfc_db.py | 553
-rw-r--r--  networking_sfc/extensions/__init__.py | 0
-rw-r--r--  networking_sfc/extensions/flowclassifier.py | 304
-rw-r--r--  networking_sfc/extensions/sfc.py | 382
-rw-r--r--  networking_sfc/services/__init__.py | 0
-rw-r--r--  networking_sfc/services/flowclassifier/__init__.py | 0
-rw-r--r--  networking_sfc/services/flowclassifier/common/__init__.py | 0
-rw-r--r--  networking_sfc/services/flowclassifier/common/config.py | 27
-rw-r--r--  networking_sfc/services/flowclassifier/common/context.py | 37
-rw-r--r--  networking_sfc/services/flowclassifier/common/exceptions.py | 31
-rw-r--r--  networking_sfc/services/flowclassifier/driver_manager.py | 104
-rw-r--r--  networking_sfc/services/flowclassifier/drivers/__init__.py | 0
-rw-r--r--  networking_sfc/services/flowclassifier/drivers/base.py | 33
-rw-r--r--  networking_sfc/services/flowclassifier/drivers/dummy/__init__.py | 0
-rw-r--r--  networking_sfc/services/flowclassifier/drivers/dummy/dummy.py | 35
-rw-r--r--  networking_sfc/services/flowclassifier/plugin.py | 113
-rw-r--r--  networking_sfc/services/sfc/__init__.py | 0
-rw-r--r--  networking_sfc/services/sfc/agent/__init__.py | 2
-rw-r--r--  networking_sfc/services/sfc/agent/agent.py | 891
-rw-r--r--  networking_sfc/services/sfc/agent/br_int.py | 48
-rw-r--r--  networking_sfc/services/sfc/agent/br_phys.py | 34
-rw-r--r--  networking_sfc/services/sfc/agent/br_tun.py | 35
-rw-r--r--  networking_sfc/services/sfc/common/__init__.py | 0
-rw-r--r--  networking_sfc/services/sfc/common/config.py | 27
-rw-r--r--  networking_sfc/services/sfc/common/context.py | 85
-rw-r--r--  networking_sfc/services/sfc/common/exceptions.py | 46
-rw-r--r--  networking_sfc/services/sfc/common/ovs_ext_lib.py | 187
-rw-r--r--  networking_sfc/services/sfc/driver_manager.py | 118
-rw-r--r--  networking_sfc/services/sfc/drivers/__init__.py | 0
-rw-r--r--  networking_sfc/services/sfc/drivers/base.py | 57
-rw-r--r--  networking_sfc/services/sfc/drivers/dummy/__init__.py | 0
-rw-r--r--  networking_sfc/services/sfc/drivers/dummy/dummy.py | 59
-rw-r--r--  networking_sfc/services/sfc/drivers/ovs/__init__.py | 0
-rw-r--r--  networking_sfc/services/sfc/drivers/ovs/constants.py | 57
-rw-r--r--  networking_sfc/services/sfc/drivers/ovs/db.py | 426
-rw-r--r--  networking_sfc/services/sfc/drivers/ovs/driver.py | 1076
-rw-r--r--  networking_sfc/services/sfc/drivers/ovs/rpc.py | 112
-rw-r--r--  networking_sfc/services/sfc/drivers/ovs/rpc_topics.py | 21
-rw-r--r--  networking_sfc/services/sfc/plugin.py | 200
-rw-r--r--  networking_sfc/tests/__init__.py | 0
-rw-r--r--  networking_sfc/tests/base.py | 134
-rw-r--r--  networking_sfc/tests/unit/__init__.py | 0
-rw-r--r--  networking_sfc/tests/unit/cli/__init__.py | 0
-rw-r--r--  networking_sfc/tests/unit/cli/test_flow_classifier.py | 182
-rw-r--r--  networking_sfc/tests/unit/cli/test_port_chain.py | 186
-rw-r--r--  networking_sfc/tests/unit/cli/test_port_pair.py | 160
-rw-r--r--  networking_sfc/tests/unit/cli/test_port_pair_group.py | 144
-rw-r--r--  networking_sfc/tests/unit/db/__init__.py | 0
-rw-r--r--  networking_sfc/tests/unit/db/test_flowclassifier_db.py | 677
-rw-r--r--  networking_sfc/tests/unit/db/test_sfc_db.py | 1490
-rw-r--r--  networking_sfc/tests/unit/extensions/__init__.py | 0
-rw-r--r--  networking_sfc/tests/unit/extensions/test_flowclassifier.py | 603
-rw-r--r--  networking_sfc/tests/unit/extensions/test_sfc.py | 751
-rw-r--r--  networking_sfc/tests/unit/services/__init__.py | 0
-rw-r--r--  networking_sfc/tests/unit/services/flowclassifier/__init__.py | 0
-rw-r--r--  networking_sfc/tests/unit/services/flowclassifier/test_driver_manager.py | 158
-rw-r--r--  networking_sfc/tests/unit/services/flowclassifier/test_plugin.py | 168
-rw-r--r--  networking_sfc/tests/unit/services/sfc/__init__.py | 0
-rw-r--r--  networking_sfc/tests/unit/services/sfc/agent/__init__.py | 0
-rw-r--r--  networking_sfc/tests/unit/services/sfc/agent/test-agent.py | 4012
-rw-r--r--  networking_sfc/tests/unit/services/sfc/common/__init__.py | 0
-rw-r--r--  networking_sfc/tests/unit/services/sfc/common/test_ovs_ext_lib.py | 93
-rw-r--r--  networking_sfc/tests/unit/services/sfc/drivers/__init__.py | 0
-rw-r--r--  networking_sfc/tests/unit/services/sfc/drivers/ovs/__init__.py | 0
-rw-r--r--  networking_sfc/tests/unit/services/sfc/test_driver_manager.py | 325
-rw-r--r--  networking_sfc/tests/unit/services/sfc/test_plugin.py | 468
-rw-r--r--  onboarding.txt | 8
-rw-r--r--  requirements.txt | 52
-rw-r--r--  setup.cfg | 66
-rw-r--r--  setup.py | 29
-rw-r--r--  test-requirements.txt | 22
-rw-r--r--  tools/check_i18n.py | 153
-rw-r--r--  tools/check_i18n_test_case.txt | 67
-rwxr-xr-x  tools/check_unit_test_structure.sh | 52
-rwxr-xr-x  tools/clean.sh | 5
-rw-r--r--  tools/i18n_cfg.py | 97
-rw-r--r--  tools/install_venv.py | 72
-rw-r--r--  tools/install_venv_common.py | 172
-rwxr-xr-x  tools/pretty_tox.sh | 6
-rwxr-xr-x  tools/subunit-trace.py | 307
-rwxr-xr-x  tools/tox_install.sh | 22
-rwxr-xr-x  tools/with_venv.sh | 19
-rw-r--r--  tox.ini | 67
128 files changed, 29 insertions, 19009 deletions
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
deleted file mode 100644
index 57477f0..0000000
--- a/CONTRIBUTING.rst
+++ /dev/null
@@ -1,17 +0,0 @@
-If you would like to contribute to the development of OpenStack, you must
-follow the steps in this page:
-
- http://docs.openstack.org/infra/manual/developers.html
-
-If you already have a good understanding of how the system works and your
-OpenStack accounts are set up, you can skip to the development workflow
-section of this documentation to learn how changes to OpenStack should be
-submitted for review via the Gerrit tool:
-
- http://docs.openstack.org/infra/manual/developers.html#development-workflow
-
-Pull requests submitted through GitHub will be ignored.
-
-Bugs should be filed on Launchpad, not GitHub:
-
- https://bugs.launchpad.net/neutron
diff --git a/HACKING.rst b/HACKING.rst
deleted file mode 100644
index 07dd2aa..0000000
--- a/HACKING.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-Networking SFC Style Commandments
-=================================
-
-Please see the Neutron HACKING.rst file for style commandments for
-networking-sfc:
-
-`Neutron HACKING.rst <http://git.openstack.org/cgit/openstack/neutron/tree/HACKING.rst>`_
diff --git a/INFO b/INFO
deleted file mode 100644
index ff13c87..0000000
--- a/INFO
+++ /dev/null
@@ -1,21 +0,0 @@
-Repo name: vnf_forwarding_graph
-Project Category: Requirement
-Lifecycle State:
-Primary Contact: cathy.h.zhang@huawei.com
-Project Lead: cathy.h.zhang@huawei.com
-Jira Project Name: vnf forwarding graph
-Jira Project Prefix: vfngraph
-mailing list tag: [vnfgraph]
-
-Committers:
-cathy.h.zhang@huawei.com
-louis.fourie@huawei.com
-uyijun@huawei.com
-david.lenrow@hp.com
-saddepalli@freescale.com
-nicolas.bouthors@qosmos.com
-balaji.p@freescale.com
-murali.yadav@calsoftlabs.com
-
-Link to TSC approval of the project: http://meetbot.opnfv.org/meetings/opnfv-meeting/
-Link(s) to approval of additional submitters:
diff --git a/LICENSE b/LICENSE
deleted file mode 100644
index eab0924..0000000
--- a/LICENSE
+++ /dev/null
@@ -1,13 +0,0 @@
-Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/MANIFEST.in b/MANIFEST.in
deleted file mode 100644
index c978a52..0000000
--- a/MANIFEST.in
+++ /dev/null
@@ -1,6 +0,0 @@
-include AUTHORS
-include ChangeLog
-exclude .gitignore
-exclude .gitreview
-
-global-exclude *.pyc
diff --git a/README.rst b/README.rst
deleted file mode 100644
index e0edf3d..0000000
--- a/README.rst
+++ /dev/null
@@ -1,60 +0,0 @@
-============================================================
-Service Function Chaining Extension for OpenStack Networking
-============================================================
-
-Service Function Chaining API Status
-------------------------------------
-
-This project has not been released yet, and as such, the API should be
-considered experimental. This means the SFC API may change in
-backwards-incompatible ways while it is under development. The goal is to
-allow backend implementations to experiment with the API at the same time as
-it is being developed. Once a release is made, this documentation will be
-updated to remove this warning.
-
-This project provides APIs and implementations to support
-Service Function Chaining in Neutron.
-
-Service Function Chaining is a mechanism for overriding the basic destination
-based forwarding that is typical of IP networks. It is conceptually related
-to Policy Based Routing in physical networks but it is typically thought of as
-a Software Defined Networking technology. It is often used in conjunction with
-security functions although it may be used for a broader range of features.
-Fundamentally SFC is the ability to cause network packet flows to route through
-a network via a path other than the one that would be chosen by routing table
-lookups on the packet's destination IP address. It is most commonly used in
-conjunction with Network Function Virtualization when recreating in a virtual
-environment a series of network functions that would have traditionally been
-implemented as a collection of physical network devices connected in series
-by cables.
-
-A very simple example of a service chain would be one that forces all traffic
-from point A to point B to go through a firewall even though the firewall is
-not literally between point A and B from a routing table perspective.
-
-A more complex example is an ordered series of functions, each implemented in
-multiple VMs, such that traffic must flow through one VM at each hop in the
-chain but the network uses a hashing algorithm to distribute different flows
-across multiple VMs at each hop.
-
-* Free software: Apache license
-* Source: http://git.openstack.org/cgit/openstack/networking-sfc
-* Overview: https://launchpad.net/networking-sfc
-* Bugs: http://bugs.launchpad.net/networking-sfc
-* Blueprints: https://blueprints.launchpad.net/networking-sfc
-
-Features
---------
-
-* Creation of Service Function Chains consisting of an ordered sequence of Service Functions. SFs are virtual machines (or potentially physical devices) that perform a network function such as firewall, content cache, packet inspection, or any other function that requires processing of packets in a flow from point A to point B.
-* Reference implementation with Open vSwitch
-* Flow classification mechanism (ability to select and act on traffic)
-* Vendor neutral API
-* Modular plugin driver architecture
-
-Background on the Subject of Service Function Chaining
-------------------------------------------------------
-* Original Neutron bug (request for enhancement): https://bugs.launchpad.net/neutron/+bug/1450617
-* https://blueprints.launchpad.net/neutron/+spec/neutron-api-extension-for-service-chaining
-* https://blueprints.launchpad.net/neutron/+spec/common-service-chaining-driver-api
-* https://wiki.opnfv.org/requirements_projects/openstack_based_vnf_forwarding_graph
diff --git a/__init__.py b/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/__init__.py
+++ /dev/null
diff --git a/babel.cfg b/babel.cfg
deleted file mode 100644
index 15cd6cb..0000000
--- a/babel.cfg
+++ /dev/null
@@ -1,2 +0,0 @@
-[python: **.py]
-
diff --git a/devstack/README.md b/devstack/README.md
deleted file mode 100644
index 4093e5c..0000000
--- a/devstack/README.md
+++ /dev/null
@@ -1,31 +0,0 @@
-This directory contains the networking-sfc devstack plugin. To configure
-networking-sfc, you will need to enable the networking-sfc devstack
-plugin by editing the [[local|localrc]] section of your local.conf file,
-as described below.
-
-1) Enable the plugin
-
-To enable the plugin, add a line of the form:
-
- enable_plugin networking-sfc <GITURL> [GITREF]
-
-where
-
- <GITURL> is the URL of a networking-sfc repository
- [GITREF] is an optional git ref (branch/ref/tag). The default is
- master.
-
-For example
-
- If you have already cloned the networking-sfc repository (which is
- useful when testing unmerged changes)
-
- enable_plugin networking-sfc /opt/stack/networking-sfc
-
- Or, if you want to pull the networking-sfc repository from the remote
- OpenStack git server and use a particular branch (for example,
- stable/liberty)
-
- enable_plugin networking-sfc git://git.openstack.org/openstack/networking-sfc stable/liberty
-
-For more information, see the "Externally Hosted Plugins" section of
-http://docs.openstack.org/developer/devstack/plugins.html.
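-
-For illustration only (the URL and branch are assumptions about your
-environment), a minimal local.conf fragment enabling the plugin could be:
-
-    [[local|localrc]]
-    enable_plugin networking-sfc git://git.openstack.org/openstack/networking-sfc master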
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
deleted file mode 100644
index 04a87bb..0000000
--- a/devstack/plugin.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-# function definitions for networking-sfc devstack plugin
-
-function networking_sfc_install {
- setup_develop $NETWORKING_SFC_DIR
-}
-
-function networking_sfc_configure_common {
- _neutron_service_plugin_class_add $NEUTRON_FLOWCLASSIFIER_PLUGIN
- _neutron_service_plugin_class_add $NEUTRON_SFC_PLUGIN
- iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES
- iniadd $NEUTRON_CONF sfc drivers $NEUTRON_SFC_DRIVERS
- _neutron_deploy_rootwrap_filters $NETWORKING_SFC_DIR
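-    # Apply the networking-sfc alembic migrations to the Neutron database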
- neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE --subproject networking-sfc upgrade head
-}
-
-
-if [[ "$1" == "stack" && "$2" == "install" ]]; then
- # Perform installation of service source
- echo_summary "Installing networking-sfc"
- networking_sfc_install
-
-elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
- echo_summary "Configuring networking-sfc"
- networking_sfc_configure_common
-fi
diff --git a/devstack/settings b/devstack/settings
deleted file mode 100644
index 6fe3581..0000000
--- a/devstack/settings
+++ /dev/null
@@ -1,8 +0,0 @@
-# settings for networking-sfc devstack plugin
-
-NETWORKING_SFC_DIR=${NETWORKING_SFC_DIR:-"$DEST/networking-sfc"}
-
-NEUTRON_FLOWCLASSIFIER_PLUGIN=${NEUTRON_FLOWCLASSIFIER_PLUGIN:="networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin"}
-NEUTRON_SFC_PLUGIN=${NEUTRON_SFC_PLUGIN:-"networking_sfc.services.sfc.plugin.SfcPlugin"}
-
-NEUTRON_SFC_DRIVERS=${NEUTRON_SFC_DRIVERS:-"ovs"}
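-
-# Any of these can be overridden in the [[local|localrc]] section of
-# local.conf before the plugin runs, e.g. (illustrative value):
-#
-# NEUTRON_SFC_DRIVERS=dummy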
diff --git a/doc/source/alembic_migration.rst b/doc/source/alembic_migration.rst
deleted file mode 100644
index bb225af..0000000
--- a/doc/source/alembic_migration.rst
+++ /dev/null
@@ -1,102 +0,0 @@
-..
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
-
- Convention for heading levels in Neutron devref:
- ======= Heading 0 (reserved for the title in a document)
- ------- Heading 1
- ~~~~~~~ Heading 2
- +++++++ Heading 3
- ''''''' Heading 4
- (Avoid deeper levels because they do not render well.)
-
-
-Alembic-migration
-=================
-
-Alembic migrations define the data model required by networking-sfc and
-apply it to the database. Refer to the `Neutron alembic migration process <http://docs.openstack.org/developer/neutron/devref/alembic_migrations.html>`_ for further details.
-
-Important operations:
----------------------
-
-Checking migration:
-~~~~~~~~~~~~~~~~~~~
-
-::
-
- neutron-db-manage --subproject networking-sfc check_migration
- Running branches for networking-sfc ...
- start_networking_sfc (branchpoint)
- -> 48072cb59133 (contract) (head)
- -> 24fc7241aa5 (expand)
-
- OK
-
-Checking branch information:
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-::
-
- neutron-db-manage --subproject networking-sfc branches
- Running branches for networking-sfc ...
- start_networking_sfc (branchpoint)
- -> 48072cb59133 (contract) (head)
- -> 24fc7241aa5 (expand)
-
- OK
-
-Checking migration history:
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-::
-
- neutron-db-manage --subproject networking-sfc history
- Running history for networking-sfc ...
- 9768e6a66c9 -> 5a475fc853e6 (expand) (head), Defining OVS data-model
- 24fc7241aa5 -> 9768e6a66c9 (expand), Defining flow-classifier data-model
- start_networking_sfc -> 24fc7241aa5 (expand), Defining Port Chain data-model.
- start_networking_sfc -> 48072cb59133 (contract) (head), Initial Liberty no-op script.
- <base> -> start_networking_sfc (branchpoint), start networking-sfc chain
-
-Applying changes:
-~~~~~~~~~~~~~~~~~
-
-::
-
- neutron-db-manage --subproject networking-sfc upgrade head
- INFO [alembic.runtime.migration] Context impl MySQLImpl.
- INFO [alembic.runtime.migration] Will assume non-transactional DDL.
- Running upgrade for networking-sfc ...
- INFO [alembic.runtime.migration] Context impl MySQLImpl.
- INFO [alembic.runtime.migration] Will assume non-transactional DDL.
- INFO [alembic.runtime.migration] Running upgrade -> start_networking_sfc, start networking-sfc chain
- INFO [alembic.runtime.migration] Running upgrade start_networking_sfc -> 48072cb59133, Initial Liberty no-op script.
- INFO [alembic.runtime.migration] Running upgrade start_networking_sfc -> 24fc7241aa5, Defining Port Chain data-model.
- INFO [alembic.runtime.migration] Running upgrade 24fc7241aa5 -> 9768e6a66c9, Defining flow-classifier data-model
- INFO [alembic.runtime.migration] Running upgrade 9768e6a66c9 -> 5a475fc853e6, Defining OVS data-model
- OK
-
-Checking current version:
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-::
-
- neutron-db-manage --subproject networking-sfc current
- Running current for networking-sfc ...
- INFO [alembic.runtime.migration] Context impl MySQLImpl.
- INFO [alembic.runtime.migration] Will assume non-transactional DDL.
- 48072cb59133 (head)
- 5a475fc853e6 (head)
- OK
-
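-Creating a new revision:
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-When a new data-model change is introduced, a corresponding expand or
-contract script can be generated with the standard Neutron tooling (the
-revision message below is only an illustration)::
-
-    neutron-db-manage --subproject networking-sfc revision -m "description" --expand
-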
diff --git a/doc/source/api.rst b/doc/source/api.rst
deleted file mode 100755
index aaa079a..0000000
--- a/doc/source/api.rst
+++ /dev/null
@@ -1,626 +0,0 @@
-..
- Copyright 2015 Futurewei. All rights reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
-
- Convention for heading levels in Neutron devref:
- ======= Heading 0 (reserved for the title in a document)
- ------- Heading 1
- ~~~~~~~ Heading 2
- +++++++ Heading 3
- ''''''' Heading 4
- (Avoid deeper levels because they do not render well.)
-
-
-=========
-API Model
-=========
-
-Problem Description
-===================
-
-Currently Neutron does not support service function chaining. To support
-service function chaining, Service VMs must be attached at points in the
-network and then traffic must be steered between these attachment
-points. Please refer to the `Neutron Service Chain blueprint <https://blueprints.launchpad.net/neutron/+spec/neutron-api-extension-for-service-chaining>`_ and bugs `[1] <https://bugs.launchpad.net/neutron/+bug/1450617>`_ `[2] <https://bugs.launchpad.net/neutron/+bug/1450625>`_
-related to this specification for more information.
-
-Proposed Change
-===============
-
-All Neutron network services and VMs are connected to a Neutron network
-via Neutron ports. This makes it possible to create a traffic steering model
-for service chaining that uses only Neutron ports. This traffic steering
-model has no notion of the actual services attached to these Neutron
-ports.
-
-The service VM hosting the service functions is instantiated and configured,
-then VNICs are added to the VM and then these VNICs are attached to the
-network by Neutron ports. Once the service function is attached to Neutron
-ports, the ports may be included in a "port chain" to allow the service
-function to provide treatment to the user's traffic.
-
-A Port Chain (Service Function Path) consists of:
-
-* a set of Neutron ports, to define the sequence of service functions
-* a set of flow classifiers, to specify the classified traffic flows to
- enter the chain
-
-If a service function has a pair of ports, the first port in
-the port-pair is the ingress port of the service function, and the second
-port is the egress port of the service function.
-If a service function has one bidirectional port, then both ports in
-the port-pair have the same value.
-A Port Chain is a directional service chain. The first port of the first port-pair
-is the head of the service chain. The second port of the last port-pair is the tail
-of the service chain. A bidirectional service chain would be composed of two unidirectional Port Chains.
-
-For example, [{'p1': 'p2'}, {'p3': 'p4'}, {'p5': 'p6'}] represents::
-
-      +------+      +------+      +------+
-      | SF1  |      | SF2  |      | SF3  |
-      +------+      +------+      +------+
-    p1|      |p2  p3|      |p4  p5|      |p6
-      |      |      |      |      |      |
-  ->--+      +------+      +------+      +---->
-
-where p1 is the head of the Port Chain and p6 is the tail of the Port Chain, and
-SF1 has ports p1 and p2, SF2 has ports p3 and p4, and SF3 has ports p5 and p6.
-
-In order to create a chain, the user needs to have the actual port objects.
-The work flow would typically be:
-
-a) create the ports
-b) create the chain
-c) boot the VMs, passing the ports as NIC parameters
-
-The sequence of b) and c) can be switched.
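-
-As a rough sketch of this workflow (the port names, network, image and
-flavor below are invented for illustration), the calls might look like::
-
-    neutron port-create --name p1 net1
-    neutron port-create --name p2 net1
-    neutron port-pair-create --ingress p1 --egress p2 PP1
-    neutron port-pair-group-create --port-pairs PP1 PPG1
-    neutron port-chain-create --port-pair-group PPG1 PC1
-    nova boot --image sf-image --flavor m1.small \
-        --nic port-id=<p1-uuid> --nic port-id=<p2-uuid> SF1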
-
-An SF's Neutron port may be associated with more than one Port Chain to allow
-a service function to be shared by multiple chains.
-
-If more than one service function instance of a specific type is available
-to meet the user's service requirement, their Neutron ports are included in
-the port chain as a sub-list. For example, if {p3, p4} and {p7, p8} are the
-port-pairs of two FW instances, both may be included in a port chain for
-load distribution as shown below.
-
- [{'p1': 'p2'}, [{'p3': 'p4'},{'p7': 'p8'}], {'p5': 'p6'}]
-
-Flow classifiers are used to select the traffic that can
-access the chain. Traffic that matches any flow classifier will be
-directed to the first port in the chain. The flow classifier will be a generic
-independent module and may be used by other projects like FW, QOS, etc.
-
-A flow classifier cannot be part of two different port-chains; otherwise it
-would be ambiguous which chain path the flow's packets should take. A check
-will be made to ensure there is no such ambiguity. But multiple flow
-classifiers can be associated with
-a port chain since multiple different types of flows can request the same service
-treatment path.
-
-CLI Commands
-------------
-
-Syntax::
-
- neutron port-pair-create [-h]
- [--description <description>]
- --ingress <port-id>
- --egress <port-id>
- [--service-function-parameters <parameter>] PORT-PAIR-NAME
-
- neutron port-pair-group-create [-h]
- [--description <description>]
- --port-pairs <port-pair-id> PORT-PAIR-GROUP-NAME
-
- neutron flow-classifier-create [-h]
- [--description <description>]
- [--protocol <protocol>]
- [--ethertype <Ethertype>]
- [--source-port <Minimum source protocol port>:<Maximum source protocol port>]
- [--destination-port <Minimum destination protocol port>:<Maximum destination protocol port>]
- [--source-ip-prefix <Source IP prefix>]
- [--destination-ip-prefix <Destination IP prefix>]
- [--logical-source-port <Neutron source port>]
- [--logical-destination-port <Neutron destination port>]
- [--l7-parameters <L7 parameter>] FLOW-CLASSIFIER-NAME
-
- neutron port-chain-create [-h]
- [--description <description>]
- --port-pair-group <port-pair-group-id>
- [--flow-classifier <classifier-id>]
- [--chain-parameters <chain-parameter>] PORT-CHAIN-NAME
-
-1. neutron port-chain-create
-
-Each "port-pair-group" option specifies a type of SF. If a chain consists of a sequence
-of different types of SFs, then the chain will have multiple "port-pair-group" options.
-There must be at least one "port-pair-group" in the Port Chain.
-
-The "flow-classifier" option may be repeated to associate multiple flow classifiers
-with a port chain, with each classifier identifying a flow. If the flow-classifier is not
-specified, then no traffic will be steered through the chain.
-
-One chain parameter option is currently defined. More parameter options can be added
-in future extensions to accommodate new requirements.
-The "correlation" parameter is used to specify the type of chain correlation mechanism.
-This parameter allows different correlation mechanisms to be selected.
-This will be set to "mpls" for now to be consistent with current OVS capability.
-If this parameter is not specified, it will default to "mpls".
-
-The port-chain-create command returns the ID of a Port Chain.
-
-A port chain can be created, read, updated and deleted, and when a chain is
-created/read/updated/deleted, the options that are involved would be based on
-the CRUD in the "Port Chain" resource table below.
-
-2. neutron port-pair-group-create
-Inside each "port-pair-group", there could be one or more port-pairs.
-Multiple port-pairs may be included in a "port-pair-group" to allow the specification of
-a set of functionally equivalent SFs that can be used for load distribution,
-i.e., the "port-pair" option may be repeated for multiple port-pairs of
-functionally equivalent SFs.
-
-The port-pair-group-create command returns the ID of a Port Pair Group.
-
-3. neutron port-pair-create
-A Port Pair represents a service function instance. The ingress port and the
-egress port of the service function may be specified. If a service function
-has one bidirectional port, the ingress port has the same value as the egress port.
-The "service-function-parameter" option allows the passing of SF specific parameter
-information to the data path. One parameter option is currently defined. More parameter
-options can be added in future extensions to accommodate new requirements.
-The "correlation" parameter is used to specify the type of chain correlation mechanism
-supported by a specific SF. This is needed by the data plane switch to determine
-how to associate a packet with a chain. This will be set to "none" for now since
-there is no correlation mechanism supported by the SF. In the future, it can be extended
-to include "mpls", "nsh", etc.. If this parameter is not specified, it will default to "none".
-
-The port-pair-create command returns the ID of a Port Pair.
-
-4. neutron flow-classifier-create
-A combination of the "source" options defines the source of the flow.
-A combination of the "destination" options defines the destination of the flow.
-The l7_parameter is a placeholder that may be used to support flow classification
-using L7 fields, such as a URL. If an option is not specified, it defaults to a
-wildcard value, except for ethertype, which defaults to 'IPv4', and for
-logical-source-port and logical-destination-port, which default to None.
-
-The flow-classifier-create command returns the ID of a flow classifier.
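-
-For example, a classifier that selects TCP traffic from 10.0.0.0/24 destined
-to port 80 might be created as follows (all values are illustrative)::
-
-    neutron flow-classifier-create --ethertype IPv4 --protocol TCP \
-        --source-ip-prefix 10.0.0.0/24 --destination-port 80:80 FC1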
-
-
-Data Model Impact
------------------
-
-Data model::
-
- +-------+ +----------+ +------------+
- | Port |--------| Port Pair|--------| Port Pairs |
- | Chain |* *| Groups | 1 *| |
- +-------+ +----------+ +------------+
- |1
- |
- |*
- +--------------+
- | Flow |
- | Classifiers |
- +--------------+
-
-New objects:
-
-Port Chain
- * id - Port chain ID.
- * tenant_id - Tenant ID.
- * name - Readable name.
- * description - Readable description.
- * port_pair_groups - List of port-pair-group IDs.
- * flow_classifiers - List of flow-classifier IDs.
- * chain_parameters - Dict. of chain parameters.
-
-Port Pair Group
- * id - Port pair group ID.
- * tenant_id - Tenant ID.
- * name - Readable name.
- * description - Readable description.
- * port_pairs - List of service function (Neutron) port-pairs.
-
-Port Pair
- * id - Port pair ID.
- * tenant_id - Tenant ID.
- * name - Readable name.
- * description - Readable description.
- * ingress - Ingress port.
- * egress - Egress port.
- * service_function_parameters - Dict. of service function parameters
-
-Flow Classifier
- * id - Flow classifier ID.
- * tenant_id - Tenant ID.
- * name - Readable name.
- * description - Readable description.
- * ethertype - Ethertype ('IPv4'/'IPv6').
- * protocol - IP protocol.
- * source_port_range_min - Minimum source protocol port.
- * source_port_range_max - Maximum source protocol port.
- * destination_port_range_min - Minimum destination protocol port.
- * destination_port_range_max - Maximum destination protocol port.
- * source_ip_prefix - Source IP address or prefix.
- * destination_ip_prefix - Destination IP address or prefix.
- * logical_source_port - Neutron source port.
- * logical_destination_port - Neutron destination port.
- * l7_parameters - Dictionary of L7 parameters.
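-
-For orientation only, here is a minimal sketch of how an object such as the
-Port Pair could be expressed as a SQLAlchemy model (the table name and column
-types are assumptions; the authoritative models live in
-networking_sfc/db/sfc_db.py and networking_sfc/db/flowclassifier_db.py)::
-
-    import sqlalchemy as sa
-    from sqlalchemy.ext import declarative
-
-    Base = declarative.declarative_base()
-
-    class PortPair(Base):
-        __tablename__ = 'sfc_port_pairs'
-        id = sa.Column(sa.String(36), primary_key=True)
-        tenant_id = sa.Column(sa.String(255))
-        name = sa.Column(sa.String(255))
-        description = sa.Column(sa.String(255))
-        # ingress/egress hold the UUIDs of the SF's Neutron ports
-        ingress = sa.Column(sa.String(36), nullable=False)
-        egress = sa.Column(sa.String(36), nullable=False)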
-
-REST API
---------
-
-Port Chain Operations:
-
-+------------+---------------------------+------------------------------------------+
-|Operation |URL |Description |
-+============+===========================+==========================================+
-|POST |/sfc/port_chains |Create a Port Chain |
-+------------+---------------------------+------------------------------------------+
-|PUT |/sfc/port_chains/{chain_id}|Update a specific Port Chain |
-+------------+---------------------------+------------------------------------------+
-|DELETE |/sfc/port_chains/{chain_id}|Delete a specific Port Chain |
-+------------+---------------------------+------------------------------------------+
-|GET |/sfc/port_chains |List all Port Chains for specified tenant |
-+------------+---------------------------+------------------------------------------+
-|GET |/sfc/port_chains/{chain_id}|Show information for a specific Port Chain|
-+------------+---------------------------+------------------------------------------+
-
-Port Pair Group Operations:
-
-+------------+--------------------------------+-----------------------------------------------+
-|Operation |URL |Description |
-+============+================================+===============================================+
-|POST |/sfc/port_pair_groups |Create a Port Pair Group |
-+------------+--------------------------------+-----------------------------------------------+
-|PUT |/sfc/port_pair_groups/{group_id}|Update a specific Port Pair Group |
-+------------+--------------------------------+-----------------------------------------------+
-|DELETE |/sfc/port_pair_groups/{group_id}|Delete a specific Port Pair Group |
-+------------+--------------------------------+-----------------------------------------------+
-|GET |/sfc/port_pair_groups |List all Port Pair Groups for specified tenant |
-+------------+--------------------------------+-----------------------------------------------+
-|GET         |/sfc/port_pair_groups/{group_id}|Show information for a specific Port Pair Group|
-+------------+--------------------------------+-----------------------------------------------+
-
-Port Pair Operations:
-
-+------------+-------------------------+------------------------------------------+
-|Operation |URL |Description |
-+============+=========================+==========================================+
-|POST |/sfc/port_pairs |Create a Port Pair |
-+------------+-------------------------+------------------------------------------+
-|PUT |/sfc/port_pairs/{pair_id}|Update a specific Port Pair |
-+------------+-------------------------+------------------------------------------+
-|DELETE |/sfc/port_pairs/{pair_id}|Delete a specific Port Pair |
-+------------+-------------------------+------------------------------------------+
-|GET |/sfc/port_pairs |List all Port Pairs for specified tenant |
-+------------+-------------------------+------------------------------------------+
-|GET |/sfc/port_pairs/{pair_id}|Show information for a specific Port Pair |
-+------------+-------------------------+------------------------------------------+
-
-Flow Classifier Operations:
-
-+------------+-------------------------------+------------------------------------------------+
-|Operation |URL |Description |
-+============+===============================+================================================+
-|POST |/sfc/flow_classifiers |Create a Flow-classifier |
-+------------+-------------------------------+------------------------------------------------+
-|PUT |/sfc/flow_classifiers/{flow_id}|Update a specific Flow-classifier |
-+------------+-------------------------------+------------------------------------------------+
-|DELETE |/sfc/flow_classifiers/{flow_id}|Delete a specific Flow-classifier |
-+------------+-------------------------------+------------------------------------------------+
-|GET |/sfc/flow_classifiers |List all Flow-classifiers for specified tenant |
-+------------+-------------------------------+------------------------------------------------+
-|GET |/sfc/flow_classifiers/{flow_id}|Show information for a specific Flow-classifier |
-+------------+-------------------------------+------------------------------------------------+
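-
-As a sketch of how these endpoints are exercised over HTTP (the host, port
-and token are placeholders, and the usual Neutron "/v2.0" URL prefix is
-assumed)::
-
-    curl -X POST http://controller:9696/v2.0/sfc/port_pairs \
-        -H "X-Auth-Token: $TOKEN" -H "Content-Type: application/json" \
-        -d '{"port_pair": {"name": "SF1",
-             "ingress": "<ingress-port-uuid>",
-             "egress": "<egress-port-uuid>"}}'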
-
-REST API Impact
----------------
-
-The following new resources will be created as a result of the API handling.
-
-Port Chain resource:
-
-+----------------+----------+--------+---------+----+-------------------------+
-|Attribute |Type |Access |Default |CRUD|Description |
-|Name | | |Value | | |
-+================+==========+========+=========+====+=========================+
-|id |uuid |RO, all |generated|R |Port Chain ID. |
-+----------------+----------+--------+---------+----+-------------------------+
-|tenant_id |uuid |RO, all |from auth|CR |Tenant ID. |
-| | | |token | | |
-+----------------+----------+--------+---------+----+-------------------------+
-|name |string |RW, all |'' |CRU |Port Chain name. |
-+----------------+----------+--------+---------+----+-------------------------+
-|description |string |RW, all |'' |CRU |Port Chain description. |
-+----------------+----------+--------+---------+----+-------------------------+
-|port_pair_groups|list(uuid)|RW, all |N/A |CR |List of port-pair-groups.|
-+----------------+----------+--------+---------+----+-------------------------+
-|flow_classifiers|list(uuid)|RW, all |[] |CRU |List of flow-classifiers.|
-+----------------+----------+--------+---------+----+-------------------------+
-|chain_parameters|dict |RW, all |mpls |CR |Dict. of parameters: |
-| | | | | |'correlation':String |
-+----------------+----------+--------+---------+----+-------------------------+
-
-Port Pair Group resource:
-
-+-----------+--------+---------+---------+----+---------------------+
-|Attribute |Type |Access |Default |CRUD|Description |
-|Name | | |Value | | |
-+===========+========+=========+=========+====+=====================+
-|id |uuid |RO, all |generated|R |Port pair group ID. |
-+-----------+--------+---------+---------+----+---------------------+
-|tenant_id |uuid |RO, all |from auth|CR |Tenant ID. |
-| | | |token | | |
-+-----------+--------+---------+---------+----+---------------------+
-|name |string |RW, all |'' |CRU |Port pair group name.|
-+-----------+--------+---------+---------+----+---------------------+
-|description|string |RW, all |'' |CRU |Port pair group |
-| | | | | |description. |
-+-----------+--------+---------+---------+----+---------------------+
-|port_pairs |list |RW, all |N/A |CRU |List of port-pairs. |
-+-----------+--------+---------+---------+----+---------------------+
-
-Port Pair resource:
-
-+---------------------------+--------+---------+---------+----+----------------------+
-|Attribute Name |Type |Access |Default |CRUD|Description |
-+===========================+========+=========+=========+====+======================+
-|id |uuid |RO, all |generated|R |Port pair ID. |
-+---------------------------+--------+---------+---------+----+----------------------+
-|tenant_id |uuid |RO, all |from auth|CR |Tenant ID. |
-| | | |token | | |
-+---------------------------+--------+---------+---------+----+----------------------+
-|name |string |RW, all |'' |CRU |Port pair name. |
-+---------------------------+--------+---------+---------+----+----------------------+
-|description |string |RW, all |'' |CRU |Port pair description.|
-+---------------------------+--------+---------+---------+----+----------------------+
-|ingress |uuid |RW, all |N/A |CR |Ingress port ID. |
-+---------------------------+--------+---------+---------+----+----------------------+
-|egress |uuid |RW, all |N/A |CR |Egress port ID. |
-+---------------------------+--------+---------+---------+----+----------------------+
-|service_function_parameters|dict |RW, all |None |CR |Dict. of parameters: |
-| | | | | |'correlation':String |
-+---------------------------+--------+---------+---------+----+----------------------+
-
-Flow Classifier resource:
-
-+--------------------------+--------+---------+---------+----+-----------------------+
-|Attribute Name |Type |Access |Default |CRUD|Description |
-| | | |Value | | |
-+==========================+========+=========+=========+====+=======================+
-|id |uuid |RO, all |generated|R |Flow-classifier ID. |
-+--------------------------+--------+---------+---------+----+-----------------------+
-|tenant_id |uuid |RO, all |from auth|CR |Tenant ID. |
-| | | |token | | |
-+--------------------------+--------+---------+---------+----+-----------------------+
-|name |string |RW, all |'' |CRU |Flow-classifier name. |
-+--------------------------+--------+---------+---------+----+-----------------------+
-|description |string |RW, all |'' |CRU |Flow-classifier |
-| | | | | |description. |
-+--------------------------+--------+---------+---------+----+-----------------------+
-|ethertype |string |RW, all |'IPv4' |CR |L2 ethertype. Can be |
-| | | | | |'IPv4' or 'IPv6' only. |
-+--------------------------+--------+---------+---------+----+-----------------------+
-|protocol |string |RW, all |Any |CR |IP protocol name. |
-+--------------------------+--------+---------+---------+----+-----------------------+
-|source_port_range_min |integer |RW, all |Any |CR |Minimum source |
-| | | | | |protocol port. |
-+--------------------------+--------+---------+---------+----+-----------------------+
-|source_port_range_max |integer |RW, all |Any |CR |Maximum source |
-| | | | | |protocol port. |
-+--------------------------+--------+---------+---------+----+-----------------------+
-|destination_port_range_min|integer |RW, all |Any |CR |Minimum destination |
-| | | | | |protocol port. |
-+--------------------------+--------+---------+---------+----+-----------------------+
-|destination_port_range_max|integer |RW, all |Any |CR |Maximum destination |
-| | | | | |protocol port. |
-+--------------------------+--------+---------+---------+----+-----------------------+
-|source_ip_prefix |CIDR |RW, all |Any |CR |Source IPv4 or IPv6 |
-| | | | | |prefix. |
-+--------------------------+--------+---------+---------+----+-----------------------+
-|destination_ip_prefix |CIDR |RW, all |Any |CR |Destination IPv4 or |
-| | | | | |IPv6 prefix. |
-+--------------------------+--------+---------+---------+----+-----------------------+
-|logical_source_port |uuid |RW, all |None |CR |Neutron source port. |
-+--------------------------+--------+---------+---------+----+-----------------------+
-|logical_destination_port |uuid |RW, all |None |CR |Neutron destination |
-| | | | | |port. |
-+--------------------------+--------+---------+---------+----+-----------------------+
-|l7_parameters |dict |RW, all |Any |CR |Dict. of L7 parameters.|
-+--------------------------+--------+---------+---------+----+-----------------------+
-
-JSON Port-pair create request example::
-
- {"port_pair": {"name": "SF1",
- "tenant_id": "d382007aa9904763a801f68ecf065cf5",
- "description": "Firewall SF instance",
- "ingress": "dace4513-24fc-4fae-af4b-321c5e2eb3d1",
-                "egress": "aef3478a-4a56-2a6e-cd3a-9dee4e2ec345"
- }
- }
-
- {"port_pair": {"name": "SF2",
- "tenant_id": "d382007aa9904763a801f68ecf065cf5",
- "description": "Loadbalancer SF instance",
- "ingress": "797f899e-73d4-11e5-b392-2c27d72acb4c",
-                "egress": "797f899e-73d4-11e5-b392-2c27d72acb4c"
- }
- }
-
-JSON Port-pair create response example::
-
- {"port_pair": {"name": "SF1",
- "tenant_id": "d382007aa9904763a801f68ecf065cf5",
- "description": "Firewall SF instance",
- "ingress": "dace4513-24fc-4fae-af4b-321c5e2eb3d1",
- "egress": "aef3478a-4a56-2a6e-cd3a-9dee4e2ec345",
-                "id": "78dcd363-fc23-aeb6-f44b-56dc5e2fb3ae"
- }
- }
-
- {"port_pair": {"name": "SF2",
- "tenant_id": "d382007aa9904763a801f68ecf065cf5",
- "description": "Loadbalancer SF instance",
- "ingress": "797f899e-73d4-11e5-b392-2c27d72acb4c",
- "egress": "797f899e-73d4-11e5-b392-2c27d72acb4c",
- "id": "d11e9190-73d4-11e5-b392-2c27d72acb4c"
- }
- }
-
-JSON Port Pair Group create request example::
-
- {"port_pair_group": {"name": "Firewall_PortPairGroup",
- "tenant_id": "d382007aa9904763a801f68ecf065cf5",
- "description": "Grouping Firewall SF instances",
- "port_pairs": [
- "78dcd363-fc23-aeb6-f44b-56dc5e2fb3ae"
- ]
- }
- }
-
- {"port_pair_group": {"name": "Loadbalancer_PortPairGroup",
- "tenant_id": "d382007aa9904763a801f68ecf065cf5",
- "description": "Grouping Loadbalancer SF instances",
- "port_pairs": [
- "d11e9190-73d4-11e5-b392-2c27d72acb4c"
- ]
- }
- }
-
-JSON Port Pair Group create response example::
-
- {"port_pair_group": {"name": "Firewall_PortPairGroup",
- "tenant_id": "d382007aa9904763a801f68ecf065cf5",
- "description": "Grouping Firewall SF instances",
- "port_pairs": [
-                          "78dcd363-fc23-aeb6-f44b-56dc5e2fb3ae"
- ],
-                      "id": "4512d643-24fc-4fae-af4b-321c5e2eb3d1"
- }
- }
-
- {"port_pair_group": {"name": "Loadbalancer_PortPairGroup",
- "tenant_id": "d382007aa9904763a801f68ecf065cf5",
- "description": "Grouping Loadbalancer SF instances",
- "port_pairs": [
- "d11e9190-73d4-11e5-b392-2c27d72acb4c"
- ],
-                      "id": "4a634d49-76dc-4fae-af4b-321c5e23d651"
- }
- }
-
-JSON Flow Classifier create request example::
-
- {"flow_classifier": {"name": "FC1",
- "tenant_id": "1814726e2d22407b8ca76db5e567dcf1",
- "description": "Flow rule for classifying TCP traffic",
- "protocol": "TCP",
- "source_port_range_min": 22, "source_port_range_max": 4000,
- "destination_port_range_min": 80, "destination_port_range_max": 80,
- "source_ip_prefix": null, "destination_ip_prefix": "22.12.34.45"
- }
- }
-
- {"flow_classifier": {"name": "FC2",
- "tenant_id": "1814726e2d22407b8ca76db5e567dcf1",
- "description": "Flow rule for classifying UDP traffic",
- "protocol": "UDP",
- "source_port_range_min": 22, "source_port_range_max": 22,
- "destination_port_range_min": 80, "destination_port_range_max": 80,
- "source_ip_prefix": null, "destination_ip_prefix": "22.12.34.45"
- }
- }
-
-JSON Flow Classifier create response example::
-
- {"flow_classifier": {"name": "FC1",
- "tenant_id": "1814726e2d22407b8ca76db5e567dcf1",
- "description": "Flow rule for classifying TCP traffic",
- "protocol": "TCP",
- "source_port_range_min": 22, "source_port_range_max": 4000,
- "destination_port_range_min": 80, "destination_port_range_max": 80,
-                      "source_ip_prefix": null, "destination_ip_prefix": "22.12.34.45",
- "id": "4a334cd4-fe9c-4fae-af4b-321c5e2eb051"
- }
- }
-
- {"flow_classifier": {"name": "FC2",
- "tenant_id": "1814726e2d22407b8ca76db5e567dcf1",
- "description": "Flow rule for classifying UDP traffic",
- "protocol": "UDP",
- "source_port_range_min": 22, "source_port_range_max": 22,
- "destination_port_range_min": 80, "destination_port_range_max": 80,
-                      "source_ip_prefix": null, "destination_ip_prefix": "22.12.34.45",
- "id": "105a4b0a-73d6-11e5-b392-2c27d72acb4c"
- }
- }
-
-JSON Port Chain create request example::
-
- {"port_chain": {"name": "PC1",
- "tenant_id": "d382007aa9904763a801f68ecf065cf5",
- "description": "Steering TCP and UDP traffic first to Firewall and then to Loadbalancer",
- "flow_classifiers": [
- "4a334cd4-fe9c-4fae-af4b-321c5e2eb051",
- "105a4b0a-73d6-11e5-b392-2c27d72acb4c"
- ],
- "port_pair_groups": [
- "4512d643-24fc-4fae-af4b-321c5e2eb3d1",
- "4a634d49-76dc-4fae-af4b-321c5e23d651"
-                 ]
- }
- }
-
-JSON Port Chain create response example::
-
- {"port_chain": {"name": "PC1",
- "tenant_id": "d382007aa9904763a801f68ecf065cf5",
- "description": "Steering TCP and UDP traffic first to Firewall and then to Loadbalancer",
- "flow_classifiers": [
- "4a334cd4-fe9c-4fae-af4b-321c5e2eb051",
- "105a4b0a-73d6-11e5-b392-2c27d72acb4c"
- ],
- "port_pair_groups": [
- "4512d643-24fc-4fae-af4b-321c5e2eb3d1",
- "4a634d49-76dc-4fae-af4b-321c5e23d651"
- ],
- "id": "1278dcd4-459f-62ed-754b-87fc5e4a6751"
- }
- }
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-Authors of the Specification and Primary contributors:
- * Cathy Zhang (cathy.h.zhang@huawei.com)
- * Louis Fourie (louis.fourie@huawei.com)
-
-Other contributors:
- * Vikram Choudhary (vikram.choudhary@huawei.com)
- * Swaminathan Vasudevan (swaminathan.vasudevan@hp.com)
- * Yuji Azama (yuj-azama@rc.jp.nec.com)
- * Mohan Kumar (nmohankumar1011@gmail.com)
- * Ramanjaneya (ramanjieee@gmail.com)
- * Stephen Wong (stephen.kf.wong@gmail.com)
- * Nicolas Bouthors (Nicolas.BOUTHORS@qosmos.com)
- * Akihiro Motoki <amotoki@gmail.com>
- * Paul Carver <pcarver@att.com>
-
diff --git a/doc/source/command_extensions.rst b/doc/source/command_extensions.rst
deleted file mode 100644
index abb4096..0000000
--- a/doc/source/command_extensions.rst
+++ /dev/null
@@ -1,64 +0,0 @@
-..
- Copyright 2015 Futurewei. All rights reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
-
- Convention for heading levels in Neutron devref:
- ======= Heading 0 (reserved for the title in a document)
- ------- Heading 1
- ~~~~~~~ Heading 2
- +++++++ Heading 3
- ''''''' Heading 4
- (Avoid deeper levels because they do not render well.)
-
-
-=================
-Command extension
-=================
-
-Networking-sfc uses python-neutronclient's existing command extension framework
-for adding required command lines for realizing service function chaining
-functionality. Refer to `Python-neutronclient command extension <http://docs.openstack.org/developer/python-neutronclient/devref/client_command_extensions.html>`_ for further details.
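-
-As an illustrative sketch (modelled on, but not copied from, the modules
-under networking_sfc/cli/), a command extension for one resource looks
-roughly like::
-
-    from neutronclient.common import extension
-
-    class PortPair(extension.NeutronClientExtension):
-        resource = 'port_pair'
-        resource_plural = 'port_pairs'
-        object_path = '/sfc/%s' % resource_plural
-        resource_path = '/sfc/%s/%%s' % resource_plural
-        versions = ['2.0']
-
-    class PortPairList(extension.ClientExtensionList, PortPair):
-        """List port-pairs that belong to a given tenant."""
-        shell_command = 'port-pair-list'
-        list_columns = ['id', 'name', 'ingress', 'egress']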
-
-
-List of New Neutron CLI Commands:
----------------------------------
-The following commands are introduced for realizing service function chaining.
-
-::
-
-    flow-classifier-create   Create a flow-classifier.
-    flow-classifier-delete   Delete a given flow-classifier.
-    flow-classifier-list     List flow-classifiers that belong to a given tenant.
-    flow-classifier-show     Show information of a given flow-classifier.
-    flow-classifier-update   Update flow-classifier information.
-
-    port-pair-create         Create a port-pair.
-    port-pair-delete         Delete a given port-pair.
-    port-pair-list           List port-pairs that belong to a given tenant.
-    port-pair-show           Show information of a given port-pair.
-    port-pair-update         Update port-pair information.
-
-    port-pair-group-create   Create a port-pair-group.
-    port-pair-group-delete   Delete a given port-pair-group.
-    port-pair-group-list     List port-pair-groups that belong to a given tenant.
-    port-pair-group-show     Show information of a given port-pair-group.
-    port-pair-group-update   Update port-pair-group information.
-
-    port-chain-create        Create a port-chain.
-    port-chain-delete        Delete a given port-chain.
-    port-chain-list          List port-chains that belong to a given tenant.
-    port-chain-show          Show information of a given port-chain.
-    port-chain-update        Update port-chain information.
-
diff --git a/doc/source/conf.py b/doc/source/conf.py
deleted file mode 100755
index e70c181..0000000
--- a/doc/source/conf.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import sys
-
-sys.path.insert(0, os.path.abspath('../..'))
-# -- General configuration ----------------------------------------------------
-
-# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = [
- 'sphinx.ext.autodoc',
- #'sphinx.ext.intersphinx',
- 'oslosphinx'
-]
-
-# autodoc generation is a bit aggressive and a nuisance when doing heavy
-# text edit cycles.
-# execute "export SPHINX_DEBUG=1" in your terminal to disable
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'networking-sfc'
-copyright = u'2013, OpenStack Foundation'
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-add_module_names = True
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# -- Options for HTML output --------------------------------------------------
-
-# The theme to use for HTML and HTML Help pages. Major themes that come with
-# Sphinx are currently 'default' and 'sphinxdoc'.
-# html_theme_path = ["."]
-# html_theme = '_theme'
-# html_static_path = ['static']
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = '%sdoc' % project
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, documentclass
-# [howto/manual]).
-latex_documents = [
- ('index',
- '%s.tex' % project,
- u'%s Documentation' % project,
- u'OpenStack Foundation', 'manual'),
-]
-
-# Example configuration for intersphinx: refer to the Python standard library.
-#intersphinx_mapping = {'http://docs.python.org/': None}
diff --git a/doc/source/contribution.rst b/doc/source/contribution.rst
deleted file mode 100644
index 852aa97..0000000
--- a/doc/source/contribution.rst
+++ /dev/null
@@ -1,29 +0,0 @@
-..
- Copyright 2015 Futurewei. All rights reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
-
- Convention for heading levels in Neutron devref:
- ======= Heading 0 (reserved for the title in a document)
- ------- Heading 1
- ~~~~~~~ Heading 2
- +++++++ Heading 3
- ''''''' Heading 4
- (Avoid deeper levels because they do not render well.)
-
-
-============
-Contribution
-============
-.. include:: ../../CONTRIBUTING.rst
diff --git a/doc/source/index.rst b/doc/source/index.rst
deleted file mode 100644
index da33019..0000000
--- a/doc/source/index.rst
+++ /dev/null
@@ -1,66 +0,0 @@
-..
- Copyright 2015 Futurewei. All rights reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
- Convention for heading levels in Neutron devref:
- ======= Heading 0 (reserved for the title in a document)
- ------- Heading 1
- ~~~~~~~ Heading 2
- +++++++ Heading 3
- ''''''' Heading 4
- (Avoid deeper levels because they do not render well.)
-
-
-Welcome to the Service Function Chaining Documentation!
-=======================================================
-.. include:: ../../README.rst
-
-
-===============
-Developer Guide
-===============
-
-In the Developer Guide, you will find information on the lower-level
-programming APIs of networking-sfc. There are sections that cover its core
-pieces, including the API, command-line extensions, database, system design,
-and alembic migrations. There are also subsections that describe specific
-plugins inside networking-sfc. Finally, the developer guide includes
-information about the testing infrastructure.
-
-Programming HowTos and Tutorials
---------------------------------
-.. toctree::
- :maxdepth: 2
-
- installation
- usage
- contribution
-
-Networking-SFC Internals
-------------------------
-.. toctree::
- :maxdepth: 2
-
- api
- system_design and_workflow
- ovs_driver_and_agent_workflow
- command_extensions
- alembic_migration
-
-Indices and tables
-------------------
-
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
-
diff --git a/doc/source/installation.rst b/doc/source/installation.rst
deleted file mode 100644
index bf133b0..0000000
--- a/doc/source/installation.rst
+++ /dev/null
@@ -1,37 +0,0 @@
-..
- Copyright 2015 Futurewei. All rights reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
-
- Convention for heading levels in Neutron devref:
- ======= Heading 0 (reserved for the title in a document)
- ------- Heading 1
- ~~~~~~~ Heading 2
- +++++++ Heading 3
- ''''''' Heading 4
- (Avoid deeper levels because they do not render well.)
-
-
-============
-Installation
-============
-
-At the command line::
-
- $ pip install networking-sfc
-
-Or, if you have virtualenvwrapper installed::
-
- $ mkvirtualenv networking-sfc
- $ pip install networking-sfc
diff --git a/doc/source/ovs_driver_and_agent_workflow.rst b/doc/source/ovs_driver_and_agent_workflow.rst
deleted file mode 100644
index d8cda09..0000000
--- a/doc/source/ovs_driver_and_agent_workflow.rst
+++ /dev/null
@@ -1,311 +0,0 @@
-..
- Copyright 2015 Futurewei. All rights reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
-
- Convention for heading levels in Neutron devref:
- ======= Heading 0 (reserved for the title in a document)
- ------- Heading 1
- ~~~~~~~ Heading 2
- +++++++ Heading 3
- ''''''' Heading 4
- (Avoid deeper levels because they do not render well.)
-
-
-=============================
-OVS Driver and Agent Workflow
-=============================
-
-The `Common Service chaining driver <https://blueprints.launchpad.net/neutron/+spec/common-service-chaining-driver-api>`_ blueprint describes why an OVS driver and agent are needed to realize service function chaining.
-
-Problem Description
-===================
-
-The service chain OVS driver and agents are used to configure back-end
-Open vSwitch devices to render service chaining in the data plane. The driver
-manager provides a common service chain API which gives a consistent interface
-between the service chain manager and the different device drivers.
-
-Proposed Change
-===============
-
-
-Design::
-
- Port Chain Plugin
- +-------------------------------+
- | +-------------------------+ |
- | | Port Chain API | |
- | +-------------------------+ |
- | | Port Chain Database | |
- | +-------------------------+ |
- | | Driver Manager | |
- | +-------------------------+ |
- | | Common Driver API | |
- | +-------------------------+ |
- | | |
- | +-------------------------+ |
- | | OVS Driver | |
- | +-------------------------+ |
- +-------|----------------|------+
- |rpc |rpc
- +-----------+ +-----------+
- | OVS Agent | | OVS Agent |
- +-----------+ +-----------+
-
-The OVS service chain driver and agents communicate via RPC.
-
-OVS Driver
-----------
-The OVS Driver is extended to support service chaining. The driver interfaces
-with the OVS agents that reside on each Compute node. The OVS driver is responsible
-for the following:
-
-* Identify the OVS agents that directly connect to the SF instances and establish
-  communication with the OVS agents on the Compute nodes.
-* Send commands to the OVS agents to create bridges, flow tables and flows to steer
- chain traffic to the SF instances.
-
-OVS Agent
----------
-The OVS agent will manage the OVS, using OVSDB commands to create bridges and
-OpenFlow to create the flow tables and install the flows that steer chain
-traffic to the SF instances.
-
-Existing tunnels between the Tunnel bridges on each Compute node are used to
-transport Port Chain traffic between the CNs.
-
-The OVS Agent will create these tunnels to transport SFC traffic between Compute
-nodes on which there are SFs. Each tunnel port has the following attributes:
-
-* Name
-* Local tunnel IP address
-* Remote tunnel IP address
-* Tunnel Type: VXLAN, GRE
-
-The OVS agent installs additional flows on the Integration bridge and the Tunnel bridge
-to perform the following functions:
-
-* Traffic classification. The Integration bridge classifies traffic from a VM port or
- Service VM port attached to the Integration bridge. The flow classification is based on
- the n-tuple rules.
-* Service function forwarding. The Tunnel bridge forwards service chain
- packets to the next-hop Compute node via tunnels, or to the next Service VM port
-  on that Compute node. The Integration bridge terminates a Service Function Path.
-
-The OVS Agent will use the MPLS header to transport the chain path identifier
-and chain hop index. The MPLS label will transport the chain path identifier,
-and the MPLS TTL will transport the chain hop index. The following packet encapsulation
-will be used::
-
- IPv4 Packet:
- +----------+------------------------+-------+
- |L2 header | IP + UDP dst port=4790 | VXLAN |
- +----------+------------------------+-------+
- -----------------------------+---------------+--------------------+
- Original Ethernet, ET=0x8847 | MPLS header | Original IP Packet |
- -----------------------------+---------------+--------------------+
-
-This is not intended as a general purpose MPLS implementation but rather as a
-temporary internal mechanism. It is anticipated that the MPLS label will be
-replaced with an NSH encapsulation
-(https://datatracker.ietf.org/doc/draft-ietf-sfc-nsh/) once NSH support is
-available upstream in Open vSwitch. If the service function does not support
-the header, then the vSwitch will act as a Service Function Forwarder (SFF)
-proxy, which will strip off the header when forwarding the packet to the SF
-and re-add the header when receiving the packet from the SF.
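-
-As an illustration only (not the agent's actual code), the label/TTL mapping
-described above can be sketched in Python; the helper name and the returned
-dictionary format are assumptions::
-
-    def encode_chain_hop(chain_path_id, hop_index):
-        # The MPLS label (a 20-bit field) carries the chain path identifier
-        # and the MPLS TTL (an 8-bit field) carries the chain hop index.
-        assert 0 <= chain_path_id < 2 ** 20
-        assert 0 <= hop_index < 2 ** 8
-        return {'mpls_label': chain_path_id, 'mpls_ttl': hop_index}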
-
-OVS Bridge and Tunnel
----------------------
-Existing tunnels between the Tunnel bridges on each Compute node are used to
-transport Port Chain traffic between the CNs::
-
- CN1 CN2
- +--------------------------+ +-------------------------+
- | +-----+ +-----+ | | +-----+ +-----+ |
- | | VM1 | | SF1 | | | | SF2 | | SF3 | |
- | +-----+ +-----+ | | +-----+ +-----+ |
- | |. ^|. | | ^| |. ^|. |
- | +----.-----------.-.--+ | | +-.---.---------.-.---+ |
- | | ............. .. | | | | . ........... . | |
- | | Integration Bridge. | | | | .Integration Bridge | |
- | | ......... | | | | ...... ........ | |
- | +-----------.---------+ | | +-------.--.----------+ |
- | |. | | .| . |
- | +-----------.---------+ | | +-------.--.----------+ |
- | | ................................. ..................>
- | | Tunnel Bridge |-------------| Tunnel Bridge | |
- | +---------------------+ | Tunnel | +---------------------+ |
- | | | |
-    +--------------------------+      +-------------------------+
-
-
-
-Flow Tables and Flow Rules
---------------------------
-The OVS Agent adds additional flows (shown above) on the Integration bridge to support
-Port Chains:
-
-1. Egress Port Chain flows to steer traffic from SFs attached to the Integration bridge to a
-   Tunnel bridge to the next-hop Compute node. These flows may be handled using an OpenFlow
-   group when there are multiple port-pairs in the next-hop port-pair group.
-2. Ingress Port Chain flows on the Tunnel bridge to steer service chain traffic from a
- tunnel from a previous Compute node to SFs attached to the Integration bridge.
-3. Internal Port Chain flows are used to steer service chain traffic from one SF to another SF
- on the same Compute Node.
-
-The Port Chain flow rules have a higher priority than the existing flow rules
-on the Integration bridge, and so do not impact them. Traffic from an SF that
-is not part of a service chain, e.g. DHCP messages, ARP packets etc., will
-match the existing flow rules on the Integration bridge.
-
-The following tables are used to process Port Chain traffic:
-
-* Local Switching Table (Table 0). This existing table has two new flows to handle
- incoming traffic from the SF egress port and the tunnel port between Compute nodes.
-
-* Group Table. This new table is used to select multiple paths for load-balancing across
- multiple port-pairs in a port-pair group. There are multiple buckets in the group if the next
-  hop is a port-pair group with multiple port-pairs. The group action is to send the packet
-  to the next-hop SF instance.
-  If the next-hop port-pair is on another Compute node, the action is to output to the
-  tunnel port to the next-hop Compute node. If the next-hop port-pair is on the same
-  Compute node, the action is to resubmit to the TUN_TABLE for local chaining processing.
-
-Local Switching Table (Table 0) Flows
--------------------------------------
-Traffic from SF Egress port: classify for chain and direct to group::
-
- priority=10,in_port=SF_EGRESS_port,traffic_match_field,
- actions=strip_vlan,set_tunnel:VNI,group:gid.
-
-Traffic from Tunnel port::
-
- priority=10,in_port=TUNNEL_port,
- actions=resubmit(,TUN_TABLE[type]).
-
-
-Group Table Flows
------------------
-The Group table is used for load distribution to spread the traffic load across a port-pair group of
-multiple port-pairs (SFs of the same type). This uses the hashing of several fields in the packet.
-There are multiple buckets in the group if the next hop is a port-pair group with multiple port-pairs.
-
-The group action is to send the packet to the next-hop SF instances. If the next-hop port-pair
-is on another Compute node, the action is to output to the tunnel port to the next-hop Compute node.
-If the next-hop port-pair is on the same Compute node, the action is to resubmit
-to the TUN_TABLE for local chaining processing.
-
-The command to create an OpenFlow group of type select with a hash selection method
-and two buckets is shown below. This is existing OVS functionality. The
-ip_src,nw_proto,tp_src packet fields are used for the hash::
-
- group_id=gid,type=select,selection_method=hash,fields=ip_src,nw_proto,tp_src
- bucket=set_field:10.1.1.3->ip_dst,output:10,
- bucket=set_field:10.1.1.4->ip_dst,output:10
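-
-As an illustration only, a group specification like the one above could be
-assembled from the next-hop port-pair information as sketched below; the
-function and argument names are assumptions, not agent code::
-
-    def build_select_group(gid, next_hop_ips, out_port):
-        # One bucket per port-pair in the next-hop port-pair group; the
-        # packet's destination IP is rewritten to the selected SF instance.
-        buckets = ','.join(
-            'bucket=set_field:%s->ip_dst,output:%s' % (ip, out_port)
-            for ip in next_hop_ips)
-        return ('group_id=%s,type=select,selection_method=hash,'
-                'fields=ip_src,nw_proto,tp_src,%s' % (gid, buckets))
-
-    # build_select_group('gid', ['10.1.1.3', '10.1.1.4'], 10) reproduces
-    # the two-bucket example shown above.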
-
-
-Data Model Impact
------------------
-None
-
-Alternatives
-------------
-
-None
-
-Security Impact
----------------
-
-None.
-
-Notifications Impact
---------------------
-
-There will be logging to troubleshoot and verify correct operation.
-
-Other End User Impact
----------------------
-
-None.
-
-Performance Impact
-------------------
-
-It is not expected that these flows will have a significant performance impact.
-
-IPv6 Impact
------------
-
-None.
-
-Other Deployer Impact
----------------------
-
-None
-
-Developer Impact
-----------------
-
-None
-
-Community Impact
-----------------
-
-Existing OVS driver and agent functionality will not be affected.
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-* Cathy Zhang (cathy.h.zhang@huawei.com)
-* Louis Fourie (louis.fourie@huawei.com)
-* Stephen Wong (stephen.kf.wong@gmail.com)
-
-Work Items
-----------
-
-* Port Chain OVS driver.
-* Port Chain OVS agent.
-* Unit test.
-
-Dependencies
-============
-
-This design depends upon the proposed `Neutron Service Chaining API extensions <https://blueprints.launchpad.net/neutron/+spec/neutron-api-extension-for-service-chaining>`_
-and on Open vSwitch.
-
-Testing
-=======
-
-Tempest and functional tests will be created.
-
-Documentation Impact
-====================
-
-Documented as an extension.
-
-User Documentation
-------------------
-
-Update networking API reference.
-Update admin guide.
-
-Developer Documentation
------------------------
-
-None
-
diff --git a/doc/source/system_design and_workflow.rst b/doc/source/system_design and_workflow.rst
deleted file mode 100644
index 11e0849..0000000
--- a/doc/source/system_design and_workflow.rst
+++ /dev/null
@@ -1,245 +0,0 @@
-..
- Copyright 2015 Futurewei. All rights reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
-
- Convention for heading levels in Neutron devref:
- ======= Heading 0 (reserved for the title in a document)
- ------- Heading 1
- ~~~~~~~ Heading 2
- +++++++ Heading 3
- ''''''' Heading 4
- (Avoid deeper levels because they do not render well.)
-
-
-==========================
-System Design and Workflow
-==========================
-
-Problem Description
-===================
-The `Service Chaining API specification <http://docs.openstack.org/developer/networking-sfc/api.html>`_ proposes a Neutron-port-based solution for setting up a service chain. A specification of the system architecture and the related API workflow is needed to guide the code design.
-
-System Architecture
-============================
-The following figure shows the generic architecture of the Port Chain
-Plugin. As shown in the diagram, Port Chain Plugin can be backed by
-different service providers such as OVS Driver and/or different types of
-SDN Controller Drivers. Through the "Common Driver API", these
-different drivers can provide different implementations for the service
-chain path rendering. In the first release and deployment based on this
-release, we will only deliver code for the OVS driver. In the next release,
-we can add code to support multiple active drivers::
-
- Port Chain Plugin With Different Types of Drivers
- +-----------------------------------------------------------------+
- | +-----------------------------------------------------------+ |
- | | Port Chain API | |
- | +-----------------------------------------------------------+ |
- | | Port Chain Database | |
- | +-----------------------------------------------------------+ |
- | | Driver Manager | |
- | +-----------------------------------------------------------+ |
- | | Common Driver API | |
- | +-----------------------------------------------------------+ |
- | | |
- | +------------+------------------------+---------------------+ |
- | | OVS Driver | Controller Driver1 | Controller Driver2 | |
- | +------------+------------------------+---------------------+ |
- +-------|------------------|-------------------------|------------+
- | | |
- +-----------+ +-----------------+ +-----------------+
- | OVS Agent | | SDN Controller1 | | SDN Controller2 |
- +-----------+ +-----------------+ +-----------------+
-
-The second figure below shows the reference implementation architecture,
-which is through the OVS Driver path. The figure shows the components
-that will be added on the Neutron Server and the compute nodes to
-support this Neutron Based SFC functionality. As shown in the diagram,
-a new Port Chain Plugin will be added to the Neutron Server.
-The existing "OVS Driver" and "OVS Agent" will be extended to support
-the service chain functionality. The OVS Driver will communicate with
-each OVS Agent to program its OVS forwarding table properly so that a
-tenant's traffic flow can be steered through the user defined sequence
-of Neutron ports to get the desired service treatment from the Service
-Function running on the VMs.
-
-A separate `OVS Driver and Agent specification <http://docs.openstack.org/developer/networking-sfc/portchain-ovs-driver-agent.html>`_ describes in more
-detail the design considerations of the Driver and Agent, how to set up the
-classification rules on the OVS to identify different flows, and how to
-set up the OVS forwarding table. In the reference implementation, the OVS Driver
-communicates with the OVS Agent through RPC to program the OVS. The communication
-between the OVS Agent and the OVS is through OVSDB/OpenFlow::
-
-
- Port Chain Plugin With OVS Driver
- +-------------------------------+
- | +-------------------------+ |
- | | Port Chain API | |
- | +-------------------------+ |
- | | Port Chain Database | |
- | +-------------------------+ |
- | | Driver Manager | |
- | +-------------------------+ |
- | | Common Driver API | |
- | +-------------------------+ |
- | | |
- | +-------------------------+ |
- | | OVS Driver | |
- | +-------------------------+ |
- +-------|----------------|------+
- | |
- +-----------+ +-----------+
- | OVS Agent | | OVS Agent |
- +-----------+ +-----------+
-
-Port Chain Creation Workflow
-============================
-The following example shows how the Neutron CLI commands may be used to
-create a port-chain consisting of a service VM vm1 and a service VM
-vm2. The user can be an Admin/Tenant or an Application built on top.
-
-Traffic flow into the Port Chain will be from source IP address
-22.1.20.1 TCP port 23 to destination IP address 171.4.5.6 TCP port 100.
-The flow needs to be treated by SF1 running on VM1 identified by
-Neutron port pair [p1, p2] and SF2 running on VM2 identified by Neutron
-port pair [p3, p4].
-
-Network net1 should be created, using the existing Neutron API, before the
-Neutron ports are created. The design places no restriction on the type of
-net1, i.e. it can be any type of Neutron network, since SFC traffic will be
-tunneled transparently through the communication channels of net1.
-If the transport between vSwitches is VXLAN, then that VXLAN tunnel (and NOT
-a newly created tunnel) will be used to transport the SFC traffic. If the
-transport between vSwitches is Ethernet, then the SFC traffic will be
-transported over Ethernet. In other words, the SFC traffic is carried over
-the existing transport channel between vSwitches, and that external transport
-channel is set up for net1 through the existing Neutron API and ML2. The
-built-in OVS backend implements tunneling of the original flow packets over a
-VXLAN tunnel. The detailed outer VXLAN tunnel transport format and the inner
-SFC flow format, including how to leverage the existing OVS support for an
-MPLS label to carry the chain ID, are described in the `Port Chain OVS Driver and Agent specification <http://docs.openstack.org/developer/networking-sfc/portchain-ovs-driver-agent.html>`_.
-In the future, support can be added for tunneling the SFC flow packets over
-flat L2 Ethernet, an L3 IP network, a GRE tunnel, etc.
-
-Boot service VMs and attach ports
----------------------------------
-Create Neutron ports on network net1::
-
- neutron port-create --name p1 net1
- neutron port-create --name p2 net1
- neutron port-create --name p3 net1
-    neutron port-create --name p4 net1
-    neutron port-create --name p5 net1
-    neutron port-create --name p6 net1
-
-Boot VM1 from Nova with ports p1 and p2 using two --nic options::
-
- nova boot --image xxx --nic port-id=p1 --nic port-id=p2 vm1
-
-Boot VM2 from Nova with ports p3 and p4 using two --nic options::
-
- nova boot --image yyy --nic port-id=p3 --nic port-id=p4 vm2
-
-Alternatively, the user can create each VM with one VNIC and then
-attach another Neutron port to the VM::
-
- nova boot --image xxx --nic port-id=p1 vm1
- nova interface-attach --port-id p2 vm1
- nova boot --image yyy --nic port-id=p3 vm2
- nova interface-attach --port-id p4 vm2
-
-Once the Neutron ports p1 - p4 exist, the Port Chain is created using
-the steps described below.
-
-Create Flow Classifier
-----------------------
-Create flow-classifier FC1 that matches on source IP address 22.1.20.1
-(ingress direction) and destination IP address 171.4.5.6 (egress
-direction) with a TCP connection, source port 23 and destination port
-100::
-
- neutron flow-classifier-create
- --ip-version ipv4
- --source-ip-prefix 22.1.20.1/32
-    --destination-ip-prefix 171.4.5.6/32
- --protocol tcp
- --source-port 23:23
- --destination-port 100:100 FC1
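-
-For reference, the request body that this command builds is sketched below
-(a minimal illustration; the attribute names follow the flow classifier API
-used elsewhere in this document)::
-
-    flow_classifier = {
-        'flow_classifier': {
-            'name': 'FC1',
-            'ethertype': 'IPv4',
-            'protocol': 'tcp',
-            'source_ip_prefix': '22.1.20.1/32',
-            'destination_ip_prefix': '171.4.5.6/32',
-            'source_port_range_min': 23,
-            'source_port_range_max': 23,
-            'destination_port_range_min': 100,
-            'destination_port_range_max': 100,
-        }
-    }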
-
-Create Port Pair
------------------
-Create port-pair PP1 with ports p1 and p2, port-pair PP2 with
-ports p3 and p4, and port-pair PP3 with ports p5 and p6::
-
- neutron port-pair-create
- --ingress=p1
- --egress=p2 PP1
- neutron port-pair-create
- --ingress=p3
- --egress=p4 PP2
- neutron port-pair-create
- --ingress=p5
- --egress=p6 PP3
-
-Create Port Group
------------------
-Create port-pair-group PG1 with port-pair PP1 and PP2, and
-port-pair-group PG2 with port-pair PP3::
-
- neutron port-pair-group-create
- --port-pair PP1 --port-pair PP2 PG1
- neutron port-pair-group-create
- --port-pair PP3 PG2
-
-Create Port Chain
------------------
-
-Create port-chain PC1 with port-group PG1 and PG2, and flow
-classifier FC1::
-
- neutron port-chain-create
- --port-pair-group PG1 --port-pair-group PG2 --flow-classifier FC1 PC1
-
-This will result in the Port chain driver being invoked to create the
-Port Chain.
-
-The following diagram illustrates the code execution flow (not the
-exact codes) for the port chain creation::
-
- PortChainAPIParsingAndValidation: create_port_chain
- |
- V
- PortChainPlugin: create_port_chain
- |
- V
- PortChainDbPlugin: create_port_chain
- |
- V
- DriverManager: create_port_chain
- |
- V
- portchain.drivers.OVSDriver: create_port_chain
-
-The vSwitch Driver needs to figure out which switch VM1 is connecting
-with and which switch VM2 is connecting with (in the OVS case, the OVS
-driver has that information given the VMs' port info). As for the
-connection setup between the two vSwitches, it should be done through
-the existing ML2 plugin mechanism. The connection between these two
-vSwitches should already be set up before the user initiates the SFC
-request. The service chain flow packets will be tunneled through the
-connecting type/technology (e.g. VXLAN or GRE) between the two
-vSwitches. For our reference code implementation, we will use VXLAN to
-show a complete data path setup. Please refer to the `OVS Driver and OVS
-Agent specification <http://docs.openstack.org/developer/networking-sfc/portchain-ovs-driver-agent.html>`_ for more detailed information.
-
diff --git a/doc/source/usage.rst b/doc/source/usage.rst
deleted file mode 100644
index c097c33..0000000
--- a/doc/source/usage.rst
+++ /dev/null
@@ -1,32 +0,0 @@
-..
- Copyright 2015 Futurewei. All rights reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
-
- Convention for heading levels in Neutron devref:
- ======= Heading 0 (reserved for the title in a document)
- ------- Heading 1
- ~~~~~~~ Heading 2
- +++++++ Heading 3
- ''''''' Heading 4
- (Avoid deeper levels because they do not render well.)
-
-
-=====
-Usage
-=====
-
-To use networking-sfc in a project::
-
- import networking_sfc
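-
-For example, the installed version can be checked via the version attribute
-that pbr provides (see the package's __init__)::
-
-    import networking_sfc
-    print(networking_sfc.__version__)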
diff --git a/docs/release/userguide/feature.userguide.rst b/docs/release/userguide/feature.userguide.rst
index 941c959..d9e3729 100644
--- a/docs/release/userguide/feature.userguide.rst
+++ b/docs/release/userguide/feature.userguide.rst
@@ -15,42 +15,42 @@ License
OpenStack VNFFG/SFC project
===========================
-
-The OpenStack SFC (networking-sfc) project provides VNFFG capability
-to OpenStack networking (neutron). It is integrated with the Tacker
-orchestrator and multiple back-end implementations including OVS, ONOS
-and ODL is described in the following documentation.
-https://docs.openstack.org/newton/networking-guide/config-sfc.html
-https://docs.openstack.org/ocata/networking-guide/config-sfc.html
-https://docs.openstack.org/developer/networking-sfc/
+The OpenStack SFC (networking-sfc) project provides VNF Forwarding Graph
+(VNFFG) capability to OpenStack networking (neutron). The feature is integrated
+with the Tacker orchestrator. Multiple back-end implementations, including OVS,
+ONOS and ODL, are described in the following documentation:
+
+- https://docs.openstack.org/newton/networking-guide/config-sfc.html
+- https://docs.openstack.org/ocata/networking-guide/config-sfc.html
+- https://docs.openstack.org/developer/networking-sfc/
Integration with Openstack Tacker VNFFG Orchestrator
----------------------------------------------------
-https://specs.openstack.org/openstack/tacker-specs/specs/newton/tacker-networking-sfc.html
-https://specs.openstack.org/openstack/tacker-specs/specs/newton/tacker-vnffg.html
-https://blueprints.launchpad.net/tacker/+spec/tacker-vnffg
-https://review.openstack.org/#/c/292196/
-https://review.openstack.org/#/c/290771/
+- https://specs.openstack.org/openstack/tacker-specs/specs/newton/tacker-networking-sfc.html
+- https://specs.openstack.org/openstack/tacker-specs/specs/newton/tacker-vnffg.html
+- https://blueprints.launchpad.net/tacker/+spec/tacker-vnffg
+- https://review.openstack.org/#/c/292196/
+- https://review.openstack.org/#/c/290771/
Integration with ONOS SDN controller
------------------------------------
-http://docs.openstack.org/developer/networking-onos/devref/sfc_driver.html
-http://docs.openstack.org/developer/networking-onos/specs/sfc_driver.html
-https://blueprints.launchpad.net/networking-sfc/+spec/networking-sfc-onos-driver
-
-Openstack Tacker/Neutron SFC/ONOS Controller Demo Video
--------------------------------------------------------
-https://www.openstack.org/videos/austin-2016/realize-sfc-using-onos-controller
-VNFFG/ONOSFW Demo Video
------------------------
-ONOS Framework (ONOSFW)/SFC Demo video: https://www.youtube.com/watch?v=2vWusqd3WJ4
+- http://docs.openstack.org/developer/networking-onos/devref/sfc_driver.html
+- http://docs.openstack.org/developer/networking-onos/specs/sfc_driver.html
+- https://blueprints.launchpad.net/networking-sfc/+spec/networking-sfc-onos-driver
Integration with ODL SDN controller
-------------------------------------
-http://docs.openstack.org/developer/networking-odl/specs/sfc-driver.html
-https://blueprints.launchpad.net/networking-sfc/+spec/opendaylight-sfc-driver
+-----------------------------------
+- http://docs.openstack.org/developer/networking-odl/specs/sfc-driver.html
+- https://blueprints.launchpad.net/networking-sfc/+spec/opendaylight-sfc-driver
+
+Videos
+------
+OpenStack Tacker/Neutron SFC/ONOS Controller Demo
+
+- https://www.openstack.org/videos/austin-2016/realize-sfc-using-onos-controller
+
+VNFFG/ONOS Framework (ONOSFW) Demo
+
+- https://www.youtube.com/watch?v=2vWusqd3WJ4
-Openstack Tacker/Neutron SFC/ODL Controller Demo Video
-------------------------------------------------------
-https://www.openstack.org/videos/barcelona-2016/orchestrating-vnf-forwarding-graphs-and-sfc-using-opendaylight-neutron-and-tacker
+OpenStack Tacker/Neutron SFC/ODL Controller Demo
+
+- https://www.openstack.org/videos/barcelona-2016/orchestrating-vnf-forwarding-graphs-and-sfc-using-opendaylight-neutron-and-tacker
diff --git a/etc/neutron/rootwrap.d/networking-sfc.filters b/etc/neutron/rootwrap.d/networking-sfc.filters
deleted file mode 100644
index 8bf36cf..0000000
--- a/etc/neutron/rootwrap.d/networking-sfc.filters
+++ /dev/null
@@ -1,9 +0,0 @@
-# networking-sfc-rootwrap command filters for nodes on which networking-sfc is
-# expected to control the network
-#
-# This file should be owned by (and only-writeable by) the root user
-
-# format seems to be
-# cmd-name: filter-name, raw-command, user, args
-
-[Filters]
diff --git a/networking_sfc/__init__.py b/networking_sfc/__init__.py
deleted file mode 100644
index 6a9c541..0000000
--- a/networking_sfc/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import pbr.version
-
-
-__version__ = pbr.version.VersionInfo(
- 'networking_sfc').version_string()
diff --git a/networking_sfc/cli/__init__.py b/networking_sfc/cli/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/cli/__init__.py
+++ /dev/null
diff --git a/networking_sfc/cli/flow_classifier.py b/networking_sfc/cli/flow_classifier.py
deleted file mode 100644
index 128aa1c..0000000
--- a/networking_sfc/cli/flow_classifier.py
+++ /dev/null
@@ -1,202 +0,0 @@
-# Copyright (c) 2015 Huawei Technologies India Pvt.Limited.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from neutronclient.common import extension
-from neutronclient.common import utils
-from neutronclient.i18n import _
-from neutronclient.neutron import v2_0 as neutronv20
-
-from networking_sfc.cli import port_pair as pp
-
-FLOW_CLASSIFIER_RESOURCE = 'flow_classifier'
-
-
-def get_flowclassifier_id(client, id_or_name):
- return neutronv20.find_resourceid_by_name_or_id(client,
- FLOW_CLASSIFIER_RESOURCE,
- id_or_name)
-
-
-class FlowClassifier(extension.NeutronClientExtension):
- resource = FLOW_CLASSIFIER_RESOURCE
- resource_plural = '%ss' % resource
- object_path = '/sfc/%s' % resource_plural
- resource_path = '/sfc/%s/%%s' % resource_plural
- versions = ['2.0']
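-    # With resource = 'flow_classifier' these evaluate to:
-    #   resource_plural -> 'flow_classifiers'
-    #   object_path     -> '/sfc/flow_classifiers'
-    #   resource_path   -> '/sfc/flow_classifiers/%s'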
-
-
-class FlowClassifierCreate(extension.ClientExtensionCreate,
- FlowClassifier):
- """Create a Flow Classifier."""
-
- shell_command = 'flow-classifier-create'
-
- def add_known_arguments(self, parser):
- parser.add_argument(
- 'name',
- metavar='NAME',
- help=_('Name of the Flow Classifier.'))
- parser.add_argument(
- '--description',
- help=_('Description for the Flow Classifier.'))
- parser.add_argument(
- '--protocol',
- help=_('IP protocol name. Protocol name should be as per '
- 'IANA standard.'))
- parser.add_argument(
- '--ethertype',
- default='IPv4', choices=['IPv4', 'IPv6'],
- help=_('L2 ethertype, default is IPv4.'))
- parser.add_argument(
- '--source-port',
-            help=_('Source protocol port (allowed range [1,65535]). Must be '
-                   'specified as a:b, where a=min-port and b=max-port.'))
- parser.add_argument(
- '--destination-port',
-            help=_('Destination protocol port (allowed range [1,65535]). '
-                   'Must be specified as a:b, where a=min-port and '
-                   'b=max-port.'))
- parser.add_argument(
- '--source-ip-prefix',
- help=_('Source IP prefix or subnet.'))
- parser.add_argument(
- '--destination-ip-prefix',
- help=_('Destination IP prefix or subnet.'))
- parser.add_argument(
- '--logical-source-port',
- help=_('ID or name of the neutron source port.'))
- parser.add_argument(
- '--logical-destination-port',
- help=_('ID or name of the neutron destination port.'))
- parser.add_argument(
- '--l7-parameters',
- metavar='type=TYPE[,url=URL_PATH]',
- type=utils.str2dict,
- help=_('Dictionary of L7-parameters. Currently, no value is '
- 'supported for this option.'))
-
- def args2body(self, parsed_args):
- body = {}
- client = self.get_client()
- if parsed_args.logical_source_port:
- body['logical_source_port'] = pp.get_port_id(
- client, parsed_args.logical_source_port)
- if parsed_args.logical_destination_port:
- body['logical_destination_port'] = pp.get_port_id(
- client, parsed_args.logical_destination_port)
- if parsed_args.source_port:
- self._fill_protocol_port_info(body, 'source',
- parsed_args.source_port)
- if parsed_args.destination_port:
- self._fill_protocol_port_info(body, 'destination',
- parsed_args.destination_port)
- neutronv20.update_dict(parsed_args, body,
- ['name', 'description', 'protocol',
- 'ethertype', 'source_ip_prefix',
- 'destination_ip_prefix', 'l7_parameters'])
- return {self.resource: body}
-
- def _fill_protocol_port_info(self, body, port_type, port_val):
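-        # Split 'min:max' (e.g. '23:23'); a bare value such as '100'
-        # yields min == max.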
- min_port, sep, max_port = port_val.partition(":")
- if not max_port:
- max_port = min_port
- body[port_type + '_port_range_min'] = int(min_port)
- body[port_type + '_port_range_max'] = int(max_port)
-
-
-class FlowClassifierUpdate(extension.ClientExtensionUpdate,
- FlowClassifier):
- """Update Flow Classifier information."""
-
- shell_command = 'flow-classifier-update'
-
- def add_known_arguments(self, parser):
- parser.add_argument(
- '--name',
- metavar='NAME',
- help=_('Name of the Flow Classifier.'))
- parser.add_argument(
- '--description',
- help=_('Description for the Flow Classifier.'))
-
- def args2body(self, parsed_args):
- body = {}
- neutronv20.update_dict(parsed_args, body, ['name', 'description'])
- return {self.resource: body}
-
-
-class FlowClassifierDelete(extension.ClientExtensionDelete,
- FlowClassifier):
- """Delete a given Flow Classifier."""
-
- shell_command = 'flow-classifier-delete'
-
-
-class FlowClassifierList(extension.ClientExtensionList,
- FlowClassifier):
- """List Flow Classifiers that belong to a given tenant."""
-
- shell_command = 'flow-classifier-list'
- list_columns = ['id', 'name', 'summary']
- pagination_support = True
- sorting_support = True
-
- def extend_list(self, data, parsed_args):
- for d in data:
- val = []
- if d.get('protocol'):
- protocol = d['protocol'].upper()
- else:
- protocol = 'any'
- protocol = 'protocol: ' + protocol
- val.append(protocol)
- val.append(self._get_protocol_port_details(d, 'source'))
- val.append(self._get_protocol_port_details(d, 'destination'))
- if 'logical_source_port' in d:
- val.append('neutron_source_port: ' +
- str(d['logical_source_port']))
-
- if 'logical_destination_port' in d:
- val.append('neutron_destination_port: ' +
- str(d['logical_destination_port']))
-
- if 'l7_parameters' in d:
- l7_param = 'l7_parameters: ' + '{'
- for r in d['l7_parameters']:
- l7_param = l7_param + str(r).lower()
- l7_param = l7_param + '}'
- val.append(l7_param)
- d['summary'] = ',\n'.join(val)
-
- def _get_protocol_port_details(self, data, type):
- type_ip_prefix = type + '_ip_prefix'
- ip_prefix = data.get(type_ip_prefix)
- if ip_prefix is not None:
- port_info = type + '[port]: ' + str(ip_prefix)
- else:
- port_info = type + '[port]: any'
- min_port = data.get(type + '_port_range_min')
- if min_port is not None:
- max_port = data.get(type + '_port_range_max')
- port_info = (port_info + '[' + str(min_port) + ':' +
- str(max_port) + ']')
- else:
- port_info = port_info + '[any:any]'
- return port_info
-
-
-class FlowClassifierShow(extension.ClientExtensionShow, FlowClassifier):
- """Show information of a given Flow Classifier."""
-
- shell_command = 'flow-classifier-show'
diff --git a/networking_sfc/cli/port_chain.py b/networking_sfc/cli/port_chain.py
deleted file mode 100644
index 87bccf5..0000000
--- a/networking_sfc/cli/port_chain.py
+++ /dev/null
@@ -1,141 +0,0 @@
-# Copyright (c) 2015 Huawei Technologies India Pvt.Limited.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from neutronclient.common import extension
-from neutronclient.common import utils
-from neutronclient.i18n import _
-from neutronclient.neutron import v2_0 as neutronv20
-
-from networking_sfc.cli import flow_classifier as fc
-from networking_sfc.cli import port_pair_group as ppg
-
-PORT_CHAIN_RESOURCE = 'port_chain'
-
-
-class PortChain(extension.NeutronClientExtension):
- resource = PORT_CHAIN_RESOURCE
- resource_plural = '%ss' % resource
- object_path = '/sfc/%s' % resource_plural
- resource_path = '/sfc/%s/%%s' % resource_plural
- versions = ['2.0']
-
-
-class PortChainCreate(extension.ClientExtensionCreate, PortChain):
- """Create a Port Chain."""
-
- shell_command = 'port-chain-create'
-
- def add_known_arguments(self, parser):
- parser.add_argument(
- 'name',
- metavar='NAME',
- help=_('Name of the Port Chain.'))
- parser.add_argument(
- '--description',
- help=_('Description for the Port Chain.'))
- parser.add_argument(
- '--port-pair-group',
- metavar='PORT-PAIR-GROUP',
- dest='port_pair_groups',
- default=[], required=True,
- action='append',
- help=_('ID or name of the Port Pair Group. '
- 'This option can be repeated.'))
- parser.add_argument(
- '--flow-classifier',
- default=[],
- metavar='FLOW-CLASSIFIER',
- dest='flow_classifiers',
- action='append',
-            help=_('ID or name of the Flow Classifier. '
- 'This option can be repeated.'))
- parser.add_argument(
- '--chain-parameters',
- metavar='type=TYPE[,correlation=CORRELATION_TYPE]',
- type=utils.str2dict,
- help=_('Dictionary of chain parameters. Currently, only '
- 'correlation=mpls is supported by default.'))
-
- def args2body(self, parsed_args):
- body = {}
- client = self.get_client()
- if parsed_args.port_pair_groups:
- body['port_pair_groups'] = [ppg.get_port_pair_group_id(client, p)
- for p in parsed_args.port_pair_groups]
- if parsed_args.flow_classifiers:
- body['flow_classifiers'] = [fc.get_flowclassifier_id(client, f)
- for f in parsed_args.flow_classifiers]
- neutronv20.update_dict(parsed_args, body, ['name', 'description',
- 'chain_parameters'])
- return {self.resource: body}
-
-
-class PortChainUpdate(extension.ClientExtensionUpdate, PortChain):
- """Update Port Chain's information."""
-
- shell_command = 'port-chain-update'
-
- def add_known_arguments(self, parser):
- parser.add_argument(
- '--name',
- metavar='NAME',
- help=_('Name of the Port Chain.'))
- parser.add_argument(
- '--description',
- help=_('Description for the Port Chain.'))
- fw_args = parser.add_mutually_exclusive_group()
- fw_args.add_argument(
- '--flow-classifier',
- metavar='FLOW-CLASSIFIER',
- dest='flow_classifiers',
- action='append',
- help=_('ID or name of the Flow Classifier. '
- 'This option can be repeated.'))
- fw_args.add_argument(
- '--no-flow-classifier',
- action='store_true',
- help=_('Associate no Flow Classifier with the Port Chain.'))
-
- def args2body(self, parsed_args):
- body = {}
- if parsed_args.flow_classifiers:
- client = self.get_client()
- body['flow_classifiers'] = [fc.get_flowclassifier_id(client, f)
- for f in parsed_args.flow_classifiers]
- elif parsed_args.no_flow_classifier:
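-            # An explicit empty list detaches all Flow Classifiers from
-            # the Port Chain.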
- body['flow_classifiers'] = []
- neutronv20.update_dict(parsed_args, body, ['name', 'description'])
- return {self.resource: body}
-
-
-class PortChainDelete(extension.ClientExtensionDelete, PortChain):
- """Delete a given Port Chain."""
-
- shell_command = 'port-chain-delete'
-
-
-class PortChainList(extension.ClientExtensionList, PortChain):
- """List Port Chains that belong to a given tenant."""
-
- shell_command = 'port-chain-list'
- list_columns = ['id', 'name', 'port_pair_groups', 'flow_classifiers']
- pagination_support = True
- sorting_support = True
-
-
-class PortChainShow(extension.ClientExtensionShow, PortChain):
- """Show information of a given Port Chain."""
-
- shell_command = 'port-chain-show'
diff --git a/networking_sfc/cli/port_pair.py b/networking_sfc/cli/port_pair.py
deleted file mode 100644
index d934b02..0000000
--- a/networking_sfc/cli/port_pair.py
+++ /dev/null
@@ -1,124 +0,0 @@
-# Copyright (c) 2015 Huawei Technologies India Pvt.Limited.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from neutronclient.common import extension
-from neutronclient.common import utils
-from neutronclient.i18n import _
-from neutronclient.neutron import v2_0 as neutronv20
-
-PORT_RESOURCE = 'port'
-PORT_PAIR_RESOURCE = 'port_pair'
-
-
-def get_port_id(client, id_or_name):
- return neutronv20.find_resourceid_by_name_or_id(client,
- PORT_RESOURCE,
- id_or_name)
-
-
-def get_port_pair_id(client, id_or_name):
- return neutronv20.find_resourceid_by_name_or_id(client,
- PORT_PAIR_RESOURCE,
- id_or_name)
-
-
-class PortPair(extension.NeutronClientExtension):
- resource = PORT_PAIR_RESOURCE
- resource_plural = '%ss' % resource
- object_path = '/sfc/%s' % resource_plural
- resource_path = '/sfc/%s/%%s' % resource_plural
- versions = ['2.0']
-
-
-class PortPairCreate(extension.ClientExtensionCreate, PortPair):
- """Create a Port Pair."""
-
- shell_command = 'port-pair-create'
-
- def add_known_arguments(self, parser):
- parser.add_argument(
- 'name',
- metavar='NAME',
- help=_('Name of the Port Pair.'))
- parser.add_argument(
- '--description',
- help=_('Description for the Port Pair.'))
- parser.add_argument(
- '--ingress',
- required=True,
- help=_('ID or name of the ingress neutron port.'))
- parser.add_argument(
- '--egress',
- required=True,
- help=_('ID or name of the egress neutron port.'))
- parser.add_argument(
- '--service-function-parameters',
- metavar='type=TYPE[,correlation=CORRELATION_TYPE]',
- type=utils.str2dict,
- help=_('Dictionary of Service function parameters. '
- 'Currently, only correlation=None is supported.'))
-
- def args2body(self, parsed_args):
- body = {}
- client = self.get_client()
- if parsed_args.ingress:
- body['ingress'] = get_port_id(client, parsed_args.ingress)
- if parsed_args.egress:
- body['egress'] = get_port_id(client, parsed_args.egress)
- neutronv20.update_dict(parsed_args, body,
- ['name', 'description',
- 'service_function_parameters'])
- return {self.resource: body}
-
-
-class PortPairUpdate(extension.ClientExtensionUpdate, PortPair):
- """Update Port Pair's information."""
-
- shell_command = 'port-pair-update'
-
- def add_known_arguments(self, parser):
- parser.add_argument(
- '--name',
- metavar='NAME',
- help=_('Name of the Port Pair.'))
- parser.add_argument(
- '--description',
- help=_('Description for the Port Pair.'))
-
- def args2body(self, parsed_args):
- body = {}
- neutronv20.update_dict(parsed_args, body, ['name', 'description'])
- return {self.resource: body}
-
-
-class PortPairDelete(extension.ClientExtensionDelete, PortPair):
- """Delete a given Port Pair."""
-
- shell_command = 'port-pair-delete'
-
-
-class PortPairList(extension.ClientExtensionList, PortPair):
- """List Port Pairs that belongs to a given tenant."""
-
- shell_command = 'port-pair-list'
- list_columns = ['id', 'name', 'ingress', 'egress']
- pagination_support = True
- sorting_support = True
-
-
-class PortPairShow(extension.ClientExtensionShow, PortPair):
- """Show information of a given Port Pair."""
-
- shell_command = 'port-pair-show'
diff --git a/networking_sfc/cli/port_pair_group.py b/networking_sfc/cli/port_pair_group.py
deleted file mode 100644
index 2df0b89..0000000
--- a/networking_sfc/cli/port_pair_group.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# Copyright (c) 2015 Huawei Technologies India Pvt.Limited.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from neutronclient.common import extension
-from neutronclient.i18n import _
-from neutronclient.neutron import v2_0 as neutronv20
-
-from networking_sfc.cli import port_pair as pp
-
-PORT_PAIR_GROUP_RESOURCE = 'port_pair_group'
-
-
-def get_port_pair_group_id(client, id_or_name):
- return neutronv20.find_resourceid_by_name_or_id(client,
- PORT_PAIR_GROUP_RESOURCE,
- id_or_name)
-
-
-class PortPairGroup(extension.NeutronClientExtension):
- resource = PORT_PAIR_GROUP_RESOURCE
- resource_plural = '%ss' % resource
- object_path = '/sfc/%s' % resource_plural
- resource_path = '/sfc/%s/%%s' % resource_plural
- versions = ['2.0']
-
-
-def add_common_arguments(parser):
- parser.add_argument(
- '--description',
- help=_('Description for the Port Pair Group.'))
- parser.add_argument(
- '--port-pair',
- metavar='PORT-PAIR',
- dest='port_pairs',
- default=[],
- action='append',
-        help=_('ID or name of the Port Pair. '
- 'This option can be repeated.'))
-
-
-def update_common_args2body(client, body, parsed_args):
- if parsed_args.port_pairs:
- body['port_pairs'] = [(pp.get_port_pair_id(client, pp1))
- for pp1 in parsed_args.port_pairs]
- neutronv20.update_dict(parsed_args, body, ['name', 'description'])
- return body
-
-
-class PortPairGroupCreate(extension.ClientExtensionCreate, PortPairGroup):
- """Create a Port Pair Group."""
- shell_command = 'port-pair-group-create'
-
- def add_known_arguments(self, parser):
- parser.add_argument(
- 'name',
- metavar='NAME',
- help=_('Name of the Port Pair Group.'))
- add_common_arguments(parser)
-
- def args2body(self, parsed_args):
- body = {}
- body = update_common_args2body(self.get_client(), body, parsed_args)
- return {self.resource: body}
-
-
-class PortPairGroupUpdate(extension.ClientExtensionUpdate, PortPairGroup):
- """Update Port Pair Group's information."""
-
- shell_command = 'port-pair-group-update'
-
- def add_known_arguments(self, parser):
- parser.add_argument(
- '--name',
- metavar='NAME',
- help=_('Name of the Port Pair Group.'))
- add_common_arguments(parser)
-
- def args2body(self, parsed_args):
- body = {}
- body = update_common_args2body(self.get_client(), body, parsed_args)
- return {self.resource: body}
-
-
-class PortPairGroupDelete(extension.ClientExtensionDelete, PortPairGroup):
- """Delete a given Port Pair Group."""
-
- shell_command = 'port-pair-group-delete'
-
-
-class PortPairGroupList(extension.ClientExtensionList, PortPairGroup):
- """List Port Pair Groups that belongs to a given tenant."""
-
- shell_command = 'port-pair-group-list'
- list_columns = ['id', 'name', 'port_pairs']
- pagination_support = True
- sorting_support = True
-
-
-class PortPairGroupShow(extension.ClientExtensionShow, PortPairGroup):
- """Show information of a given Port Pair Group."""
-
- shell_command = 'port-pair-group-show'
diff --git a/networking_sfc/db/__init__.py b/networking_sfc/db/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/db/__init__.py
+++ /dev/null
diff --git a/networking_sfc/db/flowclassifier_db.py b/networking_sfc/db/flowclassifier_db.py
deleted file mode 100644
index 44e1c89..0000000
--- a/networking_sfc/db/flowclassifier_db.py
+++ /dev/null
@@ -1,211 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import six
-
-from oslo_log import helpers as log_helpers
-from oslo_log import log as logging
-from oslo_utils import uuidutils
-
-import sqlalchemy as sa
-from sqlalchemy import orm
-from sqlalchemy.orm.collections import attribute_mapped_collection
-from sqlalchemy.orm import exc
-
-from neutron.common import constants as const
-from neutron.db import common_db_mixin
-from neutron.db import model_base
-from neutron.db import models_v2
-from neutron.i18n import _LI
-
-from networking_sfc.extensions import flowclassifier as fc_ext
-
-LOG = logging.getLogger(__name__)
-
-
-class L7Parameter(model_base.BASEV2):
- """Represents a L7 parameter."""
- __tablename__ = 'sfc_flow_classifier_l7_parameters'
- keyword = sa.Column(sa.String(255), primary_key=True)
- value = sa.Column(sa.String(255))
- classifier_id = sa.Column(
- sa.String(36),
- sa.ForeignKey('sfc_flow_classifiers.id', ondelete='CASCADE'),
- primary_key=True)
-
-
-class FlowClassifier(model_base.BASEV2, models_v2.HasId,
- models_v2.HasTenant):
- """Represents a v2 neutron flow classifier."""
- __tablename__ = 'sfc_flow_classifiers'
-
- name = sa.Column(sa.String(255))
- ethertype = sa.Column(sa.String(40))
- protocol = sa.Column(sa.String(40))
- description = sa.Column(sa.String(255))
- source_port_range_min = sa.Column(sa.Integer)
- source_port_range_max = sa.Column(sa.Integer)
- destination_port_range_min = sa.Column(sa.Integer)
- destination_port_range_max = sa.Column(sa.Integer)
- source_ip_prefix = sa.Column(sa.String(255))
- destination_ip_prefix = sa.Column(sa.String(255))
- l7_parameters = orm.relationship(
- L7Parameter,
- collection_class=attribute_mapped_collection('keyword'),
- cascade='all, delete-orphan')
-
-
-class FlowClassifierDbPlugin(fc_ext.FlowClassifierPluginBase,
- common_db_mixin.CommonDbMixin):
-
- def _check_port_range_valid(self, port_range_min,
- port_range_max,
- protocol):
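-        # For example: (80, 22, 'tcp') raises FlowClassifierInvalidPortRange,
-        # while (80, 80, 'icmp') raises
-        # FlowClassifierProtocolRequiredWithPorts.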
- if (
- port_range_min is not None and
- port_range_max is not None and
- port_range_min > port_range_max
- ):
- raise fc_ext.FlowClassifierInvalidPortRange(
- port_range_min=port_range_min,
- port_range_max=port_range_max
- )
- if port_range_min is not None or port_range_max is not None:
- if protocol not in [const.PROTO_NAME_TCP, const.PROTO_NAME_UDP]:
- raise fc_ext.FlowClassifierProtocolRequiredWithPorts()
-
- def _get_fixed_ip_from_port(self, context, logical_port, ip_prefix):
- if logical_port is not None:
- self._get_port(context, logical_port)
- return ip_prefix
-
- @log_helpers.log_method_call
- def create_flow_classifier(self, context, flow_classifier):
- fc = flow_classifier['flow_classifier']
- tenant_id = self._get_tenant_id_for_create(context, fc)
- l7_parameters = {
-            key: L7Parameter(keyword=key, value=val)
- for key, val in six.iteritems(fc['l7_parameters'])}
- source_port_range_min = fc['source_port_range_min']
- source_port_range_max = fc['source_port_range_max']
-
- self._check_port_range_valid(source_port_range_min,
- source_port_range_max,
- fc['protocol'])
- destination_port_range_min = fc['destination_port_range_min']
- destination_port_range_max = fc['destination_port_range_max']
- self._check_port_range_valid(destination_port_range_min,
- destination_port_range_max,
- fc['protocol'])
- source_ip_prefix = fc['source_ip_prefix']
- destination_ip_prefix = fc['destination_ip_prefix']
-
- logical_source_port = fc['logical_source_port']
- logical_destination_port = fc['logical_destination_port']
- with context.session.begin(subtransactions=True):
- source_ip_prefix = self._get_fixed_ip_from_port(
- context, logical_source_port, source_ip_prefix)
- destination_ip_prefix = self._get_fixed_ip_from_port(
- context, logical_destination_port, destination_ip_prefix)
- flow_classifier_db = FlowClassifier(
- id=uuidutils.generate_uuid(),
- tenant_id=tenant_id,
- name=fc['name'],
- description=fc['description'],
- ethertype=fc['ethertype'],
- protocol=fc['protocol'],
- source_port_range_min=source_port_range_min,
- source_port_range_max=source_port_range_max,
- destination_port_range_min=destination_port_range_min,
- destination_port_range_max=destination_port_range_max,
- source_ip_prefix=source_ip_prefix,
- destination_ip_prefix=destination_ip_prefix,
- l7_parameters=l7_parameters
- )
- context.session.add(flow_classifier_db)
- return self._make_flow_classifier_dict(flow_classifier_db)
-
- def _make_flow_classifier_dict(self, flow_classifier, fields=None):
- res = {
- 'id': flow_classifier['id'],
- 'name': flow_classifier['name'],
- 'description': flow_classifier['description'],
- 'tenant_id': flow_classifier['tenant_id'],
- 'ethertype': flow_classifier['ethertype'],
- 'protocol': flow_classifier['protocol'],
- 'source_port_range_min': flow_classifier['source_port_range_min'],
- 'source_port_range_max': flow_classifier['source_port_range_max'],
- 'destination_port_range_min': (
- flow_classifier['destination_port_range_min']),
- 'destination_port_range_max': (
- flow_classifier['destination_port_range_max']),
- 'source_ip_prefix': flow_classifier['source_ip_prefix'],
- 'destination_ip_prefix': flow_classifier['destination_ip_prefix'],
- 'l7_parameters': {
- param['keyword']: param['value']
- for k, param in six.iteritems(flow_classifier.l7_parameters)
- }
-
- }
- return self._fields(res, fields)
-
- @log_helpers.log_method_call
- def get_flow_classifiers(self, context, filters=None, fields=None,
- sorts=None, limit=None, marker=None,
- page_reverse=False):
- marker_obj = self._get_marker_obj(context, 'flow_classifier',
- limit, marker)
- return self._get_collection(context,
- FlowClassifier,
- self._make_flow_classifier_dict,
- filters=filters, fields=fields,
- sorts=sorts,
- limit=limit, marker_obj=marker_obj,
- page_reverse=page_reverse)
-
- @log_helpers.log_method_call
- def get_flow_classifier(self, context, id, fields=None):
- flow_classifier = self._get_flow_classifier(context, id)
- return self._make_flow_classifier_dict(flow_classifier, fields)
-
- def _get_flow_classifier(self, context, id):
- try:
- return self._get_by_id(context, FlowClassifier, id)
- except exc.NoResultFound:
- raise fc_ext.FlowClassifierNotFound(id=id)
-
- def _get_port(self, context, id):
- try:
- return self._get_by_id(context, models_v2.Port, id)
- except exc.NoResultFound:
- raise fc_ext.FlowClassifierPortNotFound(id=id)
-
- @log_helpers.log_method_call
- def update_flow_classifier(self, context, id, flow_classifier):
- new_fc = flow_classifier['flow_classifier']
- with context.session.begin(subtransactions=True):
- old_fc = self._get_flow_classifier(context, id)
- old_fc.update(new_fc)
- return self._make_flow_classifier_dict(old_fc)
-
- @log_helpers.log_method_call
- def delete_flow_classifier(self, context, id):
- try:
- with context.session.begin(subtransactions=True):
- fc = self._get_flow_classifier(context, id)
- context.session.delete(fc)
- except AssertionError:
- raise fc_ext.FlowClassifierInUse(id=id)
- except fc_ext.FlowClassifierNotFound:
- LOG.info(_LI("Deleting a non-existing flow classifier."))
diff --git a/networking_sfc/db/migration/README b/networking_sfc/db/migration/README
deleted file mode 100644
index 20c6fb9..0000000
--- a/networking_sfc/db/migration/README
+++ /dev/null
@@ -1,3 +0,0 @@
-For details refer to:
-http://docs.openstack.org/developer/networking-sfc/alembic_migration.html
-
diff --git a/networking_sfc/db/migration/__init__.py b/networking_sfc/db/migration/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/db/migration/__init__.py
+++ /dev/null
diff --git a/networking_sfc/db/migration/alembic_migrations/__init__.py b/networking_sfc/db/migration/alembic_migrations/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/db/migration/alembic_migrations/__init__.py
+++ /dev/null
diff --git a/networking_sfc/db/migration/alembic_migrations/env.py b/networking_sfc/db/migration/alembic_migrations/env.py
deleted file mode 100644
index e2f858a..0000000
--- a/networking_sfc/db/migration/alembic_migrations/env.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from logging import config as logging_config
-
-from alembic import context
-from neutron.db import model_base
-from oslo_config import cfg
-from oslo_db.sqlalchemy import session
-import sqlalchemy as sa
-from sqlalchemy import event
-
-from networking_sfc.db.migration.models import head # noqa
-
-
-MYSQL_ENGINE = None
-SFC_VERSION_TABLE = 'alembic_version_sfc'
-config = context.config
-neutron_config = config.neutron_config
-logging_config.fileConfig(config.config_file_name)
-target_metadata = model_base.BASEV2.metadata
-
-
-def set_mysql_engine():
- try:
- mysql_engine = neutron_config.command.mysql_engine
- except cfg.NoSuchOptError:
- mysql_engine = None
-
- global MYSQL_ENGINE
- MYSQL_ENGINE = (mysql_engine or
- model_base.BASEV2.__table_args__['mysql_engine'])
-
-
-def run_migrations_offline():
- set_mysql_engine()
-
- kwargs = dict()
- if neutron_config.database.connection:
- kwargs['url'] = neutron_config.database.connection
- else:
- kwargs['dialect_name'] = neutron_config.database.engine
- kwargs['version_table'] = SFC_VERSION_TABLE
- context.configure(**kwargs)
-
- with context.begin_transaction():
- context.run_migrations()
-
-
-@event.listens_for(sa.Table, 'after_parent_attach')
-def set_storage_engine(target, parent):
- if MYSQL_ENGINE:
- target.kwargs['mysql_engine'] = MYSQL_ENGINE
-
-
-def run_migrations_online():
- set_mysql_engine()
- engine = session.create_engine(neutron_config.database.connection)
-
- connection = engine.connect()
- context.configure(
- connection=connection,
- target_metadata=target_metadata,
- version_table=SFC_VERSION_TABLE
- )
- try:
- with context.begin_transaction():
- context.run_migrations()
- finally:
- connection.close()
- engine.dispose()
-
-
-if context.is_offline_mode():
- run_migrations_offline()
-else:
- run_migrations_online()
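
The 'after_parent_attach' hook above is what stamps a storage engine onto every table touched during migration. A minimal standalone sketch of the same mechanism, using only stock SQLAlchemy (not code from this tree):

    import sqlalchemy as sa
    from sqlalchemy import event

    MYSQL_ENGINE = 'InnoDB'

    # Any Table attached to a MetaData after this listener is registered
    # picks up the mysql_engine keyword, exactly as in env.py above.
    @event.listens_for(sa.Table, 'after_parent_attach')
    def set_storage_engine(target, parent):
        if MYSQL_ENGINE:
            target.kwargs['mysql_engine'] = MYSQL_ENGINE

    metadata = sa.MetaData()
    demo = sa.Table('demo', metadata,
                    sa.Column('id', sa.Integer, primary_key=True))
    print(demo.kwargs['mysql_engine'])  # InnoDB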
diff --git a/networking_sfc/db/migration/alembic_migrations/script.py.mako b/networking_sfc/db/migration/alembic_migrations/script.py.mako
deleted file mode 100644
index 5f14159..0000000
--- a/networking_sfc/db/migration/alembic_migrations/script.py.mako
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-"""${message}
-
-Revision ID: ${up_revision}
-Revises: ${down_revision}
-Create Date: ${create_date}
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = ${repr(up_revision)}
-down_revision = ${repr(down_revision)}
-% if branch_labels:
-branch_labels = ${repr(branch_labels)}
-%endif
-
-from alembic import op
-import sqlalchemy as sa
-${imports if imports else ""}
-
-def upgrade():
- ${upgrades if upgrades else "pass"}
diff --git a/networking_sfc/db/migration/alembic_migrations/versions/HEADS b/networking_sfc/db/migration/alembic_migrations/versions/HEADS
deleted file mode 100644
index 152ab9e..0000000
--- a/networking_sfc/db/migration/alembic_migrations/versions/HEADS
+++ /dev/null
@@ -1,2 +0,0 @@
-48072cb59133
-5a475fc853e6
diff --git a/networking_sfc/db/migration/alembic_migrations/versions/liberty/contract/48072cb59133_initial.py b/networking_sfc/db/migration/alembic_migrations/versions/liberty/contract/48072cb59133_initial.py
deleted file mode 100644
index 87b53ce..0000000
--- a/networking_sfc/db/migration/alembic_migrations/versions/liberty/contract/48072cb59133_initial.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Initial Mitaka no-op script.
-
-Revision ID: 48072cb59133
-Revises: start_networking_sfc
-Create Date: 2015-07-28 22:18:13.330846
-
-"""
-
-from neutron.db.migration import cli
-
-
-# revision identifiers, used by Alembic.
-revision = '48072cb59133'
-down_revision = 'start_networking_sfc'
-branch_labels = (cli.CONTRACT_BRANCH,)
-
-
-def upgrade():
- pass
diff --git a/networking_sfc/db/migration/alembic_migrations/versions/liberty/expand/24fc7241aa5_initial.py b/networking_sfc/db/migration/alembic_migrations/versions/liberty/expand/24fc7241aa5_initial.py
deleted file mode 100644
index f23c9b3..0000000
--- a/networking_sfc/db/migration/alembic_migrations/versions/liberty/expand/24fc7241aa5_initial.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Initial Mitaka no-op script.
-
-Revision ID: 24fc7241aa5
-Revises: start_networking_sfc
-Create Date: 2015-09-11 11:37:19.349951
-
-"""
-
-from neutron.db.migration import cli
-
-
-# revision identifiers, used by Alembic.
-revision = '24fc7241aa5'
-down_revision = 'start_networking_sfc'
-branch_labels = (cli.EXPAND_BRANCH,)
-
-
-def upgrade():
- pass
diff --git a/networking_sfc/db/migration/alembic_migrations/versions/liberty/expand/5a475fc853e6_ovs_data_model.py b/networking_sfc/db/migration/alembic_migrations/versions/liberty/expand/5a475fc853e6_ovs_data_model.py
deleted file mode 100644
index e257548..0000000
--- a/networking_sfc/db/migration/alembic_migrations/versions/liberty/expand/5a475fc853e6_ovs_data_model.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Defining OVS data-model
-
-Revision ID: 5a475fc853e6
-Revises: c3e178d4a985
-Create Date: 2015-09-30 18:00:57.758762
-
-"""
-
-from alembic import op
-import sqlalchemy as sa
-
-
-# revision identifiers, used by Alembic.
-revision = '5a475fc853e6'
-down_revision = 'c3e178d4a985'
-
-
-def upgrade():
- op.create_table('sfc_portpair_details',
- sa.Column('tenant_id', sa.String(length=255), nullable=True),
- sa.Column('id', sa.String(length=36), nullable=False),
- sa.Column('ingress', sa.String(length=36), nullable=True),
- sa.Column('egress', sa.String(length=36), nullable=True),
- sa.Column('host_id', sa.String(length=255), nullable=False),
- sa.Column('mac_address', sa.String(length=32), nullable=False),
- sa.Column('network_type', sa.String(length=8), nullable=True),
- sa.Column('segment_id', sa.Integer(), nullable=True),
- sa.Column('local_endpoint', sa.String(length=64), nullable=False),
- sa.PrimaryKeyConstraint('id')
- )
-
- op.create_index(
- op.f('ix_sfc_portpair_details_tenant_id'),
- 'sfc_portpair_details', ['tenant_id'], unique=False
- )
- op.create_table('sfc_uuid_intid_associations',
- sa.Column('id', sa.String(length=36), nullable=False),
- sa.Column('uuid', sa.String(length=36), nullable=False),
- sa.Column('intid', sa.Integer(), nullable=False),
- sa.Column('type_', sa.String(length=32), nullable=False),
- sa.PrimaryKeyConstraint('id', 'uuid'),
- sa.UniqueConstraint('intid')
- )
-
- op.create_table('sfc_path_nodes',
- sa.Column('tenant_id', sa.String(length=255), nullable=True),
- sa.Column('id', sa.String(length=36), nullable=False),
- sa.Column('nsp', sa.Integer(), nullable=False),
- sa.Column('nsi', sa.Integer(), nullable=False),
- sa.Column('node_type', sa.String(length=32), nullable=True),
- sa.Column('portchain_id', sa.String(length=255), nullable=True),
- sa.Column('status', sa.String(length=32), nullable=True),
- sa.Column('next_group_id', sa.Integer(), nullable=True),
- sa.Column('next_hop', sa.String(length=512), nullable=True),
- sa.ForeignKeyConstraint(['portchain_id'], ['sfc_port_chains.id'],
- ondelete='CASCADE'),
- sa.PrimaryKeyConstraint('id')
- )
- op.create_index(
- op.f('ix_sfc_path_nodes_tenant_id'),
- 'sfc_path_nodes', ['tenant_id'], unique=False
- )
-
- op.create_table('sfc_path_port_associations',
- sa.Column('pathnode_id', sa.String(length=36), nullable=False),
- sa.Column('portpair_id', sa.String(length=36), nullable=False),
- sa.Column('weight', sa.Integer(), nullable=False),
- sa.ForeignKeyConstraint(['pathnode_id'], ['sfc_path_nodes.id'],
- ondelete='CASCADE'),
- sa.ForeignKeyConstraint(['portpair_id'], ['sfc_portpair_details.id'],
- ondelete='CASCADE'),
- sa.PrimaryKeyConstraint('pathnode_id', 'portpair_id')
- )
diff --git a/networking_sfc/db/migration/alembic_migrations/versions/liberty/expand/9768e6a66c9_flowclassifier_data_model.py b/networking_sfc/db/migration/alembic_migrations/versions/liberty/expand/9768e6a66c9_flowclassifier_data_model.py
deleted file mode 100644
index e43e9c1..0000000
--- a/networking_sfc/db/migration/alembic_migrations/versions/liberty/expand/9768e6a66c9_flowclassifier_data_model.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Defining flow-classifier data-model
-
-Revision ID: 9768e6a66c9
-Revises: 24fc7241aa5
-Create Date: 2015-09-30 17:54:35.852573
-
-"""
-
-from alembic import op
-import sqlalchemy as sa
-
-from neutron.api.v2 import attributes as attr
-
-# revision identifiers, used by Alembic.
-revision = '9768e6a66c9'
-down_revision = '24fc7241aa5'
-
-
-def upgrade():
- op.create_table(
- 'sfc_flow_classifiers',
- sa.Column('tenant_id', sa.String(length=attr.TENANT_ID_MAX_LEN),
- nullable=True, index=True),
- sa.Column('id', sa.String(length=36), nullable=False),
- sa.Column('name', sa.String(length=attr.NAME_MAX_LEN), nullable=True),
- sa.Column('ethertype', sa.String(length=40), nullable=True),
- sa.Column('protocol', sa.String(length=40), nullable=True),
- sa.Column('description', sa.String(length=attr.DESCRIPTION_MAX_LEN),
- nullable=True),
- sa.Column('source_port_range_min', sa.Integer(), nullable=True),
- sa.Column('source_port_range_max', sa.Integer(), nullable=True),
- sa.Column('destination_port_range_min', sa.Integer(), nullable=True),
- sa.Column('destination_port_range_max', sa.Integer(), nullable=True),
- sa.Column('source_ip_prefix', sa.String(length=255), nullable=True),
- sa.Column('destination_ip_prefix', sa.String(length=255),
- nullable=True),
- sa.PrimaryKeyConstraint('id')
- )
-
- op.create_table(
- 'sfc_flow_classifier_l7_parameters',
- sa.Column('keyword', sa.String(length=255), nullable=False),
- sa.Column('value', sa.String(length=255), nullable=True),
- sa.Column('classifier_id', sa.String(length=36), nullable=False),
- sa.ForeignKeyConstraint(['classifier_id'], ['sfc_flow_classifiers.id'], ),
- sa.PrimaryKeyConstraint('keyword', 'classifier_id')
- )
diff --git a/networking_sfc/db/migration/alembic_migrations/versions/liberty/expand/c3e178d4a985_sfc_data_model.py b/networking_sfc/db/migration/alembic_migrations/versions/liberty/expand/c3e178d4a985_sfc_data_model.py
deleted file mode 100644
index 9f5362a..0000000
--- a/networking_sfc/db/migration/alembic_migrations/versions/liberty/expand/c3e178d4a985_sfc_data_model.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Defining Port Chain data-model.
-
-Revision ID: c3e178d4a985
-Revises: 9768e6a66c9
-Create Date: 2015-09-11 11:37:19.349951
-
-"""
-
-from alembic import op
-import sqlalchemy as sa
-
-from neutron.api.v2 import attributes as attr
-
-# revision identifiers, used by Alembic.
-revision = 'c3e178d4a985'
-down_revision = '9768e6a66c9'
-
-
-def upgrade():
- op.create_table(
- 'sfc_port_pair_groups',
- sa.Column('id', sa.String(length=36), nullable=False),
- sa.Column('tenant_id', sa.String(length=attr.TENANT_ID_MAX_LEN),
- nullable=True, index=True),
- sa.Column('name', sa.String(length=attr.NAME_MAX_LEN),
- nullable=True),
- sa.Column('description', sa.String(length=attr.DESCRIPTION_MAX_LEN),
- nullable=True),
- sa.PrimaryKeyConstraint('id')
- )
-
- op.create_table(
- 'sfc_port_pairs',
- sa.Column('tenant_id', sa.String(length=attr.TENANT_ID_MAX_LEN),
- nullable=True, index=True),
- sa.Column('id', sa.String(length=36), nullable=False),
- sa.Column('name', sa.String(length=attr.NAME_MAX_LEN), nullable=True),
- sa.Column('description', sa.String(length=attr.DESCRIPTION_MAX_LEN),
- nullable=True),
- sa.Column('ingress', sa.String(length=36), nullable=False),
- sa.Column('egress', sa.String(length=36), nullable=False),
- sa.Column('portpairgroup_id', sa.String(length=36), nullable=True),
- sa.ForeignKeyConstraint(['egress'], ['ports.id'],
- ondelete='RESTRICT'),
- sa.ForeignKeyConstraint(['ingress'], ['ports.id'],
- ondelete='RESTRICT'),
- sa.ForeignKeyConstraint(['portpairgroup_id'], ['sfc_port_pair_groups.id'],
- ondelete='RESTRICT'),
- sa.PrimaryKeyConstraint('id'),
- sa.UniqueConstraint('ingress', 'egress',
- name='uniq_sfc_port_pairs0ingress0egress')
- )
-
- op.create_table(
- 'sfc_port_chains',
- sa.Column('tenant_id', sa.String(length=attr.TENANT_ID_MAX_LEN),
- nullable=True, index=True),
- sa.Column('id', sa.String(length=36), nullable=False),
- sa.Column('name', sa.String(length=attr.NAME_MAX_LEN),
- nullable=True),
- sa.Column('description', sa.String(length=attr.DESCRIPTION_MAX_LEN),
- nullable=True),
- sa.PrimaryKeyConstraint('id')
- )
-
- op.create_table(
- 'sfc_chain_group_associations',
- sa.Column('portpairgroup_id', sa.String(length=36), nullable=False),
- sa.Column('portchain_id', sa.String(length=36), nullable=False),
- sa.Column('position', sa.Integer(), nullable=True),
- sa.ForeignKeyConstraint(['portchain_id'], ['sfc_port_chains.id'], ),
- sa.ForeignKeyConstraint(['portpairgroup_id'], ['sfc_port_pair_groups.id'],
- ondelete='RESTRICT'),
- sa.PrimaryKeyConstraint('portpairgroup_id', 'portchain_id')
- )
-
- op.create_table(
- 'sfc_port_chain_parameters',
- sa.Column('keyword', sa.String(length=255), nullable=False),
- sa.Column('value', sa.String(length=255), nullable=True),
- sa.Column('chain_id', sa.String(length=36), nullable=False),
- sa.ForeignKeyConstraint(['chain_id'], ['sfc_port_chains.id'], ),
- sa.PrimaryKeyConstraint('keyword', 'chain_id')
- )
-
- op.create_table(
- 'sfc_service_function_params',
- sa.Column('keyword', sa.String(length=255), nullable=False),
- sa.Column('value', sa.String(length=255), nullable=True),
- sa.Column('pair_id', sa.String(length=36), nullable=False),
- sa.ForeignKeyConstraint(['pair_id'], ['sfc_port_pairs.id'], ),
- sa.PrimaryKeyConstraint('keyword', 'pair_id')
- )
-
- op.create_table(
- 'sfc_chain_classifier_associations',
- sa.Column('flowclassifier_id', sa.String(length=36), nullable=False),
- sa.Column('portchain_id', sa.String(length=36), nullable=False),
- sa.ForeignKeyConstraint(['flowclassifier_id'],
- ['sfc_flow_classifiers.id'],
- ondelete='RESTRICT'),
- sa.ForeignKeyConstraint(['portchain_id'], ['sfc_port_chains.id'], ),
- sa.PrimaryKeyConstraint('flowclassifier_id', 'portchain_id'),
- sa.UniqueConstraint('flowclassifier_id')
- )
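
The position column on sfc_chain_group_associations is what lets the ORM keep port pair groups ordered within a chain (see the ordering_list relationship in sfc_db.py below). A standalone sketch of that pattern, assuming nothing beyond stock SQLAlchemy and simplified hypothetical table names:

    import sqlalchemy as sa
    from sqlalchemy import orm
    from sqlalchemy.ext.orderinglist import ordering_list

    Base = orm.declarative_base()

    class Assoc(Base):
        __tablename__ = 'assoc'
        chain_id = sa.Column(sa.Integer, sa.ForeignKey('chain.id'),
                             primary_key=True)
        group_id = sa.Column(sa.Integer, primary_key=True)
        position = sa.Column(sa.Integer)

    class Chain(Base):
        __tablename__ = 'chain'
        id = sa.Column(sa.Integer, primary_key=True)
        groups = orm.relationship(
            Assoc, order_by=Assoc.position,
            collection_class=ordering_list('position'),
            cascade='all, delete-orphan')

    engine = sa.create_engine('sqlite://')
    Base.metadata.create_all(engine)
    with orm.Session(engine) as session:
        chain = Chain(id=1, groups=[Assoc(group_id=10), Assoc(group_id=20)])
        session.add(chain)
        session.flush()
        # Positions are maintained automatically by ordering_list:
        print([(a.group_id, a.position) for a in chain.groups])
        # [(10, 0), (20, 1)]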
diff --git a/networking_sfc/db/migration/alembic_migrations/versions/start_networking_sfc.py b/networking_sfc/db/migration/alembic_migrations/versions/start_networking_sfc.py
deleted file mode 100644
index 4810d1e..0000000
--- a/networking_sfc/db/migration/alembic_migrations/versions/start_networking_sfc.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-"""start networking-sfc chain
-
-Revision ID: start_networking_sfc
-Revises: None
-Create Date: 2015-09-10 18:42:08.262632
-
-"""
-
-# revision identifiers, used by Alembic.
-revision = 'start_networking_sfc'
-down_revision = None
-
-
-def upgrade():
- pass
diff --git a/networking_sfc/db/migration/models/__init__.py b/networking_sfc/db/migration/models/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/db/migration/models/__init__.py
+++ /dev/null
diff --git a/networking_sfc/db/migration/models/head.py b/networking_sfc/db/migration/models/head.py
deleted file mode 100644
index 1345cd6..0000000
--- a/networking_sfc/db/migration/models/head.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from neutron.db import model_base
-
-from networking_sfc.db import flowclassifier_db # noqa
-from networking_sfc.db import sfc_db # noqa
-from networking_sfc.services.sfc.drivers.ovs import db as ovs_db # noqa
-
-
-def get_metadata():
- return model_base.BASEV2.metadata
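
head.py imports the model modules purely for their side effect: defining the classes registers their tables on the shared metadata that alembic compares against. A toy single-file equivalent, with hypothetical names:

    import sqlalchemy as sa
    from sqlalchemy.orm import declarative_base

    Base = declarative_base()

    # Defining the class is enough -- the table is registered on
    # Base.metadata as an import-time side effect, which is all the
    # 'noqa' imports in head.py rely on.
    class Widget(Base):
        __tablename__ = 'widgets'
        id = sa.Column(sa.Integer, primary_key=True)

    def get_metadata():
        return Base.metadata

    print(sorted(get_metadata().tables))  # ['widgets']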
diff --git a/networking_sfc/db/sfc_db.py b/networking_sfc/db/sfc_db.py
deleted file mode 100644
index 6dcd349..0000000
--- a/networking_sfc/db/sfc_db.py
+++ /dev/null
@@ -1,553 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import six
-
-from oslo_log import helpers as log_helpers
-from oslo_log import log as logging
-from oslo_utils import uuidutils
-
-import sqlalchemy as sa
-from sqlalchemy.ext.orderinglist import ordering_list
-from sqlalchemy import orm
-from sqlalchemy.orm.collections import attribute_mapped_collection
-from sqlalchemy.orm import exc
-
-from neutron.api.v2.attributes import DESCRIPTION_MAX_LEN
-from neutron.api.v2.attributes import NAME_MAX_LEN
-from neutron.db import common_db_mixin
-from neutron.db import model_base
-from neutron.db import models_v2
-from neutron.i18n import _LI
-
-from networking_sfc.db import flowclassifier_db as fc_db
-from networking_sfc.extensions import flowclassifier as ext_fc
-from networking_sfc.extensions import sfc as ext_sfc
-
-
-LOG = logging.getLogger(__name__)
-
-UUID_LEN = 36
-PARAM_LEN = 255
-
-
-class ChainParameter(model_base.BASEV2):
- """Represents a single chain parameter."""
- __tablename__ = 'sfc_port_chain_parameters'
- keyword = sa.Column(sa.String(PARAM_LEN), primary_key=True)
- value = sa.Column(sa.String(PARAM_LEN))
- chain_id = sa.Column(
- sa.String(UUID_LEN),
- sa.ForeignKey('sfc_port_chains.id', ondelete='CASCADE'),
- primary_key=True)
-
-
-class ServiceFunctionParam(model_base.BASEV2):
- """Represents a service function parameter."""
- __tablename__ = 'sfc_service_function_params'
- keyword = sa.Column(sa.String(PARAM_LEN), primary_key=True)
- value = sa.Column(sa.String(PARAM_LEN))
- pair_id = sa.Column(
- sa.String(UUID_LEN),
- sa.ForeignKey('sfc_port_pairs.id', ondelete='CASCADE'),
- primary_key=True)
-
-
-class ChainClassifierAssoc(model_base.BASEV2):
- """Relation table between sfc_port_chains and flow_classifiers."""
- __tablename__ = 'sfc_chain_classifier_associations'
- flowclassifier_id = sa.Column(
- sa.String(UUID_LEN),
- sa.ForeignKey('sfc_flow_classifiers.id', ondelete='RESTRICT'),
- primary_key=True, nullable=False, unique=True)
- portchain_id = sa.Column(
- sa.String(UUID_LEN),
- sa.ForeignKey('sfc_port_chains.id', ondelete='CASCADE'),
- primary_key=True)
- flow_classifier = orm.relationship(
- fc_db.FlowClassifier,
- backref='chain_classifier_associations'
- )
-
-
-class PortPair(model_base.BASEV2, models_v2.HasId,
- models_v2.HasTenant):
- """Represents the ingress and egress ports for a single service function.
-
- """
- __tablename__ = 'sfc_port_pairs'
- name = sa.Column(sa.String(NAME_MAX_LEN))
- description = sa.Column(sa.String(DESCRIPTION_MAX_LEN))
- ingress = sa.Column(
- sa.String(UUID_LEN),
- sa.ForeignKey('ports.id', ondelete='RESTRICT'),
- nullable=False)
- egress = sa.Column(
- sa.String(UUID_LEN),
- sa.ForeignKey('ports.id', ondelete='RESTRICT'),
- nullable=False)
-
- portpairgroup_id = sa.Column(
- sa.String(UUID_LEN),
- sa.ForeignKey('sfc_port_pair_groups.id', ondelete='RESTRICT'))
- service_function_parameters = orm.relationship(
- ServiceFunctionParam,
- collection_class=attribute_mapped_collection('keyword'),
- cascade='all, delete-orphan')
-
- __table_args__ = (
- sa.UniqueConstraint(
- ingress, egress,
- name='uniq_sfc_port_pairs0ingress0egress'
- ),
- model_base.BASEV2.__table_args__
- )
-
-
-class ChainGroupAssoc(model_base.BASEV2):
- """Relation table between sfc_port_chains and sfc_port_pair_groups."""
- __tablename__ = 'sfc_chain_group_associations'
- portpairgroup_id = sa.Column(
- sa.String(UUID_LEN),
- sa.ForeignKey('sfc_port_pair_groups.id', ondelete='RESTRICT'),
- primary_key=True, nullable=False)
- portchain_id = sa.Column(
- sa.String(UUID_LEN),
- sa.ForeignKey('sfc_port_chains.id', ondelete='CASCADE'),
- primary_key=True)
- position = sa.Column(sa.Integer)
-
-
-class PortPairGroup(model_base.BASEV2, models_v2.HasId,
- models_v2.HasTenant):
- """Represents a port pair group model."""
- __tablename__ = 'sfc_port_pair_groups'
- name = sa.Column(sa.String(NAME_MAX_LEN))
- description = sa.Column(sa.String(DESCRIPTION_MAX_LEN))
- port_pairs = orm.relationship(
- PortPair,
- backref='port_pair_group'
- )
- chain_group_associations = orm.relationship(
- ChainGroupAssoc,
- backref='port_pair_groups')
-
-
-class PortChain(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
- """Represents a Neutron service function Port Chain."""
- __tablename__ = 'sfc_port_chains'
-
- name = sa.Column(sa.String(NAME_MAX_LEN))
- description = sa.Column(sa.String(DESCRIPTION_MAX_LEN))
- chain_group_associations = orm.relationship(
- ChainGroupAssoc,
- backref='port_chain',
- order_by="ChainGroupAssoc.position",
- collection_class=ordering_list('position'),
- cascade='all, delete-orphan')
- chain_classifier_associations = orm.relationship(
- ChainClassifierAssoc,
- backref='port_chain',
- cascade='all, delete-orphan')
- chain_parameters = orm.relationship(
- ChainParameter,
- collection_class=attribute_mapped_collection('keyword'),
- cascade='all, delete-orphan')
-
-
-class SfcDbPlugin(
- ext_sfc.SfcPluginBase,
- common_db_mixin.CommonDbMixin
-):
- """Mixin class to add port chain to db_plugin_base_v2."""
-
- def _make_port_chain_dict(self, port_chain, fields=None):
- res = {
- 'id': port_chain['id'],
- 'name': port_chain['name'],
- 'tenant_id': port_chain['tenant_id'],
- 'description': port_chain['description'],
- 'port_pair_groups': [
- assoc['portpairgroup_id']
- for assoc in port_chain['chain_group_associations']
- ],
- 'flow_classifiers': [
- assoc['flowclassifier_id']
- for assoc in port_chain['chain_classifier_associations']
- ],
- 'chain_parameters': {
- param['keyword']: param['value']
- for k, param in six.iteritems(port_chain['chain_parameters'])
- }
- }
- return self._fields(res, fields)
-
- def _validate_port_pair_groups(self, context, pg_ids):
- with context.session.begin(subtransactions=True):
- query = self._model_query(context, PortChain)
- for port_chain_db in query.all():
- pc_pg_ids = [
- assoc['portpairgroup_id']
- for assoc in port_chain_db.chain_group_associations
- ]
- if pc_pg_ids == pg_ids:
- raise ext_sfc.InvalidPortPairGroups(
- port_pair_groups=pg_ids, port_chain=port_chain_db.id)
-
- def _validate_flow_classifiers(self, context, fc_ids):
- # TODO(xiaodong): Validate flow classifiers if needed in future.
- pass
-
- def _setup_chain_group_associations(
- self, context, port_chain, pg_ids
- ):
- with context.session.begin(subtransactions=True):
- chain_group_associations = []
- for pg_id in pg_ids:
- query = self._model_query(context, ChainGroupAssoc)
- chain_group_association = query.filter_by(
- portchain_id=port_chain.id, portpairgroup_id=pg_id
- ).first()
- if not chain_group_association:
- chain_group_association = ChainGroupAssoc(
- portpairgroup_id=pg_id
- )
- chain_group_associations.append(chain_group_association)
- port_chain.chain_group_associations = chain_group_associations
-
- def _setup_chain_classifier_associations(
- self, context, port_chain, fc_ids
- ):
- with context.session.begin(subtransactions=True):
- chain_classifier_associations = []
- for fc_id in fc_ids:
- query = self._model_query(context, ChainClassifierAssoc)
- chain_classifier_association = query.filter_by(
- portchain_id=port_chain.id, flowclassifier_id=fc_id
- ).first()
- if not chain_classifier_association:
- chain_classifier_association = ChainClassifierAssoc(
- flowclassifier_id=fc_id
- )
- chain_classifier_associations.append(
- chain_classifier_association)
- port_chain.chain_classifier_associations = (
- chain_classifier_associations)
-
- @log_helpers.log_method_call
- def create_port_chain(self, context, port_chain):
- """Create a port chain."""
- pc = port_chain['port_chain']
- tenant_id = self._get_tenant_id_for_create(context, pc)
- with context.session.begin(subtransactions=True):
- chain_parameters = {
- key: ChainParameter(keyword=key, value=val)
- for key, val in six.iteritems(pc['chain_parameters'])}
-
- pg_ids = pc['port_pair_groups']
- for pg_id in pg_ids:
- self._get_port_pair_group(context, pg_id)
- fc_ids = pc['flow_classifiers']
- fcs = [
- self._get_flow_classifier(context, fc_id)
- for fc_id in fc_ids
- ]
- for fc in fcs:
- if fc.chain_classifier_associations:
- raise ext_fc.FlowClassifierInUse(id=fc.id)
-
- self._validate_port_pair_groups(context, pg_ids)
- self._validate_flow_classifiers(context, fc_ids)
- port_chain_db = PortChain(id=uuidutils.generate_uuid(),
- tenant_id=tenant_id,
- description=pc['description'],
- name=pc['name'],
- chain_parameters=chain_parameters)
- self._setup_chain_group_associations(
- context, port_chain_db, pg_ids)
- self._setup_chain_classifier_associations(
- context, port_chain_db, fc_ids)
- context.session.add(port_chain_db)
-
- return self._make_port_chain_dict(port_chain_db)
-
- @log_helpers.log_method_call
- def get_port_chains(self, context, filters=None, fields=None,
- sorts=None, limit=None,
- marker=None, page_reverse=False):
-
- marker_obj = self._get_marker_obj(context, 'port_chain', limit, marker)
- return self._get_collection(context,
- PortChain,
- self._make_port_chain_dict,
- filters=filters, fields=fields,
- sorts=sorts,
- limit=limit, marker_obj=marker_obj,
- page_reverse=page_reverse)
-
- def get_port_chains_count(self, context, filters=None):
- return self._get_collection_count(context, PortChain,
- filters=filters)
-
- @log_helpers.log_method_call
- def get_port_chain(self, context, id, fields=None):
- portchain = self._get_port_chain(context, id)
- return self._make_port_chain_dict(portchain, fields)
-
- @log_helpers.log_method_call
- def _get_port_chain(self, context, id):
- try:
- return self._get_by_id(context, PortChain, id)
- except exc.NoResultFound:
- raise ext_sfc.PortChainNotFound(id=id)
-
- @log_helpers.log_method_call
- def delete_port_chain(self, context, id):
- try:
- with context.session.begin(subtransactions=True):
- pc = self._get_port_chain(context, id)
- context.session.delete(pc)
- except ext_sfc.PortChainNotFound:
- LOG.info(_LI("Deleting a non-existing port chain."))
-
- @log_helpers.log_method_call
- def update_port_chain(self, context, id, port_chain):
- p = port_chain['port_chain']
- with context.session.begin(subtransactions=True):
- pc = self._get_port_chain(context, id)
- for k, v in six.iteritems(p):
- if k == 'flow_classifiers':
- for fc_id in v:
- self._get_flow_classifier(context, fc_id)
- self._setup_chain_classifier_associations(context, pc, v)
- else:
- pc[k] = v
- return self._make_port_chain_dict(pc)
-
- def _make_port_pair_dict(self, port_pair, fields=None):
- res = {
- 'id': port_pair['id'],
- 'name': port_pair['name'],
- 'description': port_pair['description'],
- 'tenant_id': port_pair['tenant_id'],
- 'ingress': port_pair['ingress'],
- 'egress': port_pair['egress'],
- 'service_function_parameters': {
- param['keyword']: param['value']
- for k, param in six.iteritems(
- port_pair['service_function_parameters'])
- }
- }
-
- return self._fields(res, fields)
-
- def _validate_port_pair_ingress_egress(self, ingress, egress):
- if 'device_id' not in ingress or not ingress['device_id']:
- raise ext_sfc.PortPairIngressNoHost(
- ingress=ingress['id']
- )
- if 'device_id' not in egress or not egress['device_id']:
- raise ext_sfc.PortPairEgressNoHost(
- egress=egress['id']
- )
- if ingress['device_id'] != egress['device_id']:
- raise ext_sfc.PortPairIngressEgressDifferentHost(
- ingress=ingress['id'],
- egress=egress['id'])
-
- @log_helpers.log_method_call
- def create_port_pair(self, context, port_pair):
- """Create a port pair."""
- pp = port_pair['port_pair']
- tenant_id = self._get_tenant_id_for_create(context, pp)
- with context.session.begin(subtransactions=True):
- service_function_parameters = {
- key: ServiceFunctionParam(keyword=key, value=val)
- for key, val in six.iteritems(
- pp['service_function_parameters']
- )
- }
- ingress = self._get_port(context, pp['ingress'])
- egress = self._get_port(context, pp['egress'])
- self._validate_port_pair_ingress_egress(ingress, egress)
- port_pair_db = PortPair(
- id=uuidutils.generate_uuid(),
- name=pp['name'],
- description=pp['description'],
- tenant_id=tenant_id,
- ingress=pp['ingress'],
- egress=pp['egress'],
- service_function_parameters=service_function_parameters
- )
- context.session.add(port_pair_db)
- return self._make_port_pair_dict(port_pair_db)
-
- @log_helpers.log_method_call
- def get_port_pairs(self, context, filters=None, fields=None,
- sorts=None, limit=None, marker=None,
- page_reverse=False):
- marker_obj = self._get_marker_obj(context, 'port_pair',
- limit, marker)
- return self._get_collection(context,
- PortPair,
- self._make_port_pair_dict,
- filters=filters, fields=fields,
- sorts=sorts,
- limit=limit, marker_obj=marker_obj,
- page_reverse=page_reverse)
-
- def get_port_pairs_count(self, context, filters=None):
- return self._get_collection_count(context, PortPair,
- filters=filters)
-
- @log_helpers.log_method_call
- def get_port_pair(self, context, id, fields=None):
- port_pair = self._get_port_pair(context, id)
- return self._make_port_pair_dict(port_pair, fields)
-
- def _get_port_pair(self, context, id):
- try:
- return self._get_by_id(context, PortPair, id)
- except exc.NoResultFound:
- raise ext_sfc.PortPairNotFound(id=id)
-
- def _get_port(self, context, id):
- try:
- return self._get_by_id(context, models_v2.Port, id)
- except exc.NoResultFound:
- raise ext_sfc.PortPairPortNotFound(id=id)
-
- @log_helpers.log_method_call
- def update_port_pair(self, context, id, port_pair):
- new_pp = port_pair['port_pair']
- with context.session.begin(subtransactions=True):
- old_pp = self._get_port_pair(context, id)
- old_pp.update(new_pp)
- return self._make_port_pair_dict(old_pp)
-
- @log_helpers.log_method_call
- def delete_port_pair(self, context, id):
- try:
- with context.session.begin(subtransactions=True):
- pp = self._get_port_pair(context, id)
- if pp.portpairgroup_id:
- raise ext_sfc.PortPairInUse(id=id)
- context.session.delete(pp)
- except ext_sfc.PortPairNotFound:
- LOG.info(_LI("Deleting a non-existing port pair."))
-
- def _make_port_pair_group_dict(self, port_pair_group, fields=None):
- res = {
- 'id': port_pair_group['id'],
- 'name': port_pair_group['name'],
- 'description': port_pair_group['description'],
- 'tenant_id': port_pair_group['tenant_id'],
- 'port_pairs': [pp['id'] for pp in port_pair_group['port_pairs']],
- }
-
- return self._fields(res, fields)
-
- @log_helpers.log_method_call
- def create_port_pair_group(self, context, port_pair_group):
- """Create a port pair group."""
- pg = port_pair_group['port_pair_group']
- tenant_id = self._get_tenant_id_for_create(context, pg)
-
- with context.session.begin(subtransactions=True):
- portpairs_list = [self._get_port_pair(context, pp_id)
- for pp_id in pg['port_pairs']]
- for portpair in portpairs_list:
- if portpair.portpairgroup_id:
- raise ext_sfc.PortPairInUse(id=portpair.id)
- port_pair_group_db = PortPairGroup(
- id=uuidutils.generate_uuid(),
- name=pg['name'],
- description=pg['description'],
- tenant_id=tenant_id,
- port_pairs=portpairs_list)
- context.session.add(port_pair_group_db)
- return self._make_port_pair_group_dict(port_pair_group_db)
-
- @log_helpers.log_method_call
- def get_port_pair_groups(self, context, filters=None, fields=None,
- sorts=None, limit=None, marker=None,
- page_reverse=False):
- marker_obj = self._get_marker_obj(context, 'port_pair_group',
- limit, marker)
- return self._get_collection(context,
- PortPairGroup,
- self._make_port_pair_group_dict,
- filters=filters, fields=fields,
- sorts=sorts,
- limit=limit, marker_obj=marker_obj,
- page_reverse=page_reverse)
-
- def get_port_pair_groups_count(self, context, filters=None):
- return self._get_collection_count(context, PortPairGroup,
- filters=filters)
-
- @log_helpers.log_method_call
- def get_port_pair_group(self, context, id, fields=None):
- port_pair_group = self._get_port_pair_group(context, id)
- return self._make_port_pair_group_dict(port_pair_group, fields)
-
- def _get_port_pair_group(self, context, id):
- try:
- return self._get_by_id(context, PortPairGroup, id)
- except exc.NoResultFound:
- raise ext_sfc.PortPairGroupNotFound(id=id)
-
- def _get_flow_classifier(self, context, id):
- try:
- return self._get_by_id(context, fc_db.FlowClassifier, id)
- except exc.NoResultFound:
- raise ext_fc.FlowClassifierNotFound(id=id)
-
- @log_helpers.log_method_call
- def update_port_pair_group(self, context, id, port_pair_group):
- new_pg = port_pair_group['port_pair_group']
-
- with context.session.begin(subtransactions=True):
- portpairs_list = [self._get_port_pair(context, pp_id)
- for pp_id in new_pg.get('port_pairs', [])]
- for portpair in portpairs_list:
- if (
- portpair.portpairgroup_id and
- portpair.portpairgroup_id != id
- ):
- raise ext_sfc.PortPairInUse(id=portpair.id)
-
- old_pg = self._get_port_pair_group(context, id)
- for k, v in six.iteritems(new_pg):
- if k == 'port_pairs':
- port_pairs = [
- self._get_port_pair(context, pp_id)
- for pp_id in v
- ]
- old_pg.port_pairs = port_pairs
- else:
- old_pg[k] = v
-
- return self._make_port_pair_group_dict(old_pg)
-
- @log_helpers.log_method_call
- def delete_port_pair_group(self, context, id):
- try:
- with context.session.begin(subtransactions=True):
- pg = self._get_port_pair_group(context, id)
- if pg.chain_group_associations:
- raise ext_sfc.PortPairGroupInUse(id=id)
- context.session.delete(pg)
- except ext_sfc.PortPairGroupNotFound:
- LOG.info(_LI("Deleting a non-existing port pair group."))
diff --git a/networking_sfc/extensions/__init__.py b/networking_sfc/extensions/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/extensions/__init__.py
+++ /dev/null
diff --git a/networking_sfc/extensions/flowclassifier.py b/networking_sfc/extensions/flowclassifier.py
deleted file mode 100644
index 93d2284..0000000
--- a/networking_sfc/extensions/flowclassifier.py
+++ /dev/null
@@ -1,304 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from abc import ABCMeta
-from abc import abstractmethod
-
-import six
-
-from oslo_config import cfg
-
-from neutron.api import extensions as neutron_ext
-from neutron.api.v2 import attributes as attr
-from neutron.api.v2 import resource_helper
-from neutron.common import constants as const
-from neutron.common import exceptions as neutron_exc
-from neutron.services import service_base
-
-import networking_sfc
-
-
-cfg.CONF.import_opt('api_extensions_path', 'neutron.common.config')
-neutron_ext.append_api_extensions_path(networking_sfc.extensions.__path__)
-FLOW_CLASSIFIER_EXT = "flow_classifier"
-FLOW_CLASSIFIER_PREFIX = "/sfc"
-
-fc_supported_protocols = [const.PROTO_NAME_TCP,
- const.PROTO_NAME_UDP, const.PROTO_NAME_ICMP]
-fc_supported_ethertypes = ['IPv4', 'IPv6']
-SUPPORTED_L7_PARAMETERS = []
-DEFAULT_L7_PARAMETER = {}
-
-
-# Flow Classifier Exceptions
-class FlowClassifierNotFound(neutron_exc.NotFound):
- message = _("Flow Classifier %(id)s not found.")
-
-
-class FlowClassifierPortNotFound(neutron_exc.NotFound):
- message = _("Flow Classifier Neutron Port %(id)s not found.")
-
-
-class FlowClassifierInvalidPortRange(neutron_exc.InvalidInput):
- message = _("Invalid IP protocol port range. min_port_range="
- "%(port_range_min)s must be lesser or equal to "
- "max_port_range=%(port_range_max)s.")
-
-
-class FlowClassifierInvalidPortValue(neutron_exc.InvalidInput):
- message = _("Flow Classifier has invalid port value %(port)s")
-
-
-class FlowClassiferDuplicateInformation(neutron_exc.InvalidInput):
- message = _("Flow Classfier has duplicate information: "
- "Neutron Port id %(port_id)s and ip prefix %(ip_prefix)s")
-
-
-class FlowClassifierInUse(neutron_exc.InUse):
- message = _("Flow Classifier %(id)s in use.")
-
-
-class FlowClassifierInvalidProtocol(neutron_exc.InvalidInput):
- message = _("Flow Classifier does not support protocol %(protocol)s. "
- "Supported protocol values are %(values)s.")
-
-
-class FlowClassifierInvalidEthertype(neutron_exc.InvalidInput):
- message = _("Flow Classifier does not support ethertype %(ethertype)s. "
- "Supported ethertype values are %(values)s.")
-
-
-class FlowClassifierProtocolRequiredWithPorts(neutron_exc.InvalidInput):
- message = _("IP protocol must be TCP or UDP, if port range is given.")
-
-
-class FlowClassifierInvalidL7Parameter(neutron_exc.InvalidInput):
- message = _(
- "Flow classifier does not support L7 parameter "
- "(%%(key)s, %%(value)s). Supported L7 parameters are "
- "%(supported_parameters)s."
- ) % {'supported_parameters': SUPPORTED_L7_PARAMETERS}
-
-
-def normalize_protocol(value):
- if value is None:
- return None
- if isinstance(value, six.string_types):
- if value.lower() in fc_supported_protocols:
- return value.lower()
- raise FlowClassifierInvalidProtocol(
- protocol=value, values=fc_supported_protocols)
-
-
-def normalize_ethertype(value):
- if value is None:
- return 'IPv4'
- if isinstance(value, six.string_types):
- for ether_type in fc_supported_ethertypes:
- if value.lower() == ether_type.lower():
- return ether_type
- raise FlowClassifierInvalidEthertype(
- ethertype=value, values=fc_supported_ethertypes)
-
-
-def normalize_string(value):
- if value is None:
- return ''
- return value
-
-
-def normalize_port_value(port):
- if port is None:
- return None
- try:
- val = int(port)
- except (ValueError, TypeError):
- raise FlowClassifierInvalidPortValue(port=port)
-
- if 0 <= val <= 65535:
- return val
- else:
- raise FlowClassifierInvalidPortValue(port=port)
-
-
-def normalize_l7parameters(parameters):
- parameters = attr.convert_none_to_empty_dict(parameters)
- if not parameters:
- return DEFAULT_L7_PARAMETER
- for key, value in six.iteritems(parameters):
- if (key, value) not in SUPPORTED_L7_PARAMETERS:
- raise FlowClassifierInvalidL7Parameter(key=key, value=value)
- return parameters
-
-
-# Attribute Map
-RESOURCE_ATTRIBUTE_MAP = {
- 'flow_classifiers': {
- 'id': {
- 'allow_post': False, 'allow_put': False,
- 'is_visible': True,
- 'validate': {'type:uuid': None},
- 'primary_key': True},
- 'name': {
- 'allow_post': True, 'allow_put': True,
- 'is_visible': True, 'default': None,
- 'validate': {'type:string': attr.NAME_MAX_LEN},
- 'convert_to': normalize_string},
- 'description': {
- 'allow_post': True, 'allow_put': True,
- 'is_visible': True, 'default': None,
- 'validate': {'type:string': attr.DESCRIPTION_MAX_LEN},
- 'convert_to': normalize_string},
- 'tenant_id': {
- 'allow_post': True, 'allow_put': False,
- 'is_visible': True,
- 'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
- 'required_by_policy': True},
- 'ethertype': {
- 'allow_post': True, 'allow_put': False,
- 'is_visible': True, 'default': None,
- 'convert_to': normalize_ethertype},
- 'protocol': {
- 'allow_post': True, 'allow_put': False,
- 'is_visible': True, 'default': None,
- 'convert_to': normalize_protocol},
- 'source_port_range_min': {
- 'allow_post': True, 'allow_put': False,
- 'is_visible': True, 'default': None,
- 'convert_to': normalize_port_value},
- 'source_port_range_max': {
- 'allow_post': True, 'allow_put': False,
- 'is_visible': True, 'default': None,
- 'convert_to': normalize_port_value},
- 'destination_port_range_min': {
- 'allow_post': True, 'allow_put': False,
- 'is_visible': True, 'default': None,
- 'convert_to': normalize_port_value},
- 'destination_port_range_max': {
- 'allow_post': True, 'allow_put': False,
- 'is_visible': True, 'default': None,
- 'convert_to': normalize_port_value},
- 'source_ip_prefix': {
- 'allow_post': True, 'allow_put': False,
- 'is_visible': True, 'default': None,
- 'validate': {'type:subnet_or_none': None}},
- 'destination_ip_prefix': {
- 'allow_post': True, 'allow_put': False,
- 'is_visible': True, 'default': None,
- 'validate': {'type:subnet_or_none': None}},
- 'logical_source_port': {
- 'allow_post': True, 'allow_put': False,
- 'is_visible': False, 'default': None,
- 'validate': {'type:uuid_or_none': None}},
- 'logical_destination_port': {
- 'allow_post': True, 'allow_put': False,
- 'is_visible': False, 'default': None,
- 'validate': {'type:uuid_or_none': None}},
- 'l7_parameters': {
- 'allow_post': True, 'allow_put': False,
- 'is_visible': True, 'default': None,
- 'validate': {'type:dict': None},
- 'convert_to': normalize_l7parameters},
- },
-}
-
-flow_classifier_quota_opts = [
- cfg.IntOpt('quota_flow_classifier',
- default=100,
- help=_('Maximum number of flow classifiers per tenant. '
- 'A negative value means unlimited.')),
-]
-cfg.CONF.register_opts(flow_classifier_quota_opts, 'QUOTAS')
-
-
-class Flowclassifier(neutron_ext.ExtensionDescriptor):
- """Flow Classifier extension."""
-
- @classmethod
- def get_name(cls):
- return FLOW_CLASSIFIER_EXT
-
- @classmethod
- def get_alias(cls):
- return FLOW_CLASSIFIER_EXT
-
- @classmethod
- def get_description(cls):
- return "Flow Classifier Extension."
-
- @classmethod
- def get_plugin_interface(cls):
- return FlowClassifierPluginBase
-
- @classmethod
- def get_updated(cls):
- return "2015-10-05T10:00:00-00:00"
-
- def update_attributes_map(self, attributes):
- super(Flowclassifier, self).update_attributes_map(
- attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
-
- @classmethod
- def get_resources(cls):
- """Returns Ext Resources."""
- plural_mappings = resource_helper.build_plural_mappings(
- {}, RESOURCE_ATTRIBUTE_MAP)
- plural_mappings['flow_classifiers'] = 'flow_classifier'
- attr.PLURALS.update(plural_mappings)
- return resource_helper.build_resource_info(
- plural_mappings,
- RESOURCE_ATTRIBUTE_MAP,
- FLOW_CLASSIFIER_EXT,
- register_quota=True)
-
- def get_extended_resources(self, version):
- if version == "2.0":
- return RESOURCE_ATTRIBUTE_MAP
- else:
- return {}
-
-
-@six.add_metaclass(ABCMeta)
-class FlowClassifierPluginBase(service_base.ServicePluginBase):
-
- def get_plugin_name(self):
- return FLOW_CLASSIFIER_EXT
-
- def get_plugin_type(self):
- return FLOW_CLASSIFIER_EXT
-
- def get_plugin_description(self):
- return 'Flow classifier plugin'
-
- @abstractmethod
- def create_flow_classifier(self, context, flow_classifier):
- pass
-
- @abstractmethod
- def update_flow_classifier(self, context, id, flow_classifier):
- pass
-
- @abstractmethod
- def delete_flow_classifier(self, context, id):
- pass
-
- @abstractmethod
- def get_flow_classifiers(self, context, filters=None, fields=None,
- sorts=None, limit=None, marker=None,
- page_reverse=False):
- pass
-
- @abstractmethod
- def get_flow_classifier(self, context, id, fields=None):
- pass
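
For reference, a hypothetical REST call exercising this extension. The host, port, URL layout, and token below are assumptions (placeholders inferred from FLOW_CLASSIFIER_PREFIX), not values taken from this tree:

    import requests

    payload = {
        'flow_classifier': {
            'name': 'web-traffic',
            'ethertype': 'IPv4',
            'protocol': 'tcp',
            'destination_port_range_min': 80,
            'destination_port_range_max': 80,
            'source_ip_prefix': '10.0.0.0/24',
        }
    }
    resp = requests.post(
        'http://controller:9696/v2.0/sfc/flow_classifiers',  # assumed URL
        json=payload,
        headers={'X-Auth-Token': 'TOKEN'},  # placeholder token
    )
    print(resp.status_code, resp.json())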
diff --git a/networking_sfc/extensions/sfc.py b/networking_sfc/extensions/sfc.py
deleted file mode 100644
index 67808b3..0000000
--- a/networking_sfc/extensions/sfc.py
+++ /dev/null
@@ -1,382 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from abc import ABCMeta
-from abc import abstractmethod
-
-import six
-
-from oslo_config import cfg
-
-from neutron.api import extensions as neutron_ext
-from neutron.api.v2 import attributes as attr
-from neutron.api.v2 import resource_helper
-from neutron.common import exceptions as neutron_exc
-from neutron.services import service_base
-
-import networking_sfc
-
-
-cfg.CONF.import_opt('api_extensions_path', 'neutron.common.config')
-neutron_ext.append_api_extensions_path(networking_sfc.extensions.__path__)
-
-SFC_EXT = "sfc"
-SFC_PREFIX = "/sfc"
-
-SUPPORTED_CHAIN_PARAMETERS = [('correlation', 'mpls')]
-DEFAULT_CHAIN_PARAMETER = {'correlation': 'mpls'}
-SUPPORTED_SF_PARAMETERS = [('correlation', None)]
-DEFAULT_SF_PARAMETER = {'correlation': None}
-
-
-# Port Chain Exceptions
-class PortChainNotFound(neutron_exc.NotFound):
- message = _("Port chain %(id)s not found.")
-
-
-class InvalidChainParameter(neutron_exc.InvalidInput):
- message = _(
- "Chain parameter does not support (%%(key)s, %%(value)s). "
- "Supported chain parameters are %(supported_paramters)s"
- ) % {'supported_paramters': SUPPORTED_CHAIN_PARAMETERS}
-
-
-class InvalidServiceFunctionParameter(neutron_exc.InvalidInput):
- message = _(
- "Service function parameter does not support (%%(key)s, %%(value)s). "
- "Supported service function parameters are %(supported_paramters)s"
- ) % {'supported_paramters': SUPPORTED_SF_PARAMETERS}
-
-
-class PortPairGroupNotSpecified(neutron_exc.InvalidInput):
- message = _("Port pair group is not specified in port chain")
-
-
-class InvalidPortPairGroups(neutron_exc.InUse):
- message = _("Port pair groups %(port_pair_groups)s in use by "
- "port chain %(port_chain)s.")
-
-
-class PortPairPortNotFound(neutron_exc.NotFound):
- message = _("Port pair port %(id)s not found.")
-
-
-class PortPairIngressEgressDifferentHost(neutron_exc.InvalidInput):
- message = _("Port pair inegress port %(ingress)s "
- "egress port %(egress)s not in the same host.")
-
-
-class PortPairIngressNoHost(neutron_exc.InvalidInput):
- message = _("Port pair ingress port %(ingress)s does not "
- "belong to a host.")
-
-
-class PortPairEgressNoHost(neutron_exc.InvalidInput):
- message = _("Port pair egress port %(egress)s does not "
- "belong to a host.")
-
-
-class PortPairNotFound(neutron_exc.NotFound):
- message = _("Port pair %(id)s not found.")
-
-
-class PortPairGroupNotFound(neutron_exc.NotFound):
- message = _("Port pair group %(id)s not found.")
-
-
-class PortPairGroupInUse(neutron_exc.InUse):
- message = _("Port pair group %(id)s in use.")
-
-
-class PortPairInUse(neutron_exc.InUse):
- message = _("Port pair %(id)s in use.")
-
-
-def normalize_string(value):
- if value is None:
- return ''
- return value
-
-
-def normalize_port_pair_groups(port_pair_groups):
- port_pair_groups = attr.convert_none_to_empty_list(port_pair_groups)
- if not port_pair_groups:
- raise PortPairGroupNotSpecified()
- return port_pair_groups
-
-
-def normalize_chain_parameters(parameters):
- parameters = attr.convert_none_to_empty_dict(parameters)
- if not parameters:
- return DEFAULT_CHAIN_PARAMETER
- for key, value in six.iteritems(parameters):
- if (key, value) not in SUPPORTED_CHAIN_PARAMETERS:
- raise InvalidChainParameter(key=key, value=value)
- return parameters
-
-
-def normalize_sf_parameters(parameters):
- parameters = attr.convert_none_to_empty_dict(parameters)
- if not parameters:
- return DEFAULT_SF_PARAMETER
- for key, value in six.iteritems(parameters):
- if (key, value) not in SUPPORTED_SF_PARAMETERS:
- raise InvalidServiceFunctionParameter(key=key, value=value)
- return parameters
-
-
-RESOURCE_ATTRIBUTE_MAP = {
- 'port_pairs': {
- 'id': {
- 'allow_post': False, 'allow_put': False,
- 'is_visible': True,
- 'validate': {'type:uuid': None},
- 'primary_key': True},
- 'name': {
- 'allow_post': True, 'allow_put': True,
- 'is_visible': True, 'default': None,
- 'validate': {'type:string': attr.NAME_MAX_LEN},
- 'convert_to': normalize_string},
- 'description': {
- 'allow_post': True, 'allow_put': True,
- 'is_visible': True, 'default': None,
- 'validate': {'type:string': attr.DESCRIPTION_MAX_LEN},
- 'convert_to': normalize_string},
- 'tenant_id': {
- 'allow_post': True, 'allow_put': False,
- 'is_visible': True,
- 'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
- 'required_by_policy': True},
- 'ingress': {
- 'allow_post': True, 'allow_put': False,
- 'is_visible': True,
- 'validate': {'type:uuid': None}},
- 'egress': {
- 'allow_post': True, 'allow_put': False,
- 'is_visible': True,
- 'validate': {'type:uuid': None}},
- 'service_function_parameters': {
- 'allow_post': True, 'allow_put': False,
- 'is_visible': True, 'default': None,
- 'validate': {'type:dict': None},
- 'convert_to': normalize_sf_parameters},
- },
- 'port_chains': {
- 'id': {
- 'allow_post': False, 'allow_put': False,
- 'is_visible': True,
- 'validate': {'type:uuid': None},
- 'primary_key': True},
- 'name': {
- 'allow_post': True, 'allow_put': True,
- 'is_visible': True, 'default': None,
- 'validate': {'type:string': attr.NAME_MAX_LEN},
- 'convert_to': normalize_string},
- 'description': {
- 'allow_post': True, 'allow_put': True,
- 'is_visible': True, 'default': None,
- 'validate': {'type:string': attr.DESCRIPTION_MAX_LEN},
- 'convert_to': normalize_string},
- 'tenant_id': {
- 'allow_post': True, 'allow_put': False,
- 'is_visible': True,
- 'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
- 'required_by_policy': True},
- 'port_pair_groups': {
- 'allow_post': True, 'allow_put': False,
- 'is_visible': True,
- 'validate': {'type:uuid_list': None},
- 'convert_to': normalize_port_pair_groups},
- 'flow_classifiers': {
- 'allow_post': True, 'allow_put': True,
- 'is_visible': True, 'default': None,
- 'validate': {'type:uuid_list': None},
- 'convert_to': attr.convert_none_to_empty_list},
- 'chain_parameters': {
- 'allow_post': True, 'allow_put': False,
- 'is_visible': True, 'default': None,
- 'validate': {'type:dict': None},
- 'convert_to': normalize_chain_parameters},
- },
- 'port_pair_groups': {
- 'id': {
- 'allow_post': False, 'allow_put': False,
- 'is_visible': True,
- 'validate': {'type:uuid': None},
- 'primary_key': True},
- 'name': {
- 'allow_post': True, 'allow_put': True,
- 'is_visible': True, 'default': None,
- 'validate': {'type:string': attr.NAME_MAX_LEN},
- 'convert_to': normalize_string},
- 'description': {
- 'allow_post': True, 'allow_put': True,
- 'is_visible': True, 'default': None,
- 'validate': {'type:string': attr.DESCRIPTION_MAX_LEN},
- 'convert_to': normalize_string},
- 'tenant_id': {
- 'allow_post': True, 'allow_put': False,
- 'is_visible': True,
- 'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
- 'required_by_policy': True},
- 'port_pairs': {
- 'allow_post': True, 'allow_put': True,
- 'is_visible': True, 'default': None,
- 'validate': {'type:uuid_list': None},
- 'convert_to': attr.convert_none_to_empty_list},
- },
-}
-
-sfc_quota_opts = [
- cfg.IntOpt('quota_port_chain',
- default=10,
- help=_('Maximum number of port chains per tenant. '
- 'A negative value means unlimited.')),
- cfg.IntOpt('quota_port_pair_group',
- default=10,
- help=_('Maximum number of port pair groups per tenant. '
- 'A negative value means unlimited.')),
- cfg.IntOpt('quota_port_pair',
- default=100,
- help=_('Maximum number of port pairs per tenant. '
- 'A negative value means unlimited.'))
-]
-
-cfg.CONF.register_opts(sfc_quota_opts, 'QUOTAS')
-
-
-class Sfc(neutron_ext.ExtensionDescriptor):
- """Service Function Chain extension."""
-
- @classmethod
- def get_name(cls):
- return SFC_EXT
-
- @classmethod
- def get_alias(cls):
- return SFC_EXT
-
- @classmethod
- def get_description(cls):
- return "service function chains extension."
-
- @classmethod
- def get_plugin_interface(cls):
- return SfcPluginBase
-
- @classmethod
- def get_updated(cls):
- return "2015-10-05T10:00:00-00:00"
-
- def update_attributes_map(self, attributes):
- super(Sfc, self).update_attributes_map(
- attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
-
- @classmethod
- def get_resources(cls):
- """Returns Ext Resources."""
- plural_mappings = resource_helper.build_plural_mappings(
- {}, RESOURCE_ATTRIBUTE_MAP)
- plural_mappings['sfcs'] = 'sfc'
- attr.PLURALS.update(plural_mappings)
- return resource_helper.build_resource_info(
- plural_mappings,
- RESOURCE_ATTRIBUTE_MAP,
- SFC_EXT,
- register_quota=True)
-
- def get_extended_resources(self, version):
- if version == "2.0":
- return RESOURCE_ATTRIBUTE_MAP
- else:
- return {}
-
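-# Illustrative note (not part of the original module): build_resource_info
-# turns each RESOURCE_ATTRIBUTE_MAP key into a REST collection, so this
-# extension exposes port_chains, port_pair_groups and port_pairs endpoints
-# (the exact URL prefix is defined elsewhere in this module).
-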
-
-@six.add_metaclass(ABCMeta)
-class SfcPluginBase(service_base.ServicePluginBase):
-
- def get_plugin_name(self):
- return SFC_EXT
-
- def get_plugin_type(self):
- return SFC_EXT
-
- def get_plugin_description(self):
- return 'SFC service plugin for service chaining'
-
- @abstractmethod
- def create_port_chain(self, context, port_chain):
- pass
-
- @abstractmethod
- def update_port_chain(self, context, id, port_chain):
- pass
-
- @abstractmethod
- def delete_port_chain(self, context, id):
- pass
-
- @abstractmethod
- def get_port_chains(self, context, filters=None, fields=None,
- sorts=None, limit=None, marker=None,
- page_reverse=False):
- pass
-
- @abstractmethod
- def get_port_chain(self, context, id, fields=None):
- pass
-
- @abstractmethod
- def create_port_pair_group(self, context, port_pair_group):
- pass
-
- @abstractmethod
- def update_port_pair_group(self, context, id, port_pair_group):
- pass
-
- @abstractmethod
- def delete_port_pair_group(self, context, id):
- pass
-
- @abstractmethod
- def get_port_pair_groups(self, context, filters=None, fields=None,
- sorts=None, limit=None, marker=None,
- page_reverse=False):
- pass
-
- @abstractmethod
- def get_port_pair_group(self, context, id, fields=None):
- pass
-
- @abstractmethod
- def create_port_pair(self, context, port_pair):
- pass
-
- @abstractmethod
- def update_port_pair(self, context, id, port_pair):
- pass
-
- @abstractmethod
- def delete_port_pair(self, context, id):
- pass
-
- @abstractmethod
- def get_port_pairs(self, context, filters=None, fields=None,
- sorts=None, limit=None, marker=None,
- page_reverse=False):
- pass
-
- @abstractmethod
- def get_port_pair(self, context, id, fields=None):
- pass
diff --git a/networking_sfc/services/__init__.py b/networking_sfc/services/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/services/__init__.py
+++ /dev/null
diff --git a/networking_sfc/services/flowclassifier/__init__.py b/networking_sfc/services/flowclassifier/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/services/flowclassifier/__init__.py
+++ /dev/null
diff --git a/networking_sfc/services/flowclassifier/common/__init__.py b/networking_sfc/services/flowclassifier/common/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/services/flowclassifier/common/__init__.py
+++ /dev/null
diff --git a/networking_sfc/services/flowclassifier/common/config.py b/networking_sfc/services/flowclassifier/common/config.py
deleted file mode 100644
index ed2496f..0000000
--- a/networking_sfc/services/flowclassifier/common/config.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-
-
-FLOWCLASSIFIER_DRIVER_OPTS = [
- cfg.ListOpt('drivers',
- default=['dummy'],
- help=_("An ordered list of flow classifier drivers "
- "entrypoints to be loaded from the "
- "networking_sfc.flowclassifier.drivers namespace.")),
-]
-
-
-cfg.CONF.register_opts(FLOWCLASSIFIER_DRIVER_OPTS, "flowclassifier")
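-
-# A minimal sketch (not part of the original module) of selecting drivers in
-# neutron.conf, assuming the standard oslo.config ini loading:
-#
-#     [flowclassifier]
-#     drivers = dummy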
diff --git a/networking_sfc/services/flowclassifier/common/context.py b/networking_sfc/services/flowclassifier/common/context.py
deleted file mode 100644
index d873077..0000000
--- a/networking_sfc/services/flowclassifier/common/context.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-class FlowClassifierPluginContext(object):
- """Flow Classifier context base class."""
- def __init__(self, plugin, plugin_context):
- self._plugin = plugin
- self._plugin_context = plugin_context
-
-
-class FlowClassifierContext(FlowClassifierPluginContext):
-
- def __init__(self, plugin, plugin_context, flowclassifier,
- original_flowclassifier=None):
- super(FlowClassifierContext, self).__init__(plugin, plugin_context)
- self._flowclassifier = flowclassifier
- self._original_flowclassifier = original_flowclassifier
-
- @property
- def current(self):
- return self._flowclassifier
-
- @property
- def original(self):
- return self._original_flowclassifier
diff --git a/networking_sfc/services/flowclassifier/common/exceptions.py b/networking_sfc/services/flowclassifier/common/exceptions.py
deleted file mode 100644
index 9f186c0..0000000
--- a/networking_sfc/services/flowclassifier/common/exceptions.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Exceptions used by FlowClassifier plugin and drivers."""
-
-from neutron.common import exceptions
-
-
-class FlowClassifierDriverError(exceptions.NeutronException):
- """flow classifier driver call failed."""
- message = _("%(method)s failed.")
-
-
-class FlowClassifierException(exceptions.NeutronException):
- """Base for flow classifier driver exceptions returned to user."""
- pass
-
-
-class FlowClassifierBadRequest(exceptions.BadRequest, FlowClassifierException):
- """Base for flow classifier driver bad request exceptions."""
- pass
diff --git a/networking_sfc/services/flowclassifier/driver_manager.py b/networking_sfc/services/flowclassifier/driver_manager.py
deleted file mode 100644
index 1af4470..0000000
--- a/networking_sfc/services/flowclassifier/driver_manager.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-from oslo_log import log
-import stevedore
-
-from neutron.i18n import _LE
-from neutron.i18n import _LI
-
-from networking_sfc.services.flowclassifier.common import exceptions as fc_exc
-
-
-LOG = log.getLogger(__name__)
-cfg.CONF.import_opt('drivers',
- 'networking_sfc.services.flowclassifier.common.config',
- group='flowclassifier')
-
-
-class FlowClassifierDriverManager(stevedore.named.NamedExtensionManager):
- """Implementation of Flow Classifier drivers."""
-
- def __init__(self):
- # Registered flow classifier drivers, keyed by name.
- self.drivers = {}
- # Ordered list of flow classifier drivers, defining
- # the order in which the drivers are called.
- self.ordered_drivers = []
- LOG.info(_LI("Configured Flow Classifier drivers: %s"),
- cfg.CONF.flowclassifier.drivers)
- super(FlowClassifierDriverManager, self).__init__(
- 'networking_sfc.flowclassifier.drivers',
- cfg.CONF.flowclassifier.drivers,
- invoke_on_load=True,
- name_order=True)
- LOG.info(_LI("Loaded Flow Classifier drivers: %s"),
- self.names())
- self._register_drivers()
-
- def _register_drivers(self):
- """Register all Flow Classifier drivers.
-
- This method should only be called once in the
- FlowClassifierDriverManager constructor.
- """
- for ext in self:
- self.drivers[ext.name] = ext
- self.ordered_drivers.append(ext)
- LOG.info(_LI("Registered Flow Classifier drivers: %s"),
- [driver.name for driver in self.ordered_drivers])
-
- def initialize(self):
- # ServiceChain bulk operations requires each driver to support them
- self.native_bulk_support = True
- for driver in self.ordered_drivers:
- LOG.info(_LI("Initializing Flow Classifier driver '%s'"),
- driver.name)
- driver.obj.initialize()
- self.native_bulk_support &= getattr(driver.obj,
- 'native_bulk_support', True)
-
- def _call_drivers(self, method_name, context):
- """Helper method for calling a method across all drivers.
-
- :param method_name: name of the method to call
- :param context: context parameter to pass to each method call
-
-        If any driver raises an exception, it is logged and re-raised
-        as FlowClassifierDriverError; the remaining drivers are not
-        called.
- """
- for driver in self.ordered_drivers:
- try:
- getattr(driver.obj, method_name)(context)
- except Exception as e:
- # This is an internal failure.
- LOG.exception(e)
- LOG.error(
-                    _LE("Flow Classifier driver '%(name)s' "
-                        "failed in %(method)s"),
- {'name': driver.name, 'method': method_name}
- )
- raise fc_exc.FlowClassifierDriverError(
- method=method_name
- )
-
- def create_flow_classifier(self, context):
- self._call_drivers("create_flow_classifier", context)
-
- def update_flow_classifier(self, context):
- self._call_drivers("update_flow_classifier", context)
-
- def delete_flow_classifier(self, context):
- self._call_drivers("delete_flow_classifier", context)
diff --git a/networking_sfc/services/flowclassifier/drivers/__init__.py b/networking_sfc/services/flowclassifier/drivers/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/services/flowclassifier/drivers/__init__.py
+++ /dev/null
diff --git a/networking_sfc/services/flowclassifier/drivers/base.py b/networking_sfc/services/flowclassifier/drivers/base.py
deleted file mode 100644
index eeaa60a..0000000
--- a/networking_sfc/services/flowclassifier/drivers/base.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import abc
-import six
-
-
-@six.add_metaclass(abc.ABCMeta)
-class FlowClassifierDriverBase(object):
- """Flow Classifier Driver Base Class."""
-
- @abc.abstractmethod
- def create_flow_classifier(self, context):
- pass
-
- @abc.abstractmethod
- def delete_flow_classifier(self, context):
- pass
-
- @abc.abstractmethod
- def update_flow_classifier(self, context):
- pass
diff --git a/networking_sfc/services/flowclassifier/drivers/dummy/__init__.py b/networking_sfc/services/flowclassifier/drivers/dummy/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/services/flowclassifier/drivers/dummy/__init__.py
+++ /dev/null
diff --git a/networking_sfc/services/flowclassifier/drivers/dummy/dummy.py b/networking_sfc/services/flowclassifier/drivers/dummy/dummy.py
deleted file mode 100644
index d032cc9..0000000
--- a/networking_sfc/services/flowclassifier/drivers/dummy/dummy.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import helpers as log_helpers
-
-from networking_sfc.services.flowclassifier.drivers import base as fc_driver
-
-
-class DummyDriver(fc_driver.FlowClassifierDriverBase):
- """Flow Classifier Driver Dummy Class."""
- def initialize(self):
- pass
-
- @log_helpers.log_method_call
- def create_flow_classifier(self, context):
- pass
-
- @log_helpers.log_method_call
- def update_flow_classifier(self, context):
- pass
-
- @log_helpers.log_method_call
- def delete_flow_classifier(self, context):
- pass
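-
-# A minimal sketch (not part of the original module): how a driver class like
-# this is exposed to stevedore, assuming the usual setup.cfg entry-point
-# wiring (section contents are illustrative):
-#
-#     [entry_points]
-#     networking_sfc.flowclassifier.drivers =
-#         dummy = networking_sfc.services.flowclassifier.drivers.dummy.dummy:DummyDriver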
diff --git a/networking_sfc/services/flowclassifier/plugin.py b/networking_sfc/services/flowclassifier/plugin.py
deleted file mode 100644
index 692e1d8..0000000
--- a/networking_sfc/services/flowclassifier/plugin.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import helpers as log_helpers
-from oslo_log import log as logging
-from oslo_utils import excutils
-
-from neutron.i18n import _LE
-from neutron import manager
-
-from networking_sfc.db import flowclassifier_db as fc_db
-from networking_sfc.extensions import flowclassifier as fc_ext
-from networking_sfc.services.flowclassifier.common import context as fc_ctx
-from networking_sfc.services.flowclassifier.common import exceptions as fc_exc
-from networking_sfc.services.flowclassifier import driver_manager as fc_driver
-
-
-LOG = logging.getLogger(__name__)
-
-
-class FlowClassifierPlugin(fc_db.FlowClassifierDbPlugin):
-    """Implementation of the Flow Classifier plugin."""
-
- supported_extension_aliases = [fc_ext.FLOW_CLASSIFIER_EXT]
- path_prefix = fc_ext.FLOW_CLASSIFIER_PREFIX
-
- def __init__(self):
- self.driver_manager = fc_driver.FlowClassifierDriverManager()
- super(FlowClassifierPlugin, self).__init__()
- self.driver_manager.initialize()
-
- def _get_port(self, context, id):
- port = super(FlowClassifierPlugin, self)._get_port(context, id)
- core_plugin = manager.NeutronManager.get_plugin()
- return core_plugin.get_port(context, port['id'])
-
- def _get_fixed_ip_from_port(self, context, logical_port, ip_prefix):
- if logical_port is not None:
- port = self._get_port(context, logical_port)
- if (
- ip_prefix is None and
- 'fixed_ips' in port and
- port['fixed_ips']
- ):
- for fixed_ip in port['fixed_ips']:
- ip_prefix = (
- '%s/32' % fixed_ip['ip_address']
- )
- break
- return ip_prefix
-
- @log_helpers.log_method_call
- def create_flow_classifier(self, context, flow_classifier):
- fc_db = super(FlowClassifierPlugin, self).create_flow_classifier(
- context, flow_classifier)
- fc_db_context = fc_ctx.FlowClassifierContext(self, context, fc_db)
- try:
- self.driver_manager.create_flow_classifier(fc_db_context)
- except fc_exc.FlowClassifierDriverError as e:
- LOG.exception(e)
- with excutils.save_and_reraise_exception():
- LOG.error(_LE("Create flow classifier failed, "
- "deleting flow_classifier '%s'"),
- fc_db['id'])
- self.delete_flow_classifier(context, fc_db['id'])
- return fc_db
-
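-    # Illustrative note (not part of the original module): create, update
-    # and delete all follow a "commit to the DB first, then notify drivers"
-    # pattern; on a driver error, create rolls back by deleting the
-    # just-created row before re-raising.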
- @log_helpers.log_method_call
- def update_flow_classifier(self, context, id, flow_classifier):
- original_flowclassifier = self.get_flow_classifier(context, id)
- updated_fc = super(FlowClassifierPlugin, self).update_flow_classifier(
- context, id, flow_classifier)
- fc_db_context = fc_ctx.FlowClassifierContext(
- self, context, updated_fc,
- original_flowclassifier=original_flowclassifier)
-
- try:
- self.driver_manager.update_flow_classifier(fc_db_context)
- except fc_exc.FlowClassifierDriverError as e:
- LOG.exception(e)
- with excutils.save_and_reraise_exception():
- LOG.error(_LE("Update flow classifier failed, "
- "flow_classifier '%s'"),
- updated_fc['id'])
-
- return updated_fc
-
- @log_helpers.log_method_call
- def delete_flow_classifier(self, context, fc_id):
- fc = self.get_flow_classifier(context, fc_id)
- fc_context = fc_ctx.FlowClassifierContext(self, context, fc)
- try:
- self.driver_manager.delete_flow_classifier(fc_context)
-        except fc_exc.FlowClassifierDriverError as e:
- LOG.exception(e)
- with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Delete flow classifier failed, "
-                              "flow_classifier '%s'"),
- fc_id)
-
- super(FlowClassifierPlugin, self).delete_flow_classifier(
- context, fc_id)
diff --git a/networking_sfc/services/sfc/__init__.py b/networking_sfc/services/sfc/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/services/sfc/__init__.py
+++ /dev/null
diff --git a/networking_sfc/services/sfc/agent/__init__.py b/networking_sfc/services/sfc/agent/__init__.py
deleted file mode 100644
index 626812b..0000000
--- a/networking_sfc/services/sfc/agent/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from neutron.common import eventlet_utils
-eventlet_utils.monkey_patch()
diff --git a/networking_sfc/services/sfc/agent/agent.py b/networking_sfc/services/sfc/agent/agent.py
deleted file mode 100644
index 2537f9b..0000000
--- a/networking_sfc/services/sfc/agent/agent.py
+++ /dev/null
@@ -1,891 +0,0 @@
-# Copyright 2015 Huawei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import signal
-import six
-import sys
-
-from neutron.agent.common import config
-from neutron.agent.linux import ip_lib
-
-from networking_sfc.services.sfc.agent import br_int
-from networking_sfc.services.sfc.agent import br_phys
-from networking_sfc.services.sfc.agent import br_tun
-from networking_sfc.services.sfc.common import ovs_ext_lib
-from networking_sfc.services.sfc.drivers.ovs import constants
-from networking_sfc.services.sfc.drivers.ovs import rpc_topics as sfc_topics
-
-from neutron.agent import rpc as agent_rpc
-from neutron.common import config as common_config
-from neutron.common import constants as n_const
-from neutron.common import exceptions
-from neutron.common import rpc as n_rpc
-from neutron.common import topics
-from neutron.common import utils as q_utils
-from neutron.i18n import _LE
-from neutron.i18n import _LI
-from neutron.i18n import _LW
-from neutron.plugins.ml2.drivers.openvswitch.agent.common import (
- constants as ovs_const)
-from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_neutron_agent
-
-from oslo_config import cfg
-from oslo_log import log as logging
-import oslo_messaging
-
-
-LOG = logging.getLogger(__name__)
-
-agent_opts = [
- cfg.StrOpt('sfc_encap_mode', default='mpls',
- help=_("The encapsulation mode of sfc.")),
-]
-
-cfg.CONF.register_opts(agent_opts, "AGENT")
-
-# This table is used to process traffic across different-subnet scenarios.
-# Flow 1: pri=1, ip,dl_dst=nexthop_mac,nw_src=nexthop_subnet, actions=
-# push_mpls:0x8847,set_mpls_label,set_mpls_ttl,push_vlan,output:(patch port
-# or resubmit to table(INGRESS_TABLE))
-# Flow 2: pri=0, ip,dl_dst=nexthop_mac, actions=push_mpls:0x8847,
-# set_mpls_label,set_mpls_ttl,push_vlan,output:(patch port or resubmit to
-# table(INGRESS_TABLE))
-ACROSS_SUBNET_TABLE = 5
-
-# The table has multiple flows that steer traffic for the different chains
-# to the ingress port of different service functions hosted on this Compute
-# node.
-INGRESS_TABLE = 10
-
-# port chain default flow rule priority
-PC_DEF_PRI = 20
-PC_INGRESS_PRI = 30
-
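-# Illustrative sketch (not part of the original module): the egress flow
-# rules below pack the chain path id (nsp) and path index (nsi) into a
-# single MPLS label, while the ingress table matches the label pushed by
-# the previous hop (nsi + 1).
-def _example_mpls_label(nsp, nsi):
-    """Pack nsp/nsi the way the flow rules do, e.g. (1, 254) -> 0x1fe."""
-    return (nsp << 8) | nsi
-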
-
-class FeatureSupportError(exceptions.NeutronException):
-    message = _("Current sfc agent does not support %(feature)s")
-
-
-class SfcPluginApi(object):
- def __init__(self, topic, host):
- self.host = host
- self.target = oslo_messaging.Target(topic=topic, version='1.0')
- self.client = n_rpc.get_client(self.target)
-
- def update_flowrules_status(self, context, flowrules_status):
- cctxt = self.client.prepare()
- return cctxt.call(
- context, 'update_flowrules_status',
- flowrules_status=flowrules_status)
-
- def get_flowrules_by_host_portid(self, context, port_id):
- cctxt = self.client.prepare()
- return cctxt.call(
- context, 'get_flowrules_by_host_portid',
- host=self.host, port_id=port_id)
-
- def get_all_src_node_flowrules(self, context):
- cctxt = self.client.prepare()
- return cctxt.call(
- context, 'get_all_src_node_flowrules',
- host=self.host)
-
-
-class OVSSfcAgent(ovs_neutron_agent.OVSNeutronAgent):
- # history
- # 1.0 Initial version
-    """This class supports MPLS-encapsulated frames.
-
-    Ethernet + MPLS
-    IPv4 Packet:
-    +-------------------------------+---------------+--------------------+
-    | Outer Ethernet, ET=0x8847     | MPLS header   | original IP Packet |
-    +-------------------------------+---------------+--------------------+
- """
-
- target = oslo_messaging.Target(version='1.0')
-
- def __init__(self, bridge_classes, integ_br, tun_br, local_ip,
- bridge_mappings, polling_interval, tunnel_types=None,
- veth_mtu=None, l2_population=False,
- enable_distributed_routing=False,
- minimize_polling=False,
- ovsdb_monitor_respawn_interval=(
- ovs_const.DEFAULT_OVSDBMON_RESPAWN),
- arp_responder=False,
- prevent_arp_spoofing=True,
- use_veth_interconnection=False,
- quitting_rpc_timeout=None):
-
-        """Initialize by reusing network info from the OVS agent."""
- super(OVSSfcAgent, self).__init__(
- bridge_classes, integ_br, tun_br,
- local_ip,
- bridge_mappings, polling_interval, tunnel_types,
- veth_mtu, l2_population,
- enable_distributed_routing,
- minimize_polling,
- ovsdb_monitor_respawn_interval,
- arp_responder,
- prevent_arp_spoofing,
- use_veth_interconnection,
- quitting_rpc_timeout)
-
- self.overlay_encap_mode = cfg.CONF.AGENT.sfc_encap_mode
- self._sfc_setup_rpc()
-
- if self.overlay_encap_mode == 'eth_nsh':
- raise FeatureSupportError(feature=self.overlay_encap_mode)
- elif self.overlay_encap_mode == 'vxlan_nsh':
- raise FeatureSupportError(feature=self.overlay_encap_mode)
- elif self.overlay_encap_mode == 'mpls':
- self._clear_sfc_flow_on_int_br()
- self._setup_src_node_flow_rules_with_mpls()
-
- def _sfc_setup_rpc(self):
- self.sfc_plugin_rpc = SfcPluginApi(
- sfc_topics.SFC_PLUGIN, cfg.CONF.host)
-
- self.topic = sfc_topics.SFC_AGENT
- self.endpoints = [self]
- consumers = [
- [sfc_topics.PORTFLOW, topics.UPDATE],
- [sfc_topics.PORTFLOW, topics.DELETE]
- ]
-
- # subscribe sfc plugin message
- self.connection = agent_rpc.create_consumers(
- self.endpoints,
- self.topic,
- consumers)
-
- def _parse_flow_classifier(self, flow_classifier):
- dl_type, nw_proto, source_port_masks, destination_port_masks = (
- (None, ) * 4)
-
- if (
- not flow_classifier['source_port_range_min'] and
- not flow_classifier['source_port_range_max']
- ):
- # wildcard
- source_port_masks = ['0/0x0']
- elif not flow_classifier['source_port_range_min']:
- source_port_masks = ovs_ext_lib.get_port_mask(
- 1,
- flow_classifier['source_port_range_max'])
- elif not flow_classifier['source_port_range_max']:
- source_port_masks = ovs_ext_lib.get_port_mask(
- flow_classifier['source_port_range_min'],
- 65535)
- else:
- source_port_masks = ovs_ext_lib.get_port_mask(
- flow_classifier['source_port_range_min'],
- flow_classifier['source_port_range_max'])
-
- if (
- not flow_classifier['destination_port_range_min'] and
- not flow_classifier['destination_port_range_max']
- ):
- # wildcard
- destination_port_masks = ['0/0x0']
- elif not flow_classifier['destination_port_range_min']:
- destination_port_masks = ovs_ext_lib.get_port_mask(
- 1,
- flow_classifier['destination_port_range_max'])
- elif not flow_classifier['destination_port_range_max']:
- destination_port_masks = ovs_ext_lib.get_port_mask(
- flow_classifier['destination_port_range_min'],
- 65535)
- else:
- destination_port_masks = ovs_ext_lib.get_port_mask(
- flow_classifier['destination_port_range_min'],
- flow_classifier['destination_port_range_max'])
-
- if "IPv4" == flow_classifier['ethertype']:
- dl_type = 0x0800
- if n_const.PROTO_NAME_TCP == flow_classifier['protocol']:
- nw_proto = n_const.PROTO_NUM_TCP
- elif n_const.PROTO_NAME_UDP == flow_classifier['protocol']:
- nw_proto = n_const.PROTO_NUM_UDP
- elif n_const.PROTO_NAME_ICMP == flow_classifier['protocol']:
- nw_proto = n_const.PROTO_NUM_ICMP
- else:
- nw_proto = None
- elif "IPv6" == flow_classifier['ethertype']:
-        LOG.error(_LE("Current portchain agent does not support IPv6"))
- else:
-            LOG.error(_LE("Invalid ethertype input"))
- return (dl_type, nw_proto,
- source_port_masks, destination_port_masks
- )
-
- def _clear_sfc_flow_on_int_br(self):
- self.int_br.delete_group(group_id='all')
- self.int_br.delete_flows(table=ACROSS_SUBNET_TABLE)
- self.int_br.delete_flows(table=INGRESS_TABLE)
- self.int_br.install_goto(dest_table_id=INGRESS_TABLE,
- priority=PC_DEF_PRI,
- dl_type=0x8847)
- self.int_br.install_drop(table_id=INGRESS_TABLE)
-
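-    # Illustrative note (not part of the original module): after this method
-    # runs, the MPLS pipeline on br-int is roughly
-    #     table=0  priority=20 dl_type=0x8847 actions=goto INGRESS_TABLE
-    #     table=10 (INGRESS_TABLE) default action: drop
-    # and the per-chain ingress flows installed later take precedence over
-    # the default drop.
-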
- def _get_flow_infos_from_flow_classifier(self, flow_classifier):
- flow_infos = []
- nw_src, nw_dst = ((None, ) * 2)
-
- if "IPv4" != flow_classifier['ethertype']:
-        LOG.error(_LE("Current portchain agent does not support IPv6"))
- return flow_infos
-
-        # parse and translate classifier info into match field info
- dl_type, nw_proto, source_port_masks, destination_port_masks = (
- self._parse_flow_classifier(flow_classifier))
-
- if flow_classifier['source_ip_prefix']:
- nw_src = flow_classifier['source_ip_prefix']
- else:
- nw_src = '0.0.0.0/0.0.0.0'
- if flow_classifier['destination_ip_prefix']:
- nw_dst = flow_classifier['destination_ip_prefix']
- else:
- nw_dst = '0.0.0.0/0.0.0.0'
-
- if source_port_masks and destination_port_masks:
- for destination_port in destination_port_masks:
- for source_port in source_port_masks:
- if nw_proto is None:
- flow_infos.append(dict(
- dl_type=dl_type,
- nw_src=nw_src,
- nw_dst=nw_dst,
- tp_src='%s' % source_port,
- tp_dst='%s' % destination_port
- ))
- else:
- flow_infos.append(dict(
- dl_type=dl_type,
- nw_proto=nw_proto,
- nw_src=nw_src,
- nw_dst=nw_dst,
- tp_src='%s' % source_port,
- tp_dst='%s' % destination_port
- ))
-
- return flow_infos
-
- def _get_flow_infos_from_flow_classifier_list(self, flow_classifier_list):
- flow_infos = []
- if not flow_classifier_list:
- return flow_infos
- for flow_classifier in flow_classifier_list:
- flow_infos.extend(
- self._get_flow_infos_from_flow_classifier(flow_classifier)
- )
-
- return flow_infos
-
- def _setup_local_switch_flows_on_int_br(
- self, flowrule, flow_classifier_list,
- actions, add_flow=True, match_inport=True
- ):
- inport_match = {}
- priority = PC_DEF_PRI
-
- if match_inport is True:
- egress_port = self.int_br.get_vif_port_by_id(flowrule['egress'])
- if egress_port:
- inport_match = dict(in_port=egress_port.ofport)
- priority = PC_INGRESS_PRI
-
- for flow_info in self._get_flow_infos_from_flow_classifier_list(
- flow_classifier_list
- ):
- match_info = dict(inport_match, **flow_info)
- if add_flow:
- self.int_br.add_flow(
- table=ovs_const.LOCAL_SWITCHING,
- priority=priority,
- actions=actions, **match_info
- )
- else:
- self.int_br.delete_flows(
- table=ovs_const.LOCAL_SWITCHING,
- **match_info
- )
-
- def _update_destination_ingress_flow_rules(self, flowrule):
- for flow_info in self._get_flow_infos_from_flow_classifier_list(
- flowrule['del_fcs']
- ):
- self.int_br.delete_flows(
- table=ovs_const.LOCAL_SWITCHING,
- in_port=self.patch_tun_ofport,
- **flow_info
- )
- for flow_info in self._get_flow_infos_from_flow_classifier_list(
- flowrule['add_fcs']
- ):
- inport_match = dict(in_port=self.patch_tun_ofport)
- match_info = dict(inport_match, **flow_info)
- self.int_br.install_normal(table_id=ovs_const.LOCAL_SWITCHING,
- priority=PC_INGRESS_PRI,
- **match_info)
-
- def _setup_src_node_flow_rules_with_mpls(self):
- flow_rules = self.sfc_plugin_rpc.get_all_src_node_flowrules(
- self.context)
- if not flow_rules:
- return
- for fr in flow_rules:
- self._setup_egress_flow_rules_with_mpls(fr, False)
-            # If the traffic comes from the patch port, the destination is
-            # on this host, so install a normal forwarding rule instead of
-            # matching the traffic as coming from the source.
-            # A next step is to check whether the traffic comes from a
-            # vRouter on the local host, which needs the same treatment.
- self._update_destination_ingress_flow_rules(fr)
-
- def _setup_egress_flow_rules_with_mpls(self, flowrule, match_inport=True):
- network_type = flowrule['network_type']
- group_id = flowrule.get('next_group_id', None)
- next_hops = flowrule.get('next_hops', None)
- segmentation_id = flowrule['segment_id']
-
- if not next_hops:
- return
-
- if network_type not in ovs_const.TUNNEL_NETWORK_TYPES:
-            LOG.warn(_LW("Currently the %s network type is not supported, "
-                         "only tunnel network types are supported"
-                         ),
- network_type
- )
- return
-
- # if the group is not none, install the egress rule for this SF
- if (
- (flowrule['node_type'] == constants.SRC_NODE or
- flowrule['node_type'] == constants.SF_NODE) and group_id
- ):
- # 1st, install br-int flow rule on table ACROSS_SUBNET_TABLE
- # and group table
- buckets = []
- for item in next_hops:
- if item['net_uuid'] not in self.local_vlan_map:
- self.provision_local_vlan(item['net_uuid'], network_type,
- None, segmentation_id)
- lvm = self.local_vlan_map[item['net_uuid']]
- bucket = (
- 'bucket=weight=%d, mod_dl_dst:%s,'
- 'resubmit(,%d)' % (
- item['weight'],
- item['mac_address'],
- ACROSS_SUBNET_TABLE
- )
- )
- buckets.append(bucket)
-
- no_across_subnet_actions_list = []
- across_subnet_actions_list = []
-
- push_mpls = (
- "push_mpls:0x8847,"
- "set_mpls_label:%d,"
- "set_mpls_ttl:%d,"
- "mod_vlan_vid:%d," %
- ((flowrule['nsp'] << 8) | flowrule['nsi'],
- flowrule['nsi'], lvm.vlan))
-
- no_across_subnet_actions_list.append(push_mpls)
- across_subnet_actions_list.append(push_mpls)
-
- if item['local_endpoint'] == self.local_ip:
- no_across_subnet_actions = (
- "resubmit(,%d)" % INGRESS_TABLE)
- across_subnet_actions = (
- "mod_dl_src:%s, resubmit(,%d)" %
- (item['gw_mac'], INGRESS_TABLE))
- else:
- # same subnet with next hop
- no_across_subnet_actions = ("output:%s" %
- self.patch_tun_ofport)
- across_subnet_actions = ("mod_dl_src:%s, output:%s" %
- (item['gw_mac'],
- self.patch_tun_ofport))
- no_across_subnet_actions_list.append(no_across_subnet_actions)
- across_subnet_actions_list.append(across_subnet_actions)
-
- self.int_br.add_flow(
- table=ACROSS_SUBNET_TABLE,
- priority=1,
- dl_dst=item['mac_address'],
- dl_type=0x0800,
- nw_src=item['cidr'],
- actions="%s" %
- (','.join(no_across_subnet_actions_list)))
- # different subnet with next hop
- self.int_br.add_flow(
- table=ACROSS_SUBNET_TABLE,
- priority=0,
- dl_dst=item['mac_address'],
- actions="%s" %
- (','.join(across_subnet_actions_list)))
-
- buckets = ','.join(buckets)
- group_content = self.int_br.dump_group_for_id(group_id)
- if group_content.find('group_id=%d' % group_id) == -1:
- self.int_br.add_group(group_id=group_id,
- type='select', buckets=buckets)
- else:
- self.int_br.mod_group(group_id=group_id,
- type='select', buckets=buckets)
-
-            # 2nd, install br-int flow rule on table 0 for egress traffic
- enc_actions = ("group:%d" % group_id)
- # to uninstall the removed flow classifiers
- self._setup_local_switch_flows_on_int_br(
- flowrule,
- flowrule['del_fcs'],
- None,
- add_flow=False,
- match_inport=match_inport)
- # to install the added flow classifiers
- self._setup_local_switch_flows_on_int_br(
- flowrule,
- flowrule['add_fcs'],
- enc_actions,
- add_flow=True,
- match_inport=match_inport)
-
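-    # Illustrative note (not part of the original module): with two next
-    # hops the resulting OVS select group looks roughly like
-    #     group_id=1,type=select,
-    #         bucket=weight=1,mod_dl_dst:<sf1_mac>,resubmit(,5),
-    #         bucket=weight=1,mod_dl_dst:<sf2_mac>,resubmit(,5)
-    # so traffic is load-balanced across the port pair group members.
-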
- def _get_network_by_port(self, port_id):
- for key, val in six.iteritems(self.network_ports):
- if port_id in val:
- return key
-
- return None
-
- def _setup_ingress_flow_rules_with_mpls(self, flowrule):
- network_id = self._get_network_by_port(flowrule['ingress'])
- if network_id:
- # install br-int flow rule on table 0 for ingress traffic
- lvm = self.local_vlan_map[network_id]
- vif_port = lvm.vif_ports[flowrule['ingress']]
- match_field = {}
-
- actions = ("strip_vlan, pop_mpls:0x0800,"
- "output:%s" % vif_port.ofport)
- match_field = dict(
- table=INGRESS_TABLE,
- priority=1,
- dl_dst=vif_port.vif_mac,
- dl_vlan=lvm.vlan,
- dl_type=0x8847,
- mpls_label=flowrule['nsp'] << 8 | (flowrule['nsi'] + 1),
- actions=actions)
-
- self.int_br.add_flow(**match_field)
-
- def _setup_last_egress_flow_rules_with_mpls(self, flowrule):
- group_id = flowrule.get('next_group_id', None)
-
-        # check whether the user assigned the destination neutron port.
- if (
- constants.SF_NODE == flowrule['node_type'] and
- not group_id and
- flowrule['egress'] is not None
- ):
-            # to uninstall the newly removed flow classifiers
- self._setup_local_switch_flows_on_int_br(
- flowrule,
- flowrule['del_fcs'],
- None,
- add_flow=False,
- match_inport=True
- )
-
- # to install the added flow classifiers
- self._setup_local_switch_flows_on_int_br(
- flowrule,
- flowrule['add_fcs'],
- actions='normal',
- add_flow=True,
- match_inport=True)
-
- def _get_flow_classifier_dest_port_info(self,
- logical_destination_port,
- flowrule):
- for next_hop in flowrule['next_hops']:
- # this flow classifier's destination port should match
- # with the nexthop's ingress port id
- if logical_destination_port in next_hop.values():
- return next_hop
-
- return None
-
- def _update_flow_rules_with_mpls_enc(self, flowrule, flowrule_status):
- try:
- if flowrule.get('egress', None):
- self._setup_egress_flow_rules_with_mpls(flowrule)
- self._setup_last_egress_flow_rules_with_mpls(flowrule)
- if flowrule.get('ingress', None):
- self._setup_ingress_flow_rules_with_mpls(flowrule)
-
- flowrule_status_temp = {}
- flowrule_status_temp['id'] = flowrule['id']
- flowrule_status_temp['status'] = constants.STATUS_ACTIVE
- flowrule_status.append(flowrule_status_temp)
- except Exception as e:
- flowrule_status_temp = {}
- flowrule_status_temp['id'] = flowrule['id']
- flowrule_status_temp['status'] = constants.STATUS_ERROR
- flowrule_status.append(flowrule_status_temp)
- LOG.exception(e)
- LOG.error(_LE("_update_flow_rules_with_mpls_enc failed"))
-
- def _delete_ports_flowrules_by_id(self, ports_id):
- flowrule_status = []
- try:
- LOG.debug("delete_port_id_flows received, ports_id= %s", ports_id)
- count = 0
- if ports_id:
- for port_id in ports_id:
- flowrule = (
- self.sfc_plugin_rpc.get_flowrules_by_host_portid(
- self.context, port_id
- )
- )
-                    if flowrule:
-                        self._treat_delete_flow_rules(
-                            flowrule, flowrule_status)
-                        count += 1
- LOG.debug(
- "_delete_ports_flowrules_by_id received, count= %s", count)
- except Exception as e:
- LOG.exception(e)
- LOG.error(_LE("delete_port_id_flows failed"))
- if flowrule_status:
- self.sfc_plugin_rpc.update_flowrules_status(
- self.context, flowrule_status)
-
- def _delete_flow_rule_with_mpls_enc(self, flowrule, flowrule_status):
- try:
- LOG.debug("_delete_flow_rule_with_mpls_enc, flowrule = %s",
- flowrule)
- group_id = flowrule.get('next_group_id', None)
-
- # delete tunnel table flow rule on br-int(egress match)
- if flowrule['egress'] is not None:
- self._setup_local_switch_flows_on_int_br(
- flowrule,
- flowrule['del_fcs'],
- None,
- add_flow=False,
- match_inport=True
- )
-
- # delete table INGRESS_TABLE ingress match flow rule
- # on br-int(ingress match)
- network_id = self._get_network_by_port(flowrule['ingress'])
- if network_id:
- # third, install br-int flow rule on table INGRESS_TABLE
- # for ingress traffic
- lvm = self.local_vlan_map[network_id]
- vif_port = lvm.vif_ports[flowrule['ingress']]
- self.int_br.delete_flows(
- table=INGRESS_TABLE,
- dl_type=0x8847,
- dl_dst=vif_port.vif_mac,
- mpls_label=flowrule['nsp'] << 8 | (flowrule['nsi'] + 1)
- )
-
-            # delete the group table entry if no other chain references it
- if group_id and flowrule.get('group_refcnt', None) <= 1:
- self.int_br.delete_group(group_id=group_id)
- for item in flowrule['next_hops']:
- self.int_br.delete_flows(
- table=ACROSS_SUBNET_TABLE,
- dl_dst=item['mac_address'])
- elif (not group_id and
- flowrule['egress'] is not None):
- # to delete last hop flow rule
- for each in flowrule['del_fcs']:
- if each.get('logical_destination_port', None):
- ldp = self._get_flow_classifier_dest_port_info(
- each['logical_destination_port'],
- flowrule
- )
- if ldp:
- self.int_br.delete_flows(
- table=ACROSS_SUBNET_TABLE,
- dl_dst=ldp['mac_address'])
-
- except Exception as e:
- flowrule_status_temp = {}
- flowrule_status_temp['id'] = flowrule['id']
- flowrule_status_temp['status'] = constants.STATUS_ERROR
- flowrule_status.append(flowrule_status_temp)
- LOG.exception(e)
- LOG.error(_LE("_delete_flow_rule_with_mpls_enc failed"))
-
- def _treat_update_flow_rules(self, flowrule, flowrule_status):
- if self.overlay_encap_mode == 'eth_nsh':
- raise FeatureSupportError(feature=self.overlay_encap_mode)
- elif self.overlay_encap_mode == 'vxlan_nsh':
- raise FeatureSupportError(feature=self.overlay_encap_mode)
- elif self.overlay_encap_mode == 'mpls':
- self._update_flow_rules_with_mpls_enc(flowrule, flowrule_status)
-
- def _treat_delete_flow_rules(self, flowrule, flowrule_status):
- if self.overlay_encap_mode == 'eth_nsh':
- raise FeatureSupportError(feature=self.overlay_encap_mode)
- elif self.overlay_encap_mode == 'vxlan_nsh':
- raise FeatureSupportError(feature=self.overlay_encap_mode)
- elif self.overlay_encap_mode == 'mpls':
- self._delete_flow_rule_with_mpls_enc(
- flowrule, flowrule_status)
-
- def update_flow_rules(self, context, **kwargs):
- try:
- flowrule_status = []
- flowrules = kwargs['flowrule_entries']
- LOG.debug("update_flow_rules received, flowrules = %s",
- flowrules)
-
- if flowrules:
- self._treat_update_flow_rules(flowrules, flowrule_status)
- except Exception as e:
- LOG.exception(e)
- LOG.error(_LE("update_flow_rules failed"))
-
- if flowrule_status:
- self.sfc_plugin_rpc.update_flowrules_status(
- self.context, flowrule_status)
-
- def delete_flow_rules(self, context, **kwargs):
- try:
- flowrule_status = []
- flowrules = kwargs['flowrule_entries']
- LOG.debug("delete_flow_rules received, flowrules= %s", flowrules)
- if flowrules:
- self._treat_delete_flow_rules(flowrules, flowrule_status)
- except Exception as e:
- LOG.exception(e)
- LOG.error(_LE("delete_flow_rules failed"))
-
- if flowrule_status:
- self.sfc_plugin_rpc.update_flowrules_status(
- self.context, flowrule_status)
-
- def update_src_node_flow_rules(self, context, **kwargs):
- flowrule = kwargs['flowrule_entries']
- if self.overlay_encap_mode == 'mpls':
- self._setup_egress_flow_rules_with_mpls(flowrule,
- match_inport=False)
- self._update_destination_ingress_flow_rules(flowrule)
-
- def _delete_src_node_flow_rules_with_mpls(self, flowrule,
- match_inport=False):
- LOG.debug("_delete_src_node_flow_rules_with_mpls, flowrule = %s",
- flowrule)
- group_id = flowrule.get('next_group_id', None)
-
- # delete br-int table 0 full match flow
- self._setup_local_switch_flows_on_int_br(
- flowrule,
- flowrule['del_fcs'],
- None,
- add_flow=False,
- match_inport=False)
-
-        # delete the group table entry if no other chain references it
-        if group_id is not None and flowrule.get('group_refcnt', None) <= 1:
- self.int_br.delete_group(group_id=group_id)
- for item in flowrule['next_hops']:
- self.int_br.delete_flows(
- table=ACROSS_SUBNET_TABLE,
- dl_dst=item['mac_address'])
-
- def delete_src_node_flow_rules(self, context, **kwargs):
- flowrule = kwargs['flowrule_entries']
- if self.overlay_encap_mode == 'mpls':
- self._delete_src_node_flow_rules_with_mpls(flowrule,
- match_inport=False)
- self._update_destination_ingress_flow_rules(flowrule)
-
- def sfc_treat_devices_added_updated(self, port_id):
- resync = False
- flowrule_status = []
- try:
- LOG.debug("a new device %s is found", port_id)
- flows_list = (
- self.sfc_plugin_rpc.get_flowrules_by_host_portid(
- self.context, port_id
- )
- )
- if flows_list:
- for flow in flows_list:
- self._treat_update_flow_rules(flow, flowrule_status)
- except Exception as e:
- LOG.exception(e)
-            LOG.error(_LE("sfc_treat_devices_added_updated failed"))
- resync = True
-
- if flowrule_status:
- self.sfc_plugin_rpc.update_flowrules_status(
- self.context, flowrule_status)
-
- return resync
-
- def sfc_treat_devices_removed(self, port_ids):
- resync = False
- for port_id in port_ids:
- LOG.info(_LI("a device %s is removed"), port_id)
- try:
-                self._delete_ports_flowrules_by_id([port_id])
- except Exception as e:
- LOG.exception(e)
- LOG.error(
- _LE("delete port flow rule failed for %(port_id)s"),
- {'port_id': port_id}
- )
- resync = True
-
- return resync
-
- def treat_devices_added_or_updated(self, devices, ovs_restarted):
- skipped_devices = []
- need_binding_devices = []
- security_disabled_devices = []
- devices_details_list = (
- self.plugin_rpc.get_devices_details_list_and_failed_devices(
- self.context,
- devices,
- self.agent_id,
- self.conf.host
- )
- )
- if devices_details_list.get('failed_devices'):
- # TODO(rossella_s): handle better the resync in next patches,
- # this is just to preserve the current behavior
- raise ovs_neutron_agent.DeviceListRetrievalError(devices=devices)
-
- devices = devices_details_list.get('devices')
- vif_by_id = self.int_br.get_vifs_by_ids(
- [vif['device'] for vif in devices])
- for details in devices:
- device = details['device']
- LOG.debug("Processing port: %s", device)
- port = vif_by_id.get(device)
- if not port:
- # The port disappeared and cannot be processed
- LOG.info(_LI("Port %s was not found on the integration bridge "
- "and will therefore not be processed"), device)
- skipped_devices.append(device)
- continue
-
- if 'port_id' in details:
- LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
- {'device': device, 'details': details})
- details['vif_port'] = port
- need_binding = self.treat_vif_port(port, details['port_id'],
- details['network_id'],
- details['network_type'],
- details['physical_network'],
- details['segmentation_id'],
- details['admin_state_up'],
- details['fixed_ips'],
- details['device_owner'],
- ovs_restarted)
- if need_binding:
- need_binding_devices.append(details)
-
- port_security = details['port_security_enabled']
- has_sgs = 'security_groups' in details
- if not port_security or not has_sgs:
- security_disabled_devices.append(device)
- self._update_port_network(details['port_id'],
- details['network_id'])
- self.ext_manager.handle_port(self.context, details)
- self.sfc_treat_devices_added_updated(details['port_id'])
- else:
- LOG.warn(_LW("Device %s not defined on plugin"), device)
- if (port and port.ofport != -1):
- self.port_dead(port)
- return skipped_devices, need_binding_devices, security_disabled_devices
-
- def process_deleted_ports(self, port_info):
- # don't try to process removed ports as deleted ports since
- # they are already gone
- if 'removed' in port_info:
- self.deleted_ports -= port_info['removed']
- deleted_ports = list(self.deleted_ports)
- while self.deleted_ports:
- port_id = self.deleted_ports.pop()
- port = self.int_br.get_vif_port_by_id(port_id)
- self._clean_network_ports(port_id)
- self.ext_manager.delete_port(self.context,
- {"vif_port": port,
- "port_id": port_id})
-                self.sfc_treat_devices_removed([port_id])
- # move to dead VLAN so deleted ports no
- # longer have access to the network
- if port:
- # don't log errors since there is a chance someone will be
- # removing the port from the bridge at the same time
- self.port_dead(port, log_errors=False)
- self.port_unbound(port_id)
- # Flush firewall rules after ports are put on dead VLAN to be
- # more secure
- self.sg_agent.remove_devices_filter(deleted_ports)
-
-
-def main():
- cfg.CONF.register_opts(ip_lib.OPTS)
- config.register_root_helper(cfg.CONF)
- common_config.init(sys.argv[1:])
- common_config.setup_logging()
- q_utils.log_opt_values(LOG)
-
- try:
- agent_config = ovs_neutron_agent.create_agent_config_map(cfg.CONF)
- except ValueError as e:
- LOG.exception(e)
- LOG.error(_LE('Agent terminated!'))
- sys.exit(1)
-
- is_xen_compute_host = 'rootwrap-xen-dom0' in cfg.CONF.AGENT.root_helper
- if is_xen_compute_host:
- # Force ip_lib to always use the root helper to ensure that ip
- # commands target xen dom0 rather than domU.
- cfg.CONF.set_default('ip_lib_force_root', True)
-
- bridge_classes = {
- 'br_int': br_int.OVSIntegrationBridge,
- 'br_phys': br_phys.OVSPhysicalBridge,
- 'br_tun': br_tun.OVSTunnelBridge,
- }
- try:
- agent = OVSSfcAgent(bridge_classes, **agent_config)
- except RuntimeError as e:
- LOG.exception(e)
- LOG.error(_LE("Agent terminated!"))
- sys.exit(1)
- signal.signal(signal.SIGTERM, agent._handle_sigterm)
-
- # Start everything.
- LOG.info(_LI("Agent initialized successfully, now running... "))
- agent.daemon_loop()
-
-
-if __name__ == "__main__":
- main()
diff --git a/networking_sfc/services/sfc/agent/br_int.py b/networking_sfc/services/sfc/agent/br_int.py
deleted file mode 100644
index 1f88c01..0000000
--- a/networking_sfc/services/sfc/agent/br_int.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright 2015 Huawei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-* references
-** OVS agent https://wiki.openstack.org/wiki/Ovs-flow-logic
-"""
-
-from networking_sfc.services.sfc.common import ovs_ext_lib
-from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
-from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl import (
- br_int)
-
-
-class OVSIntegrationBridge(
- br_int.OVSIntegrationBridge,
- ovs_ext_lib.OVSBridgeExt
-):
- def setup_controllers(self, conf):
- self.set_protocols("[]")
- self.del_controller()
-
- def delete_arp_spoofing_protection(self, port):
-        # There is an issue with deleting the icmp6 flow: the delete does
-        # not take effect and causes other flow rules to be deleted.
-        # Raofei will raise a bug to the neutron community.
- self.delete_flows(table_id=constants.LOCAL_SWITCHING,
- in_port=port, proto='arp')
- self.delete_flows(table_id=constants.ARP_SPOOF_TABLE,
- in_port=port)
-
- def mod_flow(self, **kwargs):
- ovs_ext_lib.OVSBridgeExt.mod_flow(self, **kwargs)
-
- def run_ofctl(self, cmd, args, process_input=None):
- return ovs_ext_lib.OVSBridgeExt.run_ofctl(
- self, cmd, args, process_input=process_input)
diff --git a/networking_sfc/services/sfc/agent/br_phys.py b/networking_sfc/services/sfc/agent/br_phys.py
deleted file mode 100644
index e9666e9..0000000
--- a/networking_sfc/services/sfc/agent/br_phys.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2015 Huawei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-* references
-** OVS agent https://wiki.openstack.org/wiki/Ovs-flow-logic
-"""
-from networking_sfc.services.sfc.common import ovs_ext_lib
-from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl import (
- br_phys)
-
-
-class OVSPhysicalBridge(br_phys.OVSPhysicalBridge, ovs_ext_lib.OVSBridgeExt):
- def setup_controllers(self, conf):
- self.set_protocols("[]")
- self.del_controller()
-
- def mod_flow(self, **kwargs):
- ovs_ext_lib.OVSBridgeExt.mod_flow(self, **kwargs)
-
- def run_ofctl(self, cmd, args, process_input=None):
- return ovs_ext_lib.OVSBridgeExt.run_ofctl(
- self, cmd, args, process_input=process_input)
diff --git a/networking_sfc/services/sfc/agent/br_tun.py b/networking_sfc/services/sfc/agent/br_tun.py
deleted file mode 100644
index 47a7cf9..0000000
--- a/networking_sfc/services/sfc/agent/br_tun.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2015 Huawei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-* references
-** OVS agent https://wiki.openstack.org/wiki/Ovs-flow-logic
-"""
-
-from networking_sfc.services.sfc.common import ovs_ext_lib
-from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl import (
- br_tun)
-
-
-class OVSTunnelBridge(br_tun.OVSTunnelBridge, ovs_ext_lib.OVSBridgeExt):
- def setup_controllers(self, conf):
- self.set_protocols("[]")
- self.del_controller()
-
- def mod_flow(self, **kwargs):
- ovs_ext_lib.OVSBridgeExt.mod_flow(self, **kwargs)
-
- def run_ofctl(self, cmd, args, process_input=None):
- return ovs_ext_lib.OVSBridgeExt.run_ofctl(
- self, cmd, args, process_input=process_input)
diff --git a/networking_sfc/services/sfc/common/__init__.py b/networking_sfc/services/sfc/common/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/services/sfc/common/__init__.py
+++ /dev/null
diff --git a/networking_sfc/services/sfc/common/config.py b/networking_sfc/services/sfc/common/config.py
deleted file mode 100644
index 29acd1c..0000000
--- a/networking_sfc/services/sfc/common/config.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-
-
-SFC_DRIVER_OPTS = [
- cfg.ListOpt('drivers',
- default=['dummy'],
- help=_("An ordered list of service chain drivers "
- "entrypoints to be loaded from the "
- "networking_sfc.sfc.drivers namespace.")),
-]
-
-
-cfg.CONF.register_opts(SFC_DRIVER_OPTS, "sfc")
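-
-# A minimal sketch (not part of the original module): the corresponding
-# neutron.conf section, assuming the standard oslo.config ini loading:
-#
-#     [sfc]
-#     drivers = dummy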
diff --git a/networking_sfc/services/sfc/common/context.py b/networking_sfc/services/sfc/common/context.py
deleted file mode 100644
index 7d3b451..0000000
--- a/networking_sfc/services/sfc/common/context.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-class SfcPluginContext(object):
- """SFC context base class."""
- def __init__(self, plugin, plugin_context):
- self._plugin = plugin
- self._plugin_context = plugin_context
-
-
-class PortChainContext(SfcPluginContext):
-
- def __init__(self, plugin, plugin_context, portchain,
- original_portchain=None):
- super(PortChainContext, self).__init__(plugin, plugin_context)
- self._portchain = portchain
- self._original_portchain = original_portchain
-
- @property
- def current(self):
- return self._portchain
-
- @property
- def original(self):
- return self._original_portchain
-
-
-class FlowClassifierContext(SfcPluginContext):
- def __init__(self, plugin, plugin_context, flowclassifier,
- original_flowclassifier=None):
- super(FlowClassifierContext, self).__init__(plugin, plugin_context)
- self._flowclassifier = flowclassifier
- self._original_flowclassifier = original_flowclassifier
-
- @property
- def current(self):
- return self._flowclassifier
-
- @property
- def original(self):
- return self._original_flowclassifier
-
-
-class PortPairContext(SfcPluginContext):
- def __init__(self, plugin, plugin_context, portpair,
- original_portpair=None):
- super(PortPairContext, self).__init__(plugin, plugin_context)
- self._portpair = portpair
- self._original_portpair = original_portpair
-
- @property
- def current(self):
- return self._portpair
-
- @property
- def original(self):
- return self._original_portpair
-
-
-class PortPairGroupContext(SfcPluginContext):
- def __init__(self, plugin, plugin_context, portpairgroup,
- original_portpairgroup=None):
- super(PortPairGroupContext, self).__init__(plugin, plugin_context)
- self._portpairgroup = portpairgroup
- self._original_portpairgroup = original_portpairgroup
-
- @property
- def current(self):
- return self._portpairgroup
-
- @property
- def original(self):
- return self._original_portpairgroup
diff --git a/networking_sfc/services/sfc/common/exceptions.py b/networking_sfc/services/sfc/common/exceptions.py
deleted file mode 100644
index 7d1b9d9..0000000
--- a/networking_sfc/services/sfc/common/exceptions.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Exceptions used by SFC plugin and drivers."""
-
-from neutron.common import exceptions
-
-
-class SfcDriverError(exceptions.NeutronException):
- """SFC driver call failed."""
- message = _("%(method)s failed.")
-
-
-class SfcException(exceptions.NeutronException):
- """Base for SFC driver exceptions returned to user."""
- pass
-
-
-class SfcBadRequest(exceptions.BadRequest, SfcException):
- """Base for SFC driver bad request exceptions returned to user."""
- pass
-
-
-class SfcNoSubnetGateway(SfcDriverError):
- """No subnet gateway."""
- message = _("There is no %(type)s of ip prefix %(cidr)s.")
-
-
-class SfcNoSuchSubnet(SfcDriverError):
- """No such subnet."""
- message = _("There is no %(type)s of %(cidr)s.")
-
-
-class FlowClassifierInvalid(SfcDriverError):
- """Invalid flow classifier."""
- message = _("There is no %(type)s assigned.")
diff --git a/networking_sfc/services/sfc/common/ovs_ext_lib.py b/networking_sfc/services/sfc/common/ovs_ext_lib.py
deleted file mode 100644
index 01fbd04..0000000
--- a/networking_sfc/services/sfc/common/ovs_ext_lib.py
+++ /dev/null
@@ -1,187 +0,0 @@
-# Copyright 2015 Huawei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import collections
-from neutron.agent.common import ovs_lib
-from neutron.agent.common import utils
-from neutron.common import exceptions
-from neutron.i18n import _LE
-from neutron.plugins.common import constants
-from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl import (
- ovs_bridge)
-from oslo_log import log as logging
-import six
-
-
-# Special return value for an invalid OVS ofport
-INVALID_OFPORT = '-1'
-
-LOG = logging.getLogger(__name__)
-
-
-def get_port_mask(min_port, max_port):
- """get port/mask serial by port range."""
- if min_port < 1 or max_port > 0xffff or min_port > max_port:
- msg = _("the port range is invalid")
- raise exceptions.InvalidInput(error_message=msg)
- masks = []
- while min_port <= max_port:
- mask = 0xffff
- while mask != 0:
- next_mask = (mask << 1) & 0xffff
- port_start = min_port & next_mask
- port_end = min_port + (next_mask ^ 0xffff)
- if port_start == min_port and port_end <= max_port:
- mask = next_mask
- else:
- break
- masks.append('0x%x/0x%x' % (min_port, mask))
- min_port = min_port + (mask ^ 0xffff) + 1
-
- return masks
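-
-# Worked example (illustrative): the loop grows a power-of-two aligned block
-# from min_port and emits one port/mask pair per block, so, assuming the
-# function above:
-#
-#     get_port_mask(1000, 1003)   # -> ['0x3e8/0xfffc']
-#     get_port_mask(80, 82)       # -> ['0x50/0xfffe', '0x52/0xffff']
-#
-# 80-81 share one /0xfffe entry, while 82 needs an exact /0xffff match.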
-
-
-class OVSBridgeExt(ovs_bridge.OVSAgentBridge):
- def setup_controllers(self, conf):
- self.set_protocols("[]")
- self.del_controller()
-
- def dump_flows_full_match(self, flow_str):
- retval = None
- flows = self.run_ofctl("dump-flows", [flow_str])
- if flows:
- retval = '\n'.join(item for item in flows.splitlines()
- if 'NXST' not in item and 'OFPST' not in item)
- return retval
-
- def mod_flow(self, **kwargs):
- flow_copy = kwargs.copy()
- flow_copy.pop('actions')
- flow_str = ovs_lib._build_flow_expr_str(flow_copy, 'del')
- dump_flows = self.dump_flows_full_match(flow_str)
- if dump_flows == '':
- self.do_action_flows('add', [kwargs])
- else:
- self.do_action_flows('mod', [kwargs])
-
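- # Illustrative behaviour of mod_flow(), assuming a bridge instance br:
- # the match portion (everything except 'actions') is dumped first; if
- # nothing matches, the flow is added, otherwise the existing entry is
- # modified, so calling
- #
- #     br.mod_flow(table=0, priority=10, in_port=1, actions='output:2')
- #     br.mod_flow(table=0, priority=10, in_port=1, actions='output:3')
- #
- # leaves a single flow whose actions end up as output:3.
-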
- def add_nsh_tunnel_port(self, port_name, remote_ip, local_ip,
- tunnel_type=constants.TYPE_GRE,
- vxlan_udp_port=constants.VXLAN_UDP_PORT,
- dont_fragment=True,
- in_nsp=None,
- in_nsi=None):
- attrs = [('type', tunnel_type)]
- # This is an OrderedDict solely to make a test happy
- options = collections.OrderedDict()
- vxlan_uses_custom_udp_port = (
- tunnel_type == constants.TYPE_VXLAN and
- vxlan_udp_port != constants.VXLAN_UDP_PORT
- )
- if vxlan_uses_custom_udp_port:
- options['dst_port'] = vxlan_udp_port
- options['df_default'] = str(dont_fragment).lower()
- options['remote_ip'] = 'flow'
- options['local_ip'] = local_ip
- options['in_key'] = 'flow'
- options['out_key'] = 'flow'
- if in_nsp is not None and in_nsi is not None:
- options['nsp'] = str(in_nsp)
- options['nsi'] = str(in_nsi)
- elif in_nsp is None and in_nsi is None:
- options['nsp'] = 'flow'
- options['nsi'] = 'flow'
- attrs.append(('options', options))
- ofport = self.add_port(port_name, *attrs)
- if (
- tunnel_type == constants.TYPE_VXLAN and
- ofport == INVALID_OFPORT
- ):
- LOG.error(
- _LE('Unable to create VXLAN tunnel port for service chain. '
- 'Please ensure that an openvswitch version that supports '
- 'VXLAN for service chain is installed.')
- )
- return ofport
-
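- # Example call (illustrative, assuming an NSH-capable Open vSwitch build
- # and a bridge instance br); note that the remote_ip argument is unused,
- # since the port's remote_ip option is always set to 'flow':
- #
- #     ofport = br.add_nsh_tunnel_port('vxlan-sfc', remote_ip=None,
- #                                     local_ip='192.0.2.10',
- #                                     tunnel_type='vxlan')
-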
- def run_ofctl(self, cmd, args, process_input=None):
- # We need to dump groups by group id,
- # which is an OpenFlow 1.5 feature
- full_args = [
- "ovs-ofctl", "-O openflow13", cmd, self.br_name
- ] + args
- try:
- return utils.execute(full_args, run_as_root=True,
- process_input=process_input)
- except Exception as e:
- LOG.exception(e)
- LOG.error(_LE("Unable to execute %(args)s."),
- {'args': full_args})
-
- def do_action_groups(self, action, kwargs_list):
- group_strs = [_build_group_expr_str(kw, action) for kw in kwargs_list]
- if action == 'add' or action == 'del':
- self.run_ofctl('%s-groups' % action, ['-'], '\n'.join(group_strs))
- elif action == 'mod':
- self.run_ofctl('%s-group' % action, ['-'], '\n'.join(group_strs))
- else:
- msg = _("Action is illegal")
- raise exceptions.InvalidInput(error_message=msg)
-
- def add_group(self, **kwargs):
- self.do_action_groups('add', [kwargs])
-
- def mod_group(self, **kwargs):
- self.do_action_groups('mod', [kwargs])
-
- def delete_group(self, **kwargs):
- self.do_action_groups('del', [kwargs])
-
- def dump_group_for_id(self, group_id):
- retval = None
- group_str = "%d" % group_id
- group = self.run_ofctl("dump-groups", [group_str])
- if group:
- retval = '\n'.join(item for item in group.splitlines()
- if 'NXST' not in item)
- return retval
-
-
-def _build_group_expr_str(group_dict, cmd):
- group_expr_arr = []
- buckets = None
- groupId = None
-
- if cmd != 'del':
- if "group_id" not in group_dict:
- msg = _("Must specify one groupId on groupo addition"
- " or modification")
- raise exceptions.InvalidInput(error_message=msg)
- groupId = "group_id=%s" % group_dict.pop('group_id')
-
- if "buckets" not in group_dict:
- msg = _("Must specify one or more buckets on group addition"
- " or modification")
- raise exceptions.InvalidInput(error_message=msg)
- buckets = "%s" % group_dict.pop('buckets')
-
- if groupId:
- group_expr_arr.append(groupId)
-
- for key, value in six.iteritems(group_dict):
- group_expr_arr.append("%s=%s" % (key, value))
-
- if buckets:
- group_expr_arr.append(buckets)
-
- return ','.join(group_expr_arr)
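-
-# Example (illustrative):
-#
-#     _build_group_expr_str(
-#         {'group_id': 1, 'type': 'select', 'buckets': 'bucket=output:2'},
-#         'add')
-#     # -> 'group_id=1,type=select,bucket=output:2'
-#
-# For 'del', group_id and buckets are not extracted, so {'group_id': 1}
-# renders as just 'group_id=1' and deletes that single group.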
diff --git a/networking_sfc/services/sfc/driver_manager.py b/networking_sfc/services/sfc/driver_manager.py
deleted file mode 100644
index c8a212a..0000000
--- a/networking_sfc/services/sfc/driver_manager.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-from oslo_log import log
-import stevedore
-
-from neutron.i18n import _LE
-from neutron.i18n import _LI
-
-from networking_sfc.services.sfc.common import exceptions as sfc_exc
-
-
-LOG = log.getLogger(__name__)
-cfg.CONF.import_opt('drivers',
- 'networking_sfc.services.sfc.common.config',
- group='sfc')
-
-
-class SfcDriverManager(stevedore.named.NamedExtensionManager):
- """Implementation of SFC drivers."""
-
- def __init__(self):
- # Registered sfc drivers, keyed by name.
- self.drivers = {}
- # Ordered list of sfc drivers, defining
- # the order in which the drivers are called.
- self.ordered_drivers = []
- LOG.info(_LI("Configured SFC drivers: %s"),
- cfg.CONF.sfc.drivers)
- super(SfcDriverManager, self).__init__('networking_sfc.sfc.drivers',
- cfg.CONF.sfc.drivers,
- invoke_on_load=True,
- name_order=True)
- LOG.info(_LI("Loaded SFC drivers: %s"), self.names())
- self._register_drivers()
-
- def _register_drivers(self):
- """Register all SFC drivers.
-
- This method should only be called once in the SfcDriverManager
- constructor.
- """
- for ext in self:
- self.drivers[ext.name] = ext
- self.ordered_drivers.append(ext)
- LOG.info(_LI("Registered SFC drivers: %s"),
- [driver.name for driver in self.ordered_drivers])
-
- def initialize(self):
- # ServiceChain bulk operations require each driver to support them
- self.native_bulk_support = True
- for driver in self.ordered_drivers:
- LOG.info(_LI("Initializing SFC driver '%s'"), driver.name)
- driver.obj.initialize()
- self.native_bulk_support &= getattr(driver.obj,
- 'native_bulk_support', True)
-
- def _call_drivers(self, method_name, context):
- """Helper method for calling a method across all SFC drivers.
-
- :param method_name: name of the method to call
- :param context: context parameter to pass to each method call
- Calls are not continued once one SFC driver has raised an
- exception; the failure is re-raised as SfcDriverError.
- """
- for driver in self.ordered_drivers:
- try:
- getattr(driver.obj, method_name)(context)
- except Exception as e:
- # This is an internal failure.
- LOG.exception(e)
- LOG.error(
- _LE("SFC driver '%(name)s' failed in %(method)s"),
- {'name': driver.name, 'method': method_name}
- )
- raise sfc_exc.SfcDriverError(
- method=method_name
- )
-
- def create_port_chain(self, context):
- self._call_drivers("create_port_chain", context)
-
- def update_port_chain(self, context):
- self._call_drivers("update_port_chain", context)
-
- def delete_port_chain(self, context):
- self._call_drivers("delete_port_chain", context)
-
- def create_port_pair(self, context):
- self._call_drivers("create_port_pair", context)
-
- def update_port_pair(self, context):
- self._call_drivers("update_port_pair", context)
-
- def delete_port_pair(self, context):
- self._call_drivers("delete_port_pair", context)
-
- def create_port_pair_group(self, context):
- self._call_drivers("create_port_pair_group", context)
-
- def update_port_pair_group(self, context):
- self._call_drivers("update_port_pair_group", context)
-
- def delete_port_pair_group(self, context):
- self._call_drivers("delete_port_pair_group", context)
diff --git a/networking_sfc/services/sfc/drivers/__init__.py b/networking_sfc/services/sfc/drivers/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/services/sfc/drivers/__init__.py
+++ /dev/null
diff --git a/networking_sfc/services/sfc/drivers/base.py b/networking_sfc/services/sfc/drivers/base.py
deleted file mode 100644
index 0816789..0000000
--- a/networking_sfc/services/sfc/drivers/base.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import abc
-import six
-
-
-@six.add_metaclass(abc.ABCMeta)
-class SfcDriverBase(object):
- """SFC Driver Base Class."""
-
- @abc.abstractmethod
- def create_port_chain(self, context):
- pass
-
- @abc.abstractmethod
- def delete_port_chain(self, context):
- pass
-
- @abc.abstractmethod
- def update_port_chain(self, context):
- pass
-
- @abc.abstractmethod
- def create_port_pair(self, context):
- pass
-
- @abc.abstractmethod
- def delete_port_pair(self, context):
- pass
-
- @abc.abstractmethod
- def update_port_pair(self, context):
- pass
-
- @abc.abstractmethod
- def create_port_pair_group(self, context):
- pass
-
- @abc.abstractmethod
- def delete_port_pair_group(self, context):
- pass
-
- @abc.abstractmethod
- def update_port_pair_group(self, context):
- pass
diff --git a/networking_sfc/services/sfc/drivers/dummy/__init__.py b/networking_sfc/services/sfc/drivers/dummy/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/services/sfc/drivers/dummy/__init__.py
+++ /dev/null
diff --git a/networking_sfc/services/sfc/drivers/dummy/dummy.py b/networking_sfc/services/sfc/drivers/dummy/dummy.py
deleted file mode 100644
index 1ddd7d0..0000000
--- a/networking_sfc/services/sfc/drivers/dummy/dummy.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import helpers as log_helpers
-
-from networking_sfc.services.sfc.drivers import base as sfc_driver
-
-
-class DummyDriver(sfc_driver.SfcDriverBase):
- """SFC Driver Dummy Class."""
- def initialize(self):
- pass
-
- @log_helpers.log_method_call
- def create_port_chain(self, context):
- pass
-
- @log_helpers.log_method_call
- def delete_port_chain(self, context):
- pass
-
- @log_helpers.log_method_call
- def update_port_chain(self, context):
- pass
-
- @log_helpers.log_method_call
- def create_port_pair_group(self, context):
- pass
-
- @log_helpers.log_method_call
- def delete_port_pair_group(self, context):
- pass
-
- @log_helpers.log_method_call
- def update_port_pair_group(self, context):
- pass
-
- @log_helpers.log_method_call
- def create_port_pair(self, context):
- pass
-
- @log_helpers.log_method_call
- def delete_port_pair(self, context):
- pass
-
- @log_helpers.log_method_call
- def update_port_pair(self, context):
- pass
diff --git a/networking_sfc/services/sfc/drivers/ovs/__init__.py b/networking_sfc/services/sfc/drivers/ovs/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/services/sfc/drivers/ovs/__init__.py
+++ /dev/null
diff --git a/networking_sfc/services/sfc/drivers/ovs/constants.py b/networking_sfc/services/sfc/drivers/ovs/constants.py
deleted file mode 100644
index 30e2c37..0000000
--- a/networking_sfc/services/sfc/drivers/ovs/constants.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from neutron.common import constants as n_const
-
-
-INGRESS_DIR = 'ingress'
-EGRESS_DIR = 'egress'
-
-STATUS_BUILDING = 'building'
-STATUS_ACTIVE = 'active'
-STATUS_ERROR = 'error'
-STATUS_DELETING = 'deleting'
-
-
-PORTFLOW_OPT_ADD = 'add-flows'
-PROTFLOW_OPT_DELETE = 'delete-flows'
-PROTFLOW_OPT_UPDATE = 'update-flows'
-
-
-SRC_NODE = 'src_node'
-DST_NODE = 'dst_node'
-SF_NODE = 'sf_node'
-
-RES_TYPE_GROUP = 'group'
-RES_TYPE_NSP = 'nsp'
-
-INSERTION_TYPE_L2 = 'l2'
-INSERTION_TYPE_L3 = 'l3'
-INSERTION_TYPE_BITW = 'bitw'
-INSERTION_TYPE_TAP = 'tap'
-
-MAX_HASH = 16
-
-INSERTION_TYPE_DICT = {
- n_const.DEVICE_OWNER_ROUTER_HA_INTF: INSERTION_TYPE_L3,
- n_const.DEVICE_OWNER_ROUTER_INTF: INSERTION_TYPE_L3,
- n_const.DEVICE_OWNER_ROUTER_GW: INSERTION_TYPE_L3,
- n_const.DEVICE_OWNER_FLOATINGIP: INSERTION_TYPE_L3,
- n_const.DEVICE_OWNER_DHCP: INSERTION_TYPE_TAP,
- n_const.DEVICE_OWNER_DVR_INTERFACE: INSERTION_TYPE_L3,
- n_const.DEVICE_OWNER_AGENT_GW: INSERTION_TYPE_L3,
- n_const.DEVICE_OWNER_ROUTER_SNAT: INSERTION_TYPE_TAP,
- n_const.DEVICE_OWNER_LOADBALANCER: INSERTION_TYPE_TAP,
- 'compute': INSERTION_TYPE_L2
-}
diff --git a/networking_sfc/services/sfc/drivers/ovs/db.py b/networking_sfc/services/sfc/drivers/ovs/db.py
deleted file mode 100644
index 8d3c87d..0000000
--- a/networking_sfc/services/sfc/drivers/ovs/db.py
+++ /dev/null
@@ -1,426 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import six
-
-from oslo_log import helpers as log_helpers
-from oslo_log import log as logging
-from oslo_utils import uuidutils
-
-from neutron.common import exceptions as n_exc
-from neutron import context as n_context
-from neutron.db import common_db_mixin
-from neutron.db import model_base
-from neutron.db import models_v2
-
-import sqlalchemy as sa
-from sqlalchemy import orm
-from sqlalchemy.orm import exc
-from sqlalchemy import sql
-
-
-LOG = logging.getLogger(__name__)
-
-
-class PortPairDetailNotFound(n_exc.NotFound):
- message = _("Portchain port brief %(port_id)s could not be found")
-
-
-class NodeNotFound(n_exc.NotFound):
- message = _("Portchain node %(node_id)s could not be found")
-
-
-# name changed to ChainPathId
-class UuidIntidAssoc(model_base.BASEV2, models_v2.HasId):
- __tablename__ = 'sfc_uuid_intid_associations'
- uuid = sa.Column(sa.String(36), primary_key=True)
- intid = sa.Column(sa.Integer, unique=True, nullable=False)
- type_ = sa.Column(sa.String(32), nullable=False)
-
- def __init__(self, uuid, intid, type_):
- self.uuid = uuid
- self.intid = intid
- self.type_ = type_
-
-
-def singleton(class_):
- instances = {}
-
- def getinstance(*args, **kwargs):
- if class_ not in instances:
- instances[class_] = class_(*args, **kwargs)
- return instances[class_]
- return getinstance
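-
-# Note (illustrative): with this decorator, repeated construction returns one
-# shared instance, and later constructor arguments are silently ignored, e.g.
-#
-#     pool_a = IDAllocation(context_a)
-#     pool_b = IDAllocation(context_b)   # context_b is ignored
-#     assert pool_a is pool_b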
-
-
-@singleton
-class IDAllocation(object):
- def __init__(self, context):
- # Initial ranges (hardcoded here rather than read from a conf file).
- conf_obj = {'group': [1, 255], 'portchain': [256, 65536]}
- self.conf_obj = conf_obj
- self.session = context.session
-
- @log_helpers.log_method_call
- def assign_intid(self, type_, uuid):
- query = self.session.query(UuidIntidAssoc).filter_by(
- type_=type_).order_by(UuidIntidAssoc.intid)
-
- allocated_int_ids = {obj.intid for obj in query.all()}
-
- # Find the first one from the available range that
- # is not in allocated_int_ids
- start, end = self.conf_obj[type_][0], self.conf_obj[type_][1]+1
- for init_id in six.moves.range(start, end):
- if init_id not in allocated_int_ids:
- with self.session.begin(subtransactions=True):
- uuid_intid = UuidIntidAssoc(
- uuid, init_id, type_)
- self.session.add(uuid_intid)
- return init_id
- else:
- return None
-
- @log_helpers.log_method_call
- def get_intid_by_uuid(self, type_, uuid):
-
- query_obj = self.session.query(UuidIntidAssoc).filter_by(
- type_=type_, uuid=uuid).first()
- if query_obj:
- return query_obj.intid
- else:
- return None
-
- @log_helpers.log_method_call
- def release_intid(self, type_, intid):
- """Release int id.
-
- @param: type_: str
- @param: intid: int
- """
- with self.session.begin(subtransactions=True):
- query_obj = self.session.query(UuidIntidAssoc).filter_by(
- intid=intid, type_=type_).first()
-
- if query_obj:
- self.session.delete(query_obj)
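-
-# Allocation walk-through (illustrative): assign_intid() always returns the
-# smallest unallocated integer in the configured range, so with a fresh
-# 'group' range of [1, 255]:
-#
-#     pool = IDAllocation(context)
-#     pool.assign_intid('group', uuid_a)   # -> 1
-#     pool.assign_intid('group', uuid_b)   # -> 2
-#     pool.release_intid('group', 1)
-#     pool.assign_intid('group', uuid_c)   # -> 1 (reused)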
-
-
-class PathPortAssoc(model_base.BASEV2):
- """path port association table.
-
- It represents the association table which associate path_nodes with
- portpair_details.
- """
- __tablename__ = 'sfc_path_port_associations'
- pathnode_id = sa.Column(sa.String(36),
- sa.ForeignKey(
- 'sfc_path_nodes.id', ondelete='CASCADE'),
- primary_key=True)
- portpair_id = sa.Column(sa.String(36),
- sa.ForeignKey('sfc_portpair_details.id',
- ondelete='CASCADE'),
- primary_key=True)
- weight = sa.Column(sa.Integer, nullable=False, default=1)
-
-
-class PortPairDetail(model_base.BASEV2, models_v2.HasId,
- models_v2.HasTenant):
- __tablename__ = 'sfc_portpair_details'
- ingress = sa.Column(sa.String(36), nullable=True)
- egress = sa.Column(sa.String(36), nullable=True)
- host_id = sa.Column(sa.String(255), nullable=False)
- mac_address = sa.Column(sa.String(32), nullable=False)
- network_type = sa.Column(sa.String(8))
- segment_id = sa.Column(sa.Integer)
- local_endpoint = sa.Column(sa.String(64), nullable=False)
- path_nodes = orm.relationship(PathPortAssoc,
- backref='port_pair_detail',
- lazy="joined",
- cascade='all,delete')
-
-
-class PathNode(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
- __tablename__ = 'sfc_path_nodes'
- nsp = sa.Column(sa.Integer, nullable=False)
- nsi = sa.Column(sa.Integer, nullable=False)
- node_type = sa.Column(sa.String(32))
- portchain_id = sa.Column(
- sa.String(255),
- sa.ForeignKey('sfc_port_chains.id', ondelete='CASCADE'))
- status = sa.Column(sa.String(32))
- portpair_details = orm.relationship(PathPortAssoc,
- backref='path_nodes',
- lazy="joined",
- cascade='all,delete')
- next_group_id = sa.Column(sa.Integer)
- next_hop = sa.Column(sa.String(512))
-
-
-class OVSSfcDriverDB(common_db_mixin.CommonDbMixin):
- def initialize(self):
- self.admin_context = n_context.get_admin_context()
-
- def _make_pathnode_dict(self, node, fields=None):
- res = {'id': node['id'],
- 'tenant_id': node['tenant_id'],
- 'node_type': node['node_type'],
- 'nsp': node['nsp'],
- 'nsi': node['nsi'],
- 'next_group_id': node['next_group_id'],
- 'next_hop': node['next_hop'],
- 'portchain_id': node['portchain_id'],
- 'status': node['status'],
- 'portpair_details': [pair_detail['portpair_id']
- for pair_detail in node['portpair_details']
- ]
- }
-
- return self._fields(res, fields)
-
- def _make_port_detail_dict(self, port, fields=None):
- res = {'id': port['id'],
- 'tenant_id': port['tenant_id'],
- 'host_id': port['host_id'],
- 'ingress': port.get('ingress', None),
- 'egress': port.get('egress', None),
- 'segment_id': port['segment_id'],
- 'local_endpoint': port['local_endpoint'],
- 'mac_address': port['mac_address'],
- 'network_type': port['network_type'],
- 'path_nodes': [{'pathnode_id': node['pathnode_id'],
- 'weight': node['weight']}
- for node in port['path_nodes']]
- }
-
- return self._fields(res, fields)
-
- def _make_pathport_assoc_dict(self, assoc, fields=None):
- res = {'pathnode_id': assoc['pathnode_id'],
- 'portpair_id': assoc['portpair_id'],
- 'weight': assoc['weight'],
- }
-
- return self._fields(res, fields)
-
- def _get_path_node(self, id):
- try:
- node = self._get_by_id(self.admin_context, PathNode, id)
- except exc.NoResultFound:
- raise NodeNotFound(node_id=id)
- return node
-
- def _get_port_detail(self, id):
- try:
- port = self._get_by_id(self.admin_context, PortPairDetail, id)
- except exc.NoResultFound:
- raise PortPairDetailNotFound(port_id=id)
- return port
-
- def create_port_detail(self, port):
- with self.admin_context.session.begin(subtransactions=True):
- args = self._filter_non_model_columns(port, PortPairDetail)
- args['id'] = uuidutils.generate_uuid()
- port_obj = PortPairDetail(**args)
- self.admin_context.session.add(port_obj)
- return self._make_port_detail_dict(port_obj)
-
- def create_path_node(self, node):
- with self.admin_context.session.begin(subtransactions=True):
- args = self._filter_non_model_columns(node, PathNode)
- args['id'] = uuidutils.generate_uuid()
- node_obj = PathNode(**args)
- self.admin_context.session.add(node_obj)
- return self._make_pathnode_dict(node_obj)
-
- def create_pathport_assoc(self, assoc):
- with self.admin_context.session.begin(subtransactions=True):
- args = self._filter_non_model_columns(assoc, PathPortAssoc)
- assoc_obj = PathPortAssoc(**args)
- self.admin_context.session.add(assoc_obj)
- return self._make_pathport_assoc_dict(assoc_obj)
-
- def delete_pathport_assoc(self, pathnode_id, portdetail_id):
- with self.admin_context.session.begin(subtransactions=True):
- self.admin_context.session.query(PathPortAssoc).filter_by(
- pathnode_id=pathnode_id,
- portpair_id=portdetail_id).delete()
-
- def update_port_detail(self, id, port):
- with self.admin_context.session.begin(subtransactions=True):
- port_obj = self._get_port_detail(id)
- for key, value in six.iteritems(port):
- if key == 'path_nodes':
- pns = []
- for pn in value:
- pn_id = pn['pathnode_id']
- self._get_path_node(pn_id)
- query = self._model_query(
- self.admin_context, PathPortAssoc)
- pn_association = query.filter_by(
- pathnode_id=pn_id,
- portpair_id=id
- ).first()
- if not pn_association:
- pn_association = PathPortAssoc(
- pathnode_id=pn_id,
- portpair_id=id,
- weight=pn.get('weight', 1)
- )
- pns.append(pn_association)
- port_obj[key] = pns
- else:
- port_obj[key] = value
- port_obj.update(port)
- return self._make_port_detail_dict(port_obj)
-
- def update_path_node(self, id, node):
- with self.admin_context.session.begin(subtransactions=True):
- node_obj = self._get_path_node(id)
- for key, value in six.iteritems(node):
- if key == 'portpair_details':
- pds = []
- for pd_id in value:
- self._get_port_detail(pd_id)
- query = self._model_query(
- self.admin_context, PathPortAssoc)
- pd_association = query.filter_by(
- pathnode_id=id,
- portpair_id=pd_id
- ).first()
- if not pd_association:
- pd_association = PathPortAssoc(
- pathnode_id=id,
- portpair_id=pd_id
- )
- pds.append(pd_association)
- node_obj[key] = pds
- else:
- node_obj[key] = value
- return self._make_pathnode_dict(node_obj)
-
- def delete_port_detail(self, id):
- with self.admin_context.session.begin(subtransactions=True):
- port_obj = self._get_port_detail(id)
- self.admin_context.session.delete(port_obj)
-
- def delete_path_node(self, id):
- with self.admin_context.session.begin(subtransactions=True):
- node_obj = self._get_path_node(id)
- self.admin_context.session.delete(node_obj)
-
- def get_port_detail(self, id):
- with self.admin_context.session.begin(subtransactions=True):
- port_obj = self._get_port_detail(id)
- return self._make_port_detail_dict(port_obj)
-
- def get_port_detail_without_exception(self, id):
- with self.admin_context.session.begin(subtransactions=True):
- try:
- port = self._get_by_id(
- self.admin_context, PortPairDetail, id)
- except exc.NoResultFound:
- return None
- return self._make_port_detail_dict(port)
-
- def get_path_node(self, id):
- with self.admin_context.session.begin(subtransactions=True):
- node_obj = self._get_path_node(id)
- return self._make_pathnode_dict(node_obj)
-
- def get_path_nodes_by_filter(self, filters=None, fields=None,
- sorts=None, limit=None, marker=None,
- page_reverse=False):
- with self.admin_context.session.begin(subtransactions=True):
- qry = self._get_path_nodes_by_filter(
- filters, fields, sorts, limit,
- marker, page_reverse
- )
- all_items = qry.all()
- if all_items:
- return [self._make_pathnode_dict(item) for item in all_items]
-
- return None
-
- def get_path_node_by_filter(self, filters=None, fields=None,
- sorts=None, limit=None, marker=None,
- page_reverse=False):
- with self.admin_context.session.begin(subtransactions=True):
- qry = self._get_path_nodes_by_filter(
- filters, fields, sorts, limit,
- marker, page_reverse)
- first = qry.first()
- if first:
- return self._make_pathnode_dict(first)
-
- return None
-
- def _get_path_nodes_by_filter(self, filters=None, fields=None,
- sorts=None, limit=None, marker=None,
- page_reverse=False):
- qry = self.admin_context.session.query(PathNode)
- if filters:
- for key, value in six.iteritems(filters):
- column = getattr(PathNode, key, None)
- if column:
- if not value:
- qry = qry.filter(sql.false())
- else:
- qry = qry.filter(column == value)
- return qry
-
- def get_port_details_by_filter(self, filters=None, fields=None,
- sorts=None, limit=None, marker=None,
- page_reverse=False):
- with self.admin_context.session.begin(subtransactions=True):
- qry = self._get_port_details_by_filter(
- filters, fields, sorts, limit,
- marker, page_reverse)
- all_items = qry.all()
- if all_items:
- return [
- self._make_port_detail_dict(item)
- for item in all_items
- ]
-
- return None
-
- def get_port_detail_by_filter(self, filters=None, fields=None,
- sorts=None, limit=None, marker=None,
- page_reverse=False):
- with self.admin_context.session.begin(subtransactions=True):
- qry = self._get_port_details_by_filter(
- filters, fields, sorts, limit,
- marker, page_reverse)
- first = qry.first()
- if first:
- return self._make_port_detail_dict(first)
-
- return None
-
- def _get_port_details_by_filter(self, filters=None, fields=None,
- sorts=None, limit=None, marker=None,
- page_reverse=False):
- qry = self.admin_context.session.query(PortPairDetail)
- if filters:
- for key, value in six.iteritems(filters):
- column = getattr(PortPairDetail, key, None)
- if column:
- if not value:
- qry = qry.filter(sql.false())
- else:
- qry = qry.filter(column == value)
-
- return qry
diff --git a/networking_sfc/services/sfc/drivers/ovs/driver.py b/networking_sfc/services/sfc/drivers/ovs/driver.py
deleted file mode 100644
index 9dfc40d..0000000
--- a/networking_sfc/services/sfc/drivers/ovs/driver.py
+++ /dev/null
@@ -1,1076 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import netaddr
-
-# from eventlet import greenthread
-
-from neutron.common import constants as nc_const
-from neutron.common import rpc as n_rpc
-
-from neutron import manager
-
-from neutron.i18n import _LE
-from neutron.i18n import _LW
-
-from neutron.plugins.common import constants as np_const
-
-
-from oslo_log import helpers as log_helpers
-from oslo_log import log as logging
-from oslo_serialization import jsonutils
-
-from networking_sfc.extensions import flowclassifier
-from networking_sfc.extensions import sfc
-from networking_sfc.services.sfc.common import exceptions as exc
-from networking_sfc.services.sfc.drivers import base as driver_base
-from networking_sfc.services.sfc.drivers.ovs import (
- rpc_topics as sfc_topics)
-from networking_sfc.services.sfc.drivers.ovs import (
- db as ovs_sfc_db)
-from networking_sfc.services.sfc.drivers.ovs import (
- rpc as ovs_sfc_rpc)
-from networking_sfc.services.sfc.drivers.ovs import (
- constants as ovs_const)
-
-
-LOG = logging.getLogger(__name__)
-
-
-class OVSSfcDriver(driver_base.SfcDriverBase,
- ovs_sfc_db.OVSSfcDriverDB):
- """Sfc Driver Base Class."""
-
- def initialize(self):
- super(OVSSfcDriver, self).initialize()
- self.ovs_driver_rpc = ovs_sfc_rpc.SfcAgentRpcClient(
- sfc_topics.SFC_AGENT
- )
-
- self.id_pool = ovs_sfc_db.IDAllocation(self.admin_context)
- self._setup_rpc()
-
- def _setup_rpc(self):
- # Setup a rpc server
- self.topic = sfc_topics.SFC_PLUGIN
- self.endpoints = [ovs_sfc_rpc.SfcRpcCallback(self)]
- self.conn = n_rpc.create_connection(new=True)
- self.conn.create_consumer(self.topic, self.endpoints, fanout=False)
- self.conn.consume_in_threads()
-
- def _get_subnet(self, core_plugin, tenant_id, cidr):
- filters = {'tenant_id': [tenant_id]}
- subnets = core_plugin.get_subnets(self.admin_context, filters=filters)
- cidr_set = netaddr.IPSet([cidr])
-
- for subnet in subnets:
- subnet_cidr_set = netaddr.IPSet([subnet['cidr']])
- if cidr_set.issubset(subnet_cidr_set):
- return subnet
-
- def _get_fc_dst_subnet_gw_port(self, fc):
- core_plugin = manager.NeutronManager.get_plugin()
- subnet = self._get_subnet(core_plugin,
- fc['tenant_id'],
- fc['destination_ip_prefix'])
-
- return self._get_port_subnet_gw_info(core_plugin, subnet)
-
- def _get_port_subnet_gw_info_by_port_id(self, id):
- core_plugin = manager.NeutronManager.get_plugin()
- subnet = self._get_subnet_by_port(core_plugin, id)
- return self._get_port_subnet_gw_info(core_plugin,
- subnet)
-
- def _get_port_subnet_gw_info(self, core_plugin, subnet):
- filters = {
- 'device_owner':
- [nc_const.DEVICE_OWNER_ROUTER_INTF]
- }
- gw_ports = core_plugin.get_ports(self.admin_context, filters=filters)
- for port in gw_ports:
- for fixed_ip in port['fixed_ips']:
- if subnet["id"] == fixed_ip['subnet_id']:
- return (port['mac_address'],
- subnet['cidr'],
- subnet['network_id'])
-
- raise exc.SfcNoSubnetGateway(
- type='subnet gateway',
- cidr=subnet['cidr'])
-
- def _get_subnet_by_port(self, core_plugin, id):
- port = core_plugin.get_port(self.admin_context, id)
- for ip in port['fixed_ips']:
- subnet = core_plugin.get_subnet(self.admin_context,
- ip["subnet_id"])
- # currently only one subnet per port is supported
- break
-
- return subnet
-
- @log_helpers.log_method_call
- def _get_portgroup_members(self, context, pg_id):
- next_group_members = []
- group_intid = self.id_pool.get_intid_by_uuid('group', pg_id)
- LOG.debug('group_intid: %s', group_intid)
- pg = context._plugin.get_port_pair_group(context._plugin_context,
- pg_id)
- for pp_id in pg['port_pairs']:
- pp = context._plugin.get_port_pair(context._plugin_context, pp_id)
- filters = {}
- if pp.get('ingress', None):
- filters = dict(dict(ingress=pp['ingress']), **filters)
- if pp.get('egress', None):
- filters = dict(dict(egress=pp['egress']), **filters)
- pd = self.get_port_detail_by_filter(filters)
- if pd:
- next_group_members.append(
- dict(portpair_id=pd['id'], weight=1))
- return group_intid, next_group_members
-
- def _get_port_pair_detail_by_port_pair(self, context, port_pair_id):
- pp = context._plugin.get_port_pair(context._plugin_context,
- port_pair_id)
- filters = {}
- if pp.get('ingress', None):
- filters = dict(dict(ingress=pp['ingress']), **filters)
- if pp.get('egress', None):
- filters = dict(dict(egress=pp['egress']), **filters)
- pd = self.get_port_detail_by_filter(filters)
-
- return pd
-
- @log_helpers.log_method_call
- def _add_flowclassifier_port_assoc(self, fc_ids, tenant_id,
- src_node, dst_node,
- last_sf_node=None):
- dst_ports = []
- for fc in self._get_fcs_by_ids(fc_ids):
- if fc.get('logical_source_port', ''):
- # lookup the source port
- src_pd_filter = dict(egress=fc['logical_source_port'],
- tenant_id=tenant_id
- )
- src_pd = self.get_port_detail_by_filter(src_pd_filter)
-
- if not src_pd:
- # Create source port detail
- src_pd = self._create_port_detail(src_pd_filter)
- LOG.debug('create src port detail: %s', src_pd)
-
- # Create associate relationship
- assco_args = {'portpair_id': src_pd['id'],
- 'pathnode_id': src_node['id'],
- 'weight': 1,
- }
- sna = self.create_pathport_assoc(assco_args)
- LOG.debug('create assoc src port with node: %s', sna)
- src_node['portpair_details'].append(src_pd['id'])
-
- if fc.get('logical_destination_port', ''):
- dst_pd_filter = dict(ingress=fc['logical_destination_port'],
- tenant_id=tenant_id
- )
- dst_pd = self.get_port_detail_by_filter(dst_pd_filter)
-
- if not dst_pd:
- # Create dst port detail
- dst_pd = self._create_port_detail(dst_pd_filter)
- LOG.debug('create dst port detail: %s', dst_pd)
-
- # Create associate relationship
- dst_assco_args = {'portpair_id': dst_pd['id'],
- 'pathnode_id': dst_node['id'],
- 'weight': 1,
- }
- dna = self.create_pathport_assoc(dst_assco_args)
- LOG.debug('create assoc dst port with node: %s', dna)
- dst_node['portpair_details'].append(dst_pd['id'])
-
- dst_ports.append(dict(portpair_id=dst_pd['id'], weight=1))
-
- if last_sf_node:
- if last_sf_node['next_hop']:
- next_hops = jsonutils.loads(last_sf_node['next_hop'])
- next_hops.extend(dst_ports)
- last_sf_node['next_hop'] = jsonutils.dumps(next_hops)
- # update next-hop info of the previous node
- self.update_path_node(last_sf_node['id'],
- last_sf_node)
- return dst_ports
-
- def _remove_flowclassifier_port_assoc(self, fc_ids, tenant_id,
- src_node=None, dst_node=None,
- last_sf_node=None):
- if not fc_ids:
- return
- for fc in self._get_fcs_by_ids(fc_ids):
- if fc.get('logical_source_port', ''):
- # delete source port detail
- src_pd_filter = dict(egress=fc['logical_source_port'],
- tenant_id=tenant_id
- )
- pds = self.get_port_details_by_filter(src_pd_filter)
- if pds:
- for pd in pds:
- # update src_node portpair_details reference info
- if src_node and pd['id'] in src_node[
- 'portpair_details'
- ]:
- src_node['portpair_details'].remove(pd['id'])
- if len(pd['path_nodes']) == 1:
- self.delete_port_detail(pd['id'])
-
- if fc.get('logical_destination_port', ''):
- # delete destination port detail
- dst_pd_filter = dict(ingress=fc['logical_destination_port'],
- tenant_id=tenant_id
- )
- pds = self.get_port_details_by_filter(dst_pd_filter)
- if pds:
- for pd in pds:
- # update dst_node portpair_details reference info
- if dst_node and pd['id'] in dst_node[
- 'portpair_details'
- ]:
- # update portpair_details of this node
- dst_node['portpair_details'].remove(pd['id'])
- # update last hop(SF-group) next hop info
- if last_sf_node:
- next_hop = dict(portpair_id=pd['id'],
- weight=1)
- next_hops = jsonutils.loads(
- last_sf_node['next_hop'])
- next_hops.remove(next_hop)
- last_sf_node['next_hop'] = jsonutils.dumps(
- next_hops)
- if len(pd['path_nodes']) == 1:
- self.delete_port_detail(pd['id'])
-
- if last_sf_node:
- # update next-hop info of the previous node
- self.update_path_node(last_sf_node['id'],
- last_sf_node)
-
- @log_helpers.log_method_call
- def _create_portchain_path(self, context, port_chain):
- src_node, src_pd, dst_node, dst_pd = (({}, ) * 4)
- path_nodes, dst_ports = [], []
- # Create an assoc object for chain_id and path_id
- # context = context._plugin_context
- path_id = self.id_pool.assign_intid('portchain', port_chain['id'])
-
- if not path_id:
- LOG.error(_LE('No path_id available for creating port chain path'))
- return
-
- next_group_intid, next_group_members = self._get_portgroup_members(
- context, port_chain['port_pair_groups'][0])
-
- port_pair_groups = port_chain['port_pair_groups']
- sf_path_length = len(port_pair_groups)
- # Create a head node object for port chain
- src_args = {'tenant_id': port_chain['tenant_id'],
- 'node_type': ovs_const.SRC_NODE,
- 'nsp': path_id,
- 'nsi': 0xff,
- 'portchain_id': port_chain['id'],
- 'status': ovs_const.STATUS_BUILDING,
- 'next_group_id': next_group_intid,
- 'next_hop': jsonutils.dumps(next_group_members),
- }
- src_node = self.create_path_node(src_args)
- LOG.debug('create src node: %s', src_node)
- path_nodes.append(src_node)
-
- # Create a destination node object for port chain
- dst_args = {
- 'tenant_id': port_chain['tenant_id'],
- 'node_type': ovs_const.DST_NODE,
- 'nsp': path_id,
- 'nsi': 0xff - sf_path_length - 1,
- 'portchain_id': port_chain['id'],
- 'status': ovs_const.STATUS_BUILDING,
- 'next_group_id': None,
- 'next_hop': None
- }
- dst_node = self.create_path_node(dst_args)
- LOG.debug('create dst node: %s', dst_node)
- path_nodes.append(dst_node)
-
- dst_ports = self._add_flowclassifier_port_assoc(
- port_chain['flow_classifiers'],
- port_chain['tenant_id'],
- src_node,
- dst_node
- )
-
- for i in range(sf_path_length):
- cur_group_members = next_group_members
- # next_group for next hop
- if i < sf_path_length - 1:
- next_group_intid, next_group_members = (
- self._get_portgroup_members(
- context, port_pair_groups[i + 1])
- )
- else:
- next_group_intid = None
- next_group_members = None if not dst_ports else dst_ports
-
- # Create a node object
- node_args = {
- 'tenant_id': port_chain['tenant_id'],
- 'node_type': ovs_const.SF_NODE,
- 'nsp': path_id,
- 'nsi': 0xfe - i,
- 'portchain_id': port_chain['id'],
- 'status': ovs_const.STATUS_BUILDING,
- 'next_group_id': next_group_intid,
- 'next_hop': (
- None if not next_group_members else
- jsonutils.dumps(next_group_members)
- )
- }
- sf_node = self.create_path_node(node_args)
- LOG.debug('chain path node: %s', sf_node)
- # Create the association objects that combine the pathnode_id with
- # the ingress of the port_pairs in the current group
- # when the port group has not reached the tail
- for member in cur_group_members:
- assco_args = {'portpair_id': member['portpair_id'],
- 'pathnode_id': sf_node['id'],
- 'weight': member['weight'], }
- sfna = self.create_pathport_assoc(assco_args)
- LOG.debug('create assoc port with node: %s', sfna)
- sf_node['portpair_details'].append(member['portpair_id'])
- path_nodes.append(sf_node)
-
- return path_nodes
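-
- # Resulting numbering (illustrative): for a chain with two port pair
- # groups, the nodes created above carry nsp = path_id and
- #
- #     src node : nsi = 0xff
- #     1st group: nsi = 0xfe
- #     2nd group: nsi = 0xfd
- #     dst node : nsi = 0xfc   (0xff - sf_path_length - 1)
- #
- # so nsi decrements by one per hop along the chain.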
-
- def _delete_path_node_port_flowrule(self, node, port, fc_ids):
- # if this port is not bound, don't generate a flow rule
- if not port['host_id']:
- return
- flow_rule = self._build_portchain_flowrule_body(
- node,
- port,
- None,
- fc_ids)
-
- self.ovs_driver_rpc.ask_agent_to_delete_flow_rules(
- self.admin_context,
- flow_rule)
-
- def _delete_path_node_flowrule(self, node, fc_ids):
- for each in node['portpair_details']:
- port = self.get_port_detail_by_filter(dict(id=each))
- if port:
- self._delete_path_node_port_flowrule(
- node, port, fc_ids)
-
- @log_helpers.log_method_call
- def _delete_portchain_path(self, context, portchain_id):
- port_chain = context.current
- first = self.get_path_node_by_filter(
- filters={
- 'portchain_id': portchain_id,
- 'nsi': 0xff
- }
- )
-
- # delete flow rules whose source port isn't assigned
- # in the flow classifier
- if first:
- self._delete_src_node_flowrules(
- first,
- port_chain['flow_classifiers']
- )
-
- pds = self.get_path_nodes_by_filter(
- dict(portchain_id=portchain_id))
- if pds:
- for pd in pds:
- self._delete_path_node_flowrule(
- pd,
- port_chain['flow_classifiers']
- )
- self.delete_path_node(pd['id'])
-
- # delete the port associations of the flow classifiers
- self._remove_flowclassifier_port_assoc(
- port_chain['flow_classifiers'],
- port_chain['tenant_id']
- )
-
- # Release the chain's path id
- intid = self.id_pool.get_intid_by_uuid(
- 'portchain', portchain_id)
- self.id_pool.release_intid('portchain', intid)
-
- def _update_path_node_next_hops(self, flow_rule):
- node_next_hops = []
- if not flow_rule['next_hop']:
- return None
- next_hops = jsonutils.loads(flow_rule['next_hop'])
- if not next_hops:
- return None
- for member in next_hops:
- detail = {}
- port_detail = self.get_port_detail_by_filter(
- dict(id=member['portpair_id']))
- if not port_detail or not port_detail['host_id']:
- continue
- detail['local_endpoint'] = port_detail['local_endpoint']
- detail['weight'] = member['weight']
- detail['mac_address'] = port_detail['mac_address']
- detail['ingress'] = port_detail['ingress']
- node_next_hops.append(detail)
-
- mac, cidr, net_uuid = self._get_port_subnet_gw_info_by_port_id(
- detail['ingress']
- )
-
- detail['gw_mac'] = mac
- detail['cidr'] = cidr
- detail['net_uuid'] = net_uuid
-
- flow_rule['next_hops'] = node_next_hops
- flow_rule.pop('next_hop')
-
- return node_next_hops
-
- def _build_portchain_flowrule_body(self, node, port,
- add_fc_ids=None, del_fc_ids=None):
- node_info = node.copy()
- node_info.pop('tenant_id')
- node_info.pop('portpair_details')
-
- port_info = port.copy()
- port_info.pop('tenant_id')
- port_info.pop('id')
- port_info.pop('path_nodes')
- port_info.pop('host_id')
-
- flow_rule = dict(node_info, **port_info)
- # if this port belongs to an NSH/MPLS-aware VM, only
- # notify the flow classifier for the 1st SF.
- flow_rule['add_fcs'] = self._filter_flow_classifiers(
- flow_rule, add_fc_ids)
- flow_rule['del_fcs'] = self._filter_flow_classifiers(
- flow_rule, del_fc_ids)
-
- self._update_portchain_group_reference_count(flow_rule,
- port['host_id'])
-
- # update next hop info
- self._update_path_node_next_hops(flow_rule)
-
- return flow_rule
-
- def _filter_flow_classifiers(self, flow_rule, fc_ids):
- """Filter flow classifiers.
-
- @return: list of the flow classifiers
- """
-
- fc_return = []
-
- if not fc_ids:
- return fc_return
- fcs = self._get_fcs_by_ids(fc_ids)
- for fc in fcs:
- new_fc = fc.copy()
- new_fc.pop('id')
- new_fc.pop('name')
- new_fc.pop('tenant_id')
- new_fc.pop('description')
-
- if ((flow_rule['node_type'] == ovs_const.SRC_NODE and
- flow_rule['egress'] == fc['logical_source_port']
- ) or
- (flow_rule['node_type'] == ovs_const.DST_NODE and
- flow_rule['ingress'] == fc['logical_destination_port']
- )):
- fc_return.append(new_fc)
- elif flow_rule['node_type'] == ovs_const.SF_NODE:
- fc_return.append(new_fc)
-
- return fc_return
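-
- # In effect (illustrative): a src node keeps only classifiers whose
- # logical_source_port equals its egress, a dst node keeps only those
- # whose logical_destination_port equals its ingress, and an SF node
- # keeps every classifier passed in. E.g. a classifier with
- # logical_source_port 'p1' survives on a src node only when
- # flow_rule['egress'] == 'p1'.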
-
- def _update_path_node_port_flowrules(self, node, port,
- add_fc_ids=None, del_fc_ids=None):
- # if this port is not bound, don't generate a flow rule
- if not port['host_id']:
- return
-
- flow_rule = self._build_portchain_flowrule_body(
- node,
- port,
- add_fc_ids,
- del_fc_ids)
-
- self.ovs_driver_rpc.ask_agent_to_update_flow_rules(
- self.admin_context,
- flow_rule)
-
- def _update_path_node_flowrules(self, node,
- add_fc_ids=None, del_fc_ids=None):
- if node['portpair_details'] is None:
- return
- for each in node['portpair_details']:
- port = self.get_port_detail_by_filter(dict(id=each))
- if port:
- self._update_path_node_port_flowrules(
- node, port, add_fc_ids, del_fc_ids)
-
- def _thread_update_path_nodes(self, nodes,
- add_fc_ids=None, del_fc_ids=None):
- for node in nodes:
- self._update_path_node_flowrules(node, add_fc_ids, del_fc_ids)
- self._update_src_node_flowrules(nodes[0], add_fc_ids, del_fc_ids)
-
- def _get_portchain_fcs(self, port_chain):
- return self._get_fcs_by_ids(port_chain['flow_classifiers'])
-
- def _get_fcs_by_ids(self, fc_ids):
- flow_classifiers = []
- if not fc_ids:
- return flow_classifiers
-
- # Get the portchain flow classifiers
- fc_plugin = (
- manager.NeutronManager.get_service_plugins().get(
- flowclassifier.FLOW_CLASSIFIER_EXT)
- )
- if not fc_plugin:
- LOG.warn(_LW("Not found the flow classifier service plugin"))
- return flow_classifiers
-
- for fc_id in fc_ids:
- fc = fc_plugin.get_flow_classifier(self.admin_context, fc_id)
- flow_classifiers.append(fc)
-
- return flow_classifiers
-
- @log_helpers.log_method_call
- def create_port_chain(self, context):
- port_chain = context.current
- path_nodes = self._create_portchain_path(context, port_chain)
-
- # notify agent with an async thread
- # currently we don't use greenthread.spawn
- self._thread_update_path_nodes(
- path_nodes,
- port_chain['flow_classifiers'],
- None)
-
- @log_helpers.log_method_call
- def delete_port_chain(self, context):
- port_chain = context.current
- portchain_id = port_chain['id']
- LOG.debug("to delete portchain path")
- self._delete_portchain_path(context, portchain_id)
-
- def _get_diff_set(self, orig, cur):
- orig_set = set(orig)
- cur_set = set(cur)
-
- to_del = orig_set.difference(cur_set)
- to_add = cur_set.difference(orig_set)
-
- return to_del, to_add
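-
- # Example (illustrative):
- #
- #     self._get_diff_set(['fc1', 'fc2'], ['fc2', 'fc3'])
- #     # -> ({'fc1'}, {'fc3'}), i.e. (to_del, to_add)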
-
- @log_helpers.log_method_call
- def update_port_chain(self, context):
- port_chain = context.current
- orig = context.original
-
- del_fc_ids, add_fc_ids = self._get_diff_set(
- orig['flow_classifiers'],
- port_chain['flow_classifiers']
- )
- path_nodes = self.get_path_nodes_by_filter(
- dict(portchain_id=port_chain['id'])
- )
- if not path_nodes:
- return
-
- sort_path_nodes = sorted(path_nodes,
- key=lambda x: x['nsi'],
- reverse=True)
- if del_fc_ids:
- self._thread_update_path_nodes(sort_path_nodes,
- None,
- del_fc_ids)
- self._remove_flowclassifier_port_assoc(del_fc_ids,
- port_chain['tenant_id'],
- sort_path_nodes[0],
- sort_path_nodes[-1],
- sort_path_nodes[-2])
-
- if add_fc_ids:
- self._add_flowclassifier_port_assoc(add_fc_ids,
- port_chain['tenant_id'],
- sort_path_nodes[0],
- sort_path_nodes[-1],
- sort_path_nodes[-2])
-
- # notify agent with an async thread
- # currently we don't use greenthread.spawn
- self._thread_update_path_nodes(sort_path_nodes,
- add_fc_ids,
- None)
-
- @log_helpers.log_method_call
- def create_port_pair_group(self, context):
- group = context.current
- self.id_pool.assign_intid('group', group['id'])
-
- @log_helpers.log_method_call
- def delete_port_pair_group(self, context):
- group = context.current
- group_intid = self.id_pool.get_intid_by_uuid('group', group['id'])
- if group_intid:
- self.id_pool.release_intid('group', group_intid)
-
- @log_helpers.log_method_call
- def update_port_pair_group(self, context):
- current = context.current
- original = context.original
-
- if set(current['port_pairs']) == set(original['port_pairs']):
- return
-
- # Update the path_nodes and flows for each port chain that
- # contains this port_pair_group
- # Note: _get_port_pair_group is temporarily used here.
- ppg_obj = context._plugin._get_port_pair_group(context._plugin_context,
- current['id'])
- port_chains = [assoc.portchain_id for assoc in
- ppg_obj.chain_group_associations]
-
- for chain_id in port_chains:
- port_chain = context._plugin.get_port_chain(
- context._plugin_context, chain_id)
- group_intid = self.id_pool.get_intid_by_uuid('group',
- current['id'])
- # Get the previous node
- prev_node = self.get_path_node_by_filter(
- filters={'portchain_id': chain_id,
- 'next_group_id': group_intid})
- if not prev_node:
- continue
-
- before_update_prev_node = prev_node.copy()
- # Update the previous node
- _, curr_group_members = self._get_portgroup_members(context,
- current['id'])
- prev_node['next_hop'] = (
- jsonutils.dumps(curr_group_members)
- if curr_group_members else None
- )
- # persist the updated next hop to the database
- self.update_path_node(prev_node['id'], prev_node)
- if prev_node['node_type'] == ovs_const.SRC_NODE:
- self._delete_src_node_flowrules(
- before_update_prev_node, port_chain['flow_classifiers'])
- self._update_src_node_flowrules(
- prev_node, port_chain['flow_classifiers'], None)
- else:
- self._delete_path_node_flowrule(
- before_update_prev_node, port_chain['flow_classifiers'])
- self._update_path_node_flowrules(
- prev_node, port_chain['flow_classifiers'], None)
-
- # Update the current node
- # the current node is looked up by nsi rather than next_group_id,
- # since next_group_id is None when this group is the last one
- curr_pos = port_chain['port_pair_groups'].index(current['id'])
- curr_node = self.get_path_node_by_filter(
- filters={'portchain_id': chain_id,
- 'nsi': 0xfe - curr_pos})
- if not curr_node:
- continue
-
- # Add the port-pair-details into the current node
- for pp_id in (
- set(current['port_pairs']) - set(original['port_pairs'])
- ):
- ppd = self._get_port_pair_detail_by_port_pair(context,
- pp_id)
- if not ppd:
- LOG.debug('No port_pair_detail for the port_pair: %s',
- pp_id)
- LOG.debug("Failed to update port-pair-group")
- return
-
- assco_args = {'portpair_id': ppd['id'],
- 'pathnode_id': curr_node['id'],
- 'weight': 1, }
- self.create_pathport_assoc(assco_args)
- self._update_path_node_port_flowrules(
- curr_node, ppd, port_chain['flow_classifiers'])
-
- # Delete the port-pair-details from the current node
- for pp_id in (
- set(original['port_pairs']) - set(current['port_pairs'])
- ):
- ppd = self._get_port_pair_detail_by_port_pair(context,
- pp_id)
- if not ppd:
- LOG.debug('No port_pair_detail for the port_pair: %s',
- pp_id)
- LOG.debug("Failed to update port-pair-group")
- return
- self._delete_path_node_port_flowrule(
- curr_node, ppd, port_chain['flow_classifiers'])
- self.delete_pathport_assoc(curr_node['id'], ppd['id'])
-
- @log_helpers.log_method_call
- def _get_portpair_detail_info(self, portpair_id):
- """Get port detail.
-
- @param: portpair_id: uuid
- @return: (host_id, local_ip, network_type, segment_id,
- mac_address): tuple
- """
-
- core_plugin = manager.NeutronManager.get_plugin()
- port_detail = core_plugin.get_port(self.admin_context, portpair_id)
- host_id, local_ip, network_type, segment_id, mac_address = (
- (None, ) * 5)
-
- if port_detail:
- host_id = port_detail['binding:host_id']
- network_id = port_detail['network_id']
- mac_address = port_detail['mac_address']
- network_info = core_plugin.get_network(
- self.admin_context, network_id)
- network_type = network_info['provider:network_type']
- segment_id = network_info['provider:segmentation_id']
-
- if (
- host_id and
- network_type in [np_const.TYPE_GRE, np_const.TYPE_VXLAN]
- ):
- driver = core_plugin.type_manager.drivers.get(network_type)
- host_endpoint = driver.obj.get_endpoint_by_host(host_id)
- local_ip = host_endpoint['ip_address']
-
- return host_id, local_ip, network_type, segment_id, mac_address
-
- @log_helpers.log_method_call
- def _create_port_detail(self, port_pair):
- # Since the first node may not have an ingress port assigned, and the
- # last node may not have an egress port assigned, we use whichever
- # port is present as the key to get the SF information.
- port = None
- if port_pair.get('ingress', None):
- port = port_pair['ingress']
- elif port_pair.get('egress', None):
- port = port_pair['egress']
-
- host_id, local_endpoint, network_type, segment_id, mac_address = (
- self._get_portpair_detail_info(port))
- port_detail = {
- 'ingress': port_pair.get('ingress', None),
- 'egress': port_pair.get('egress', None),
- 'tenant_id': port_pair['tenant_id'],
- 'host_id': host_id,
- 'segment_id': segment_id,
- 'network_type': network_type,
- 'local_endpoint': local_endpoint,
- 'mac_address': mac_address
- }
- r = self.create_port_detail(port_detail)
- LOG.debug('create port detail: %s', r)
- return r
-
- @log_helpers.log_method_call
- def create_port_pair(self, context):
- port_pair = context.current
- self._create_port_detail(port_pair)
-
- @log_helpers.log_method_call
- def delete_port_pair(self, context):
- port_pair = context.current
-
- pd_filter = dict(ingress=port_pair.get('ingress', None),
- egress=port_pair.get('egress', None),
- tenant_id=port_pair['tenant_id']
- )
- pds = self.get_port_details_by_filter(pd_filter)
- if pds:
- for pd in pds:
- self.delete_port_detail(pd['id'])
-
- @log_helpers.log_method_call
- def update_port_pair(self, context):
- pass
-
- def get_flowrules_by_host_portid(self, context, host, port_id):
- port_chain_flowrules = []
- sfc_plugin = (
- manager.NeutronManager.get_service_plugins().get(
- sfc.SFC_EXT
- )
- )
- if not sfc_plugin:
- return port_chain_flowrules
- try:
- port_detail_list = []
- # a port may only appear as an ingress or egress port once
- ingress_port = self.get_port_detail_by_filter(
- dict(ingress=port_id))
- egress_port = self.get_port_detail_by_filter(
- dict(egress=port_id))
- if not ingress_port and not egress_port:
- return None
- # SF migrated to another host
- if ingress_port:
- port_detail_list.append(ingress_port)
- if ingress_port['host_id'] != host:
- ingress_port.update(dict(host_id=host))
-
- if egress_port:
- port_detail_list.append(egress_port)
- if egress_port['host_id'] != host:
- egress_port.update(dict(host_id=host))
-
- # this is an SF if there are both egress and ingress.
- for i, ports in enumerate(port_detail_list):
- nodes_assocs = ports['path_nodes']
- for assoc in nodes_assocs:
- # update current path flow rule
- node = self.get_path_node(assoc['pathnode_id'])
- port_chain = sfc_plugin.get_port_chain(
- context,
- node['portchain_id'])
- flow_rule = self._build_portchain_flowrule_body(
- node,
- ports,
- add_fc_ids=port_chain['flow_classifiers']
- )
- port_chain_flowrules.append(flow_rule)
-
- # update the pre-path node flow rule
- # if node['node_type'] != ovs_const.SRC_NODE:
- # node_filter = dict(nsp=node['nsp'],
- # nsi=node['nsi'] + 1
- # )
- # pre_node_list = self.get_path_nodes_by_filter(
- # node_filter)
- # if not pre_node_list:
- # continue
- # for pre_node in pre_node_list:
- # self._update_path_node_flowrules(
- # pre_node,
- # add_fc_ids=port_chain['flow_classifiers'])
-
- return port_chain_flowrules
-
- except Exception as e:
- LOG.exception(e)
- LOG.error(_LE("get_flowrules_by_host_portid failed"))
-
- def get_flow_classifier_by_portchain_id(self, context, portchain_id):
- try:
- flow_classifier_list = []
- sfc_plugin = (
- manager.NeutronManager.get_service_plugins().get(
- sfc.SFC_EXT
- )
- )
- if not sfc_plugin:
- return []
-
- port_chain = sfc_plugin.get_port_chain(
- context,
- portchain_id)
- flow_classifier_list = self._get_portchain_fcs(port_chain)
- return flow_classifier_list
- except Exception as e:
- LOG.exception(e)
- LOG.error(_LE("get_flow_classifier_by_portchain_id failed"))
-
- def update_flowrule_status(self, context, id, status):
- try:
- flowrule_status = dict(status=status)
- self.update_path_node(id, flowrule_status)
- except Exception as e:
- LOG.exception(e)
- LOG.error(_LE("update_flowrule_status failed"))
-
- def _update_src_node_flowrules(self, node,
- add_fc_ids=None, del_fc_ids=None):
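-        """Send the updated source-node flow rule to every alive OVS agent."""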
- flow_rule = self._get_portchain_src_node_flowrule(node,
- add_fc_ids,
- del_fc_ids)
- if not flow_rule:
- return
-
- core_plugin = manager.NeutronManager.get_plugin()
- pc_agents = core_plugin.get_agents(
- self.admin_context,
- filters={'agent_type': [nc_const.AGENT_TYPE_OVS]})
- if not pc_agents:
- return
-
- for agent in pc_agents:
- if agent['alive']:
-                # Stamp the flow rule with the agent's host before sending.
- flow_rule['host'] = agent['host']
- self.ovs_driver_rpc.ask_agent_to_update_src_node_flow_rules(
- self.admin_context,
- flow_rule)
-
- def _delete_src_node_flowrules(self, node, del_fc_ids=None):
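-        """Ask every alive OVS agent to delete the source-node flow rule."""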
- flow_rule = self._get_portchain_src_node_flowrule(node,
- None, del_fc_ids)
- if not flow_rule:
- return
-
- core_plugin = manager.NeutronManager.get_plugin()
- pc_agents = core_plugin.get_agents(
- self.admin_context, filters={
- 'agent_type': [nc_const.AGENT_TYPE_OVS]})
- if not pc_agents:
- return
-
- for agent in pc_agents:
- if agent['alive']:
-                # Stamp the flow rule with the agent's host and group refcount.
- self._update_portchain_group_reference_count(flow_rule,
- agent['host'])
- self.ovs_driver_rpc.ask_agent_to_delete_src_node_flow_rules(
- self.admin_context,
- flow_rule)
-
- def get_all_src_node_flowrules(self, context):
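-        """Collect the source-node (nsi 0xff) flow rule of every port chain."""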
- sfc_plugin = (
- manager.NeutronManager.get_service_plugins().get(
- sfc.SFC_EXT
- )
- )
- if not sfc_plugin:
- return []
- try:
- frs = []
- port_chains = sfc_plugin.get_port_chains(context)
-
- for port_chain in port_chains:
- # get the first node of this chain
- node_filters = dict(portchain_id=port_chain['id'], nsi=0xff)
- portchain_node = self.get_path_node_by_filter(node_filters)
- if not portchain_node:
- continue
- flow_rule = self._get_portchain_src_node_flowrule(
- portchain_node,
- port_chain['flow_classifiers']
- )
- if not flow_rule:
- continue
- frs.append(flow_rule)
- return frs
- except Exception as e:
- LOG.exception(e)
- LOG.error(_LE("get_all_src_node_flowrules failed"))
-
- def _get_portchain_src_node_flowrule(self, node,
- add_fc_ids=None, del_fc_ids=None):
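-        """Build a flow rule from classifiers without a logical source port."""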
- try:
- add_fc_rt = []
- del_fc_rt = []
-
- if add_fc_ids:
- for fc in self._get_fcs_by_ids(add_fc_ids):
- if not fc.get('logical_source_port', None):
- add_fc_rt.append(fc)
-
- if del_fc_ids:
- for fc in self._get_fcs_by_ids(del_fc_ids):
- if not fc.get('logical_source_port', None):
- del_fc_rt.append(fc)
-
- if not add_fc_rt and not del_fc_rt:
- return None
-
- return self._build_portchain_flowrule_body_without_port(
- node, add_fc_rt, del_fc_rt)
- except Exception as e:
- LOG.exception(e)
- LOG.error(_LE("_get_portchain_src_node_flowrule failed"))
-
- def _update_portchain_group_reference_count(self, flow_rule, host):
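-        """Set the rule's host and count references to its next_group_id."""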
- group_refcnt = 0
- flow_rule['host'] = host
-
- if flow_rule['next_group_id'] is not None:
- all_nodes = self.get_path_nodes_by_filter(
- filters={'next_group_id': flow_rule['next_group_id'],
- 'nsi': 0xff})
- if all_nodes is not None:
- for node in all_nodes:
- if not node['portpair_details']:
- group_refcnt += 1
-
- port_details = self.get_port_details_by_filter(
- dict(host_id=flow_rule['host']))
- if port_details is not None:
- for pd in port_details:
- for path in pd['path_nodes']:
- path_node = self.get_path_node(path['pathnode_id'])
- if (
- path_node['next_group_id'] ==
- flow_rule['next_group_id']
- ):
- group_refcnt += 1
-
- flow_rule['group_refcnt'] = group_refcnt
-
- return group_refcnt
-
- def _build_portchain_flowrule_body_without_port(self,
- node,
- add_fcs=None,
- del_fcs=None):
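-        """Build a flow rule body using network info from the first next hop."""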
- flow_rule = node.copy()
- flow_rule.pop('tenant_id')
- flow_rule.pop('portpair_details')
-
-        # Take network information from the first service function hop.
- if not node['next_hop']:
- return None
-
- next_hops = jsonutils.loads(node['next_hop'])
- if not next_hops:
- return None
-
- port_detail = self.get_port_detail_by_filter(
- dict(id=next_hops[0]['portpair_id']))
- if not port_detail:
- return None
-
- flow_rule['ingress'] = None
- flow_rule['egress'] = None
- flow_rule['network_type'] = port_detail['network_type']
- flow_rule['segment_id'] = port_detail['segment_id']
-
- flow_rule['add_fcs'] = add_fcs
- flow_rule['del_fcs'] = del_fcs
-
- # update next hop info
- self._update_path_node_next_hops(flow_rule)
- return flow_rule
diff --git a/networking_sfc/services/sfc/drivers/ovs/rpc.py b/networking_sfc/services/sfc/drivers/ovs/rpc.py
deleted file mode 100644
index a5ac0bc..0000000
--- a/networking_sfc/services/sfc/drivers/ovs/rpc.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from networking_sfc.services.sfc.drivers.ovs import rpc_topics as sfc_topics
-from neutron.common import rpc as n_rpc
-from neutron.common import topics
-from neutron.i18n import _LI
-
-from oslo_log import log as logging
-import oslo_messaging
-
-LOG = logging.getLogger(__name__)
-
-
-class SfcRpcCallback(object):
- """Sfc RPC server."""
-
- def __init__(self, driver):
- self.target = oslo_messaging.Target(version='1.0')
- self.driver = driver
-
- def get_flowrules_by_host_portid(self, context, **kwargs):
- host = kwargs.get('host')
- port_id = kwargs.get('port_id')
-        LOG.debug('get_flowrules_by_host_portid requested via RPC')
- pcfrs = self.driver.get_flowrules_by_host_portid(
- context, host, port_id)
- LOG.debug('host: %s, port_id: %s', host, port_id)
- return pcfrs
-
- def get_flow_classifier_by_portchain_id(self, context, **kwargs):
- portchain_id = kwargs.get('portchain_id')
- pcfcs = self.driver.get_flow_classifier_by_portchain_id(
- context,
- portchain_id)
- LOG.debug('portchain id: %s', portchain_id)
- return pcfcs
-
- def get_all_src_node_flowrules(self, context, **kwargs):
- host = kwargs.get('host')
- pcfcs = self.driver.get_all_src_node_flowrules(
- context)
- LOG.debug('portchain get_src_node_flowrules, host: %s', host)
- return pcfcs
-
- def update_flowrules_status(self, context, **kwargs):
- flowrules_status = kwargs.get('flowrules_status')
- LOG.info(_LI('update_flowrules_status: %s'), flowrules_status)
- for flowrule_dict in flowrules_status:
- self.driver.update_flowrule_status(
- context, flowrule_dict['id'], flowrule_dict['status'])
-
-
-class SfcAgentRpcClient(object):
- """RPC client for ovs sfc agent."""
-
- def __init__(self, topic=sfc_topics.SFC_AGENT):
- self.topic = topic
- target = oslo_messaging.Target(topic=topic, version='1.0')
- self.client = n_rpc.get_client(target)
-
- def ask_agent_to_update_flow_rules(self, context, flows):
-        LOG.debug('Ask agent on the specific host to update flows')
- LOG.debug('flows: %s', flows)
- host = flows.get('host')
- cctxt = self.client.prepare(
- topic=topics.get_topic_name(
- self.topic, sfc_topics.PORTFLOW, topics.UPDATE),
- server=host)
- cctxt.cast(context, 'update_flow_rules', flowrule_entries=flows)
-
- def ask_agent_to_delete_flow_rules(self, context, flows):
-        LOG.debug('Ask agent on the specific host to delete flows')
- LOG.debug('flows: %s', flows)
- host = flows.get('host')
- cctxt = self.client.prepare(
- topic=topics.get_topic_name(
- self.topic, sfc_topics.PORTFLOW, topics.DELETE),
- server=host)
- cctxt.cast(context, 'delete_flow_rules', flowrule_entries=flows)
-
- def ask_agent_to_update_src_node_flow_rules(self, context, flows):
-        LOG.debug('Ask agent on the specific host to update src node flows')
- LOG.debug('flows: %s', flows)
- host = flows.get('host')
- cctxt = self.client.prepare(
- topic=topics.get_topic_name(
- self.topic, sfc_topics.PORTFLOW, topics.UPDATE),
- server=host)
- cctxt.cast(context, 'update_src_node_flow_rules',
- flowrule_entries=flows)
-
- def ask_agent_to_delete_src_node_flow_rules(self, context, flows):
- LOG.debug('Ask agent on the specific host to delete src node flows')
- LOG.debug('flows: %s', flows)
- host = flows.get('host')
- cctxt = self.client.prepare(
- topic=topics.get_topic_name(
- self.topic, sfc_topics.PORTFLOW, topics.DELETE),
- server=host)
- cctxt.cast(context, 'delete_src_node_flow_rules',
- flowrule_entries=flows)
diff --git a/networking_sfc/services/sfc/drivers/ovs/rpc_topics.py b/networking_sfc/services/sfc/drivers/ovs/rpc_topics.py
deleted file mode 100644
index a35ff4f..0000000
--- a/networking_sfc/services/sfc/drivers/ovs/rpc_topics.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
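-# RPC topic name constants shared by the SFC plugin and the OVS SFC agent.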
-AGENT = 'q-agent-notifier'
-
-SFC_PLUGIN = 'q-sfc-plugin'
-SFC_AGENT = 'q-sfc-agent'
-SFC_FLOW = 'q-sfc-flow'
-
-PORTFLOW = 'portflowrule'
diff --git a/networking_sfc/services/sfc/plugin.py b/networking_sfc/services/sfc/plugin.py
deleted file mode 100644
index f41c8e1..0000000
--- a/networking_sfc/services/sfc/plugin.py
+++ /dev/null
@@ -1,200 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import helpers as log_helpers
-from oslo_log import log as logging
-from oslo_utils import excutils
-
-from neutron.i18n import _LE
-
-from networking_sfc.db import sfc_db
-from networking_sfc.extensions import sfc as sfc_ext
-from networking_sfc.services.sfc.common import context as sfc_ctx
-from networking_sfc.services.sfc.common import exceptions as sfc_exc
-from networking_sfc.services.sfc import driver_manager as sfc_driver
-
-
-LOG = logging.getLogger(__name__)
-
-
-class SfcPlugin(sfc_db.SfcDbPlugin):
- """SFC plugin implementation."""
-
- supported_extension_aliases = [sfc_ext.SFC_EXT]
- path_prefix = sfc_ext.SFC_PREFIX
-
- def __init__(self):
- self.driver_manager = sfc_driver.SfcDriverManager()
- super(SfcPlugin, self).__init__()
- self.driver_manager.initialize()
-
- @log_helpers.log_method_call
- def create_port_chain(self, context, port_chain):
- port_chain_db = super(SfcPlugin, self).create_port_chain(
- context, port_chain)
- portchain_db_context = sfc_ctx.PortChainContext(
- self, context, port_chain_db)
- try:
- self.driver_manager.create_port_chain(portchain_db_context)
- except sfc_exc.SfcDriverError as e:
- LOG.exception(e)
- with excutils.save_and_reraise_exception():
- LOG.error(_LE("Create port chain failed, "
- "deleting port_chain '%s'"),
- port_chain_db['id'])
- self.delete_port_chain(context, port_chain_db['id'])
-
- return port_chain_db
-
- @log_helpers.log_method_call
- def update_port_chain(self, context, portchain_id, port_chain):
- original_portchain = self.get_port_chain(context, portchain_id)
- updated_portchain = super(SfcPlugin, self).update_port_chain(
- context, portchain_id, port_chain)
- portchain_db_context = sfc_ctx.PortChainContext(
- self, context, updated_portchain,
- original_portchain=original_portchain)
-
- try:
- self.driver_manager.update_port_chain(portchain_db_context)
- except sfc_exc.SfcDriverError as e:
- LOG.exception(e)
- with excutils.save_and_reraise_exception():
- LOG.error(_LE("Update port chain failed, port_chain '%s'"),
- updated_portchain['id'])
-
- # TODO(qijing): should we rollback the database update here?
- return updated_portchain
-
- @log_helpers.log_method_call
- def delete_port_chain(self, context, portchain_id):
- pc = self.get_port_chain(context, portchain_id)
- pc_context = sfc_ctx.PortChainContext(self, context, pc)
- try:
- self.driver_manager.delete_port_chain(pc_context)
- except sfc_exc.SfcDriverError as e:
- LOG.exception(e)
- with excutils.save_and_reraise_exception():
- LOG.error(_LE("Delete port chain failed, portchain '%s'"),
- portchain_id)
- # TODO(qijing): unsync in case deleted in driver but fail in database
- super(SfcPlugin, self).delete_port_chain(context, portchain_id)
-
- @log_helpers.log_method_call
- def create_port_pair(self, context, port_pair):
- portpair_db = super(SfcPlugin, self).create_port_pair(
- context, port_pair)
- portpair_context = sfc_ctx.PortPairContext(
- self, context, portpair_db)
- try:
- self.driver_manager.create_port_pair(portpair_context)
- except sfc_exc.SfcDriverError as e:
- LOG.exception(e)
- with excutils.save_and_reraise_exception():
- LOG.error(_LE("Create port pair failed, "
- "deleting port_pair '%s'"),
- portpair_db['id'])
- self.delete_port_pair(context, portpair_db['id'])
-
- return portpair_db
-
- @log_helpers.log_method_call
- def update_port_pair(self, context, portpair_id, port_pair):
- original_portpair = self.get_port_pair(context, portpair_id)
- updated_portpair = super(SfcPlugin, self).update_port_pair(
- context, portpair_id, port_pair)
- portpair_context = sfc_ctx.PortPairContext(
- self, context, updated_portpair,
- original_portpair=original_portpair)
- try:
- self.driver_manager.update_port_pair(portpair_context)
- except sfc_exc.SfcDriverError as e:
- LOG.exception(e)
- with excutils.save_and_reraise_exception():
- LOG.error(_LE("Update port pair failed, port_pair '%s'"),
- updated_portpair['id'])
-
- return updated_portpair
-
- @log_helpers.log_method_call
- def delete_port_pair(self, context, portpair_id):
- portpair = self.get_port_pair(context, portpair_id)
- portpair_context = sfc_ctx.PortPairContext(
- self, context, portpair)
- try:
- self.driver_manager.delete_port_pair(portpair_context)
- except sfc_exc.SfcDriverError as e:
- LOG.exception(e)
- with excutils.save_and_reraise_exception():
- LOG.error(_LE("Delete port pair failed, port_pair '%s'"),
- portpair_id)
-
- super(SfcPlugin, self).delete_port_pair(context, portpair_id)
-
- @log_helpers.log_method_call
- def create_port_pair_group(self, context, port_pair_group):
- portpairgroup_db = super(SfcPlugin, self).create_port_pair_group(
- context, port_pair_group)
- portpairgroup_context = sfc_ctx.PortPairGroupContext(
- self, context, portpairgroup_db)
- try:
- self.driver_manager.create_port_pair_group(portpairgroup_context)
- except sfc_exc.SfcDriverError as e:
- LOG.exception(e)
- with excutils.save_and_reraise_exception():
- LOG.error(_LE("Create port pair group failed, "
- "deleting port_pair_group '%s'"),
- portpairgroup_db['id'])
- self.delete_port_pair_group(context, portpairgroup_db['id'])
-
- return portpairgroup_db
-
- @log_helpers.log_method_call
- def update_port_pair_group(
- self, context, portpairgroup_id, port_pair_group
- ):
- original_portpairgroup = self.get_port_pair_group(
- context, portpairgroup_id)
- updated_portpairgroup = super(SfcPlugin, self).update_port_pair_group(
- context, portpairgroup_id, port_pair_group)
- portpairgroup_context = sfc_ctx.PortPairGroupContext(
- self, context, updated_portpairgroup,
- original_portpairgroup=original_portpairgroup)
- try:
- self.driver_manager.update_port_pair_group(portpairgroup_context)
- except sfc_exc.SfcDriverError as e:
- LOG.exception(e)
- with excutils.save_and_reraise_exception():
- LOG.error(_LE("Update port pair group failed, "
- "port_pair_group '%s'"),
- updated_portpairgroup['id'])
-
- return updated_portpairgroup
-
- @log_helpers.log_method_call
- def delete_port_pair_group(self, context, portpairgroup_id):
- portpairgroup = self.get_port_pair_group(context, portpairgroup_id)
- portpairgroup_context = sfc_ctx.PortPairGroupContext(
- self, context, portpairgroup)
- try:
- self.driver_manager.delete_port_pair_group(portpairgroup_context)
- except sfc_exc.SfcDriverError as e:
- LOG.exception(e)
- with excutils.save_and_reraise_exception():
- LOG.error(_LE("Delete port pair group failed, "
- "port_pair_group '%s'"),
- portpairgroup_id)
-
- super(SfcPlugin, self).delete_port_pair_group(context,
- portpairgroup_id)
diff --git a/networking_sfc/tests/__init__.py b/networking_sfc/tests/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/tests/__init__.py
+++ /dev/null
diff --git a/networking_sfc/tests/base.py b/networking_sfc/tests/base.py
deleted file mode 100644
index 83fa3c9..0000000
--- a/networking_sfc/tests/base.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-import mock
-from oslo_utils import uuidutils
-
-from neutron.agent import securitygroups_rpc as sg_rpc
-from neutron.api import extensions as api_ext
-from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api as dhcp_rpc_log
-from neutron.api.v2 import resource as api_res_log
-from neutron.common import config as cfg
-from neutron.extensions import vlantransparent as vlan_log
-from neutron import manager
-from neutron.notifiers import nova as nova_log
-from neutron.plugins.ml2 import config
-from neutron.plugins.ml2 import db as ml2_db
-from neutron.plugins.ml2.drivers import type_flat
-from neutron.plugins.ml2.drivers import type_local
-from neutron.plugins.ml2.drivers import type_tunnel
-from neutron.plugins.ml2.drivers import type_vlan
-from neutron.plugins.ml2 import managers as ml2_manager
-from neutron.plugins.ml2 import plugin as ml2_plugin
-from neutron import quota as quota_log
-from neutron.scheduler import dhcp_agent_scheduler as dhcp_agent_log
-
-from neutron.tests import base as n_base
-from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db_plugin
-
-
-class BaseTestCase(n_base.BaseTestCase):
- pass
-
-
-class NeutronDbPluginV2TestCase(test_db_plugin.NeutronDbPluginV2TestCase):
- def setUp(self, plugin=None, service_plugins=None, ext_mgr=None):
-        self._mock_unnecessary_logging()
-
- if not plugin:
- plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin'
- config.cfg.CONF.set_override('tenant_network_types', ['gre'],
- group='ml2')
- config.cfg.CONF.set_override(
- 'tunnel_id_ranges', ['1:1000'], group='ml2_type_gre')
- config.cfg.CONF.set_override(
- 'mechanism_drivers', ['openvswitch'], group='ml2')
- super(NeutronDbPluginV2TestCase, self).setUp(
- ext_mgr=ext_mgr,
- plugin=plugin,
- service_plugins=service_plugins
- )
- self._tenant_id = uuidutils.generate_uuid()
- self._network = self._make_network(
- self.fmt, 'net1',
- True)
- self._subnet = self._make_subnet(
- self.fmt, self._network, gateway='10.0.0.1',
- cidr='10.0.0.0/24', ip_version=4
- )
-
-    def _mock_unnecessary_logging(self):
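-        """Patch noisy neutron LOG objects so test output stays quiet."""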
- mock_log_sg_rpc_p = mock.patch.object(sg_rpc, 'LOG')
- self.mock_log_sg_rpc = mock_log_sg_rpc_p.start()
-
- mock_log_api_ext_p = mock.patch.object(api_ext, 'LOG')
- self.mock_log_api_ext = mock_log_api_ext_p.start()
-
- mock_log_dhcp_rpc_log_p = mock.patch.object(dhcp_rpc_log, 'LOG')
- self.mock_log_dhcp_rpc_log = mock_log_dhcp_rpc_log_p.start()
-
- mock_log_api_res_log_p = mock.patch.object(api_res_log, 'LOG')
- self.mock_log_api_res_log = mock_log_api_res_log_p.start()
-
- mock_log_cfg_p = mock.patch.object(cfg, 'LOG')
- self.mock_log_cfg = mock_log_cfg_p.start()
-
- mock_log_vlan_log_p = mock.patch.object(vlan_log, 'LOG')
- self.mock_log_vlan_log = mock_log_vlan_log_p.start()
-
- mock_log_manager_p = mock.patch.object(manager, 'LOG')
- self.mock_log_manager = mock_log_manager_p.start()
-
- mock_log_nova_p = mock.patch.object(nova_log, 'LOG')
- self.mock_log_nova = mock_log_nova_p.start()
-
- mock_log_ml2_db_p = mock.patch.object(ml2_db, 'LOG')
- self.mock_log_ml2_db = mock_log_ml2_db_p.start()
-
- mock_log_ml2_manager_p = mock.patch.object(ml2_manager, 'LOG')
- self.mock_log_ml2_manager = mock_log_ml2_manager_p.start()
-
- mock_log_plugin_p = mock.patch.object(ml2_plugin, 'LOG')
- self.mock_log_plugin = mock_log_plugin_p.start()
-
- mock_log_type_flat_p = mock.patch.object(type_flat, 'LOG')
- self.mock_log_type_flat = mock_log_type_flat_p.start()
-
- mock_log_type_local_p = mock.patch.object(type_local, 'LOG')
- self.mock_log_type_local = mock_log_type_local_p.start()
-
- mock_log_type_tunnel_p = mock.patch.object(type_tunnel, 'LOG')
- self.mock_log_type_tunnel = mock_log_type_tunnel_p.start()
-
- mock_log_type_vlan_p = mock.patch.object(type_vlan, 'LOG')
- self.mock_log_type_vlan = mock_log_type_vlan_p.start()
-
- mock_log_quota_log_p = mock.patch.object(quota_log, 'LOG')
- self.mock_log_quota_log = mock_log_quota_log_p.start()
-
- mock_log_dhcp_agent_log_p = mock.patch.object(dhcp_agent_log, 'LOG')
- self.mock_log_dhcp_agent_log = mock_log_dhcp_agent_log_p.start()
-
- def tearDown(self):
- super(NeutronDbPluginV2TestCase, self).tearDown()
-
- @contextlib.contextmanager
- def port(self, fmt=None, **kwargs):
- net_id = self._network['network']['id']
- port = self._make_port(fmt or self.fmt, net_id, **kwargs)
- yield port
diff --git a/networking_sfc/tests/unit/__init__.py b/networking_sfc/tests/unit/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/tests/unit/__init__.py
+++ /dev/null
diff --git a/networking_sfc/tests/unit/cli/__init__.py b/networking_sfc/tests/unit/cli/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/tests/unit/cli/__init__.py
+++ /dev/null
diff --git a/networking_sfc/tests/unit/cli/test_flow_classifier.py b/networking_sfc/tests/unit/cli/test_flow_classifier.py
deleted file mode 100644
index 37c2142..0000000
--- a/networking_sfc/tests/unit/cli/test_flow_classifier.py
+++ /dev/null
@@ -1,182 +0,0 @@
-# Copyright 2015 Huawei Technologies India Pvt. Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import sys
-import uuid
-
-import mock
-
-from neutronclient import shell
-from neutronclient.tests.unit import test_cli20
-
-from networking_sfc.cli import flow_classifier as fc
-
-source_port_UUID = str(uuid.uuid4())
-destination_port_UUID = str(uuid.uuid4())
-
-
-class CLITestV20FCExtensionJSON(test_cli20.CLITestV20Base):
- def setUp(self):
- self._mock_extension_loading()
- super(CLITestV20FCExtensionJSON, self).setUp(plurals={})
- self.register_non_admin_status_resource('flow_classifier')
-
- def _create_patch(self, name, func=None):
- patcher = mock.patch(name)
- thing = patcher.start()
- self.addCleanup(patcher.stop)
- return thing
-
- def _mock_extension_loading(self):
- ext_pkg = 'neutronclient.common.extension'
- flow_classifier = self._create_patch(ext_pkg +
- '._discover_via_entry_points')
- flow_classifier.return_value = [("flow_classifier", fc)]
- return flow_classifier
-
- def test_ext_cmd_loaded(self):
- shell.NeutronShell('2.0')
- ext_cmd = {'flow-classifier-list': fc.FlowClassifierList,
- 'flow-classifier-create': fc.FlowClassifierCreate,
- 'flow-classifier-update': fc.FlowClassifierUpdate,
- 'flow-classifier-delete': fc.FlowClassifierDelete,
- 'flow-classifier-show': fc.FlowClassifierShow}
- self.assertDictContainsSubset(ext_cmd, shell.COMMANDS['2.0'])
-
- def test_create_flow_classifier_with_mandatory_params(self):
-        """Create flow-classifier: flow1 with mandatory parameters."""
- resource = 'flow_classifier'
- cmd = fc.FlowClassifierCreate(test_cli20.MyApp(sys.stdout), None)
- myid = 'myid'
- name = 'flow1'
- ethertype = 'IPv4'
- args = [name, '--ethertype', ethertype]
- position_names = ['name', 'ethertype']
- position_values = [name, ethertype]
- self._test_create_resource(resource, cmd, name, myid, args,
- position_names, position_values)
-
- def test_create_flow_classifier_with_all_params(self):
-        """Create flow-classifier: flow1 with all parameters."""
- resource = 'flow_classifier'
- cmd = fc.FlowClassifierCreate(test_cli20.MyApp(sys.stdout), None)
- myid = 'myid'
- name = 'flow1'
- protocol_name = 'TCP'
- ethertype = 'IPv4'
- source_port = '0:65535'
- source_port_min = 0
- source_port_max = 65535
- destination_port = '1:65534'
- destination_port_min = 1
- destination_port_max = 65534
- source_ip = '192.168.1.0/24'
- destination_ip = '192.168.2.0/24'
- logical_source_port = '4a334cd4-fe9c-4fae-af4b-321c5e2eb051'
- logical_destination_port = '1278dcd4-459f-62ed-754b-87fc5e4a6751'
- description = 'my-desc'
- l7_param = "url=my_url"
- l7_param_expected = {"url": "my_url"}
- args = [name,
- '--protocol', protocol_name,
- '--ethertype', ethertype,
- '--source-port', source_port,
- '--destination-port', destination_port,
- '--source-ip-prefix', source_ip,
- '--destination-ip-prefix', destination_ip,
- '--logical-source-port', logical_source_port,
- '--logical-destination-port', logical_destination_port,
- '--description', description,
- '--l7-parameters', l7_param]
- position_names = ['name', 'protocol', 'ethertype',
- 'source_port_range_min', 'source_port_range_max',
- 'destination_port_range_min',
- 'destination_port_range_max',
- 'source_ip_prefix', 'destination_ip_prefix',
- 'logical_source_port', 'logical_destination_port',
- 'description', 'l7_parameters']
- position_values = [name, protocol_name, ethertype,
- source_port_min, source_port_max,
- destination_port_min, destination_port_max,
- source_ip, destination_ip, logical_source_port,
- logical_destination_port, description,
- l7_param_expected]
- self._test_create_resource(resource, cmd, name, myid, args,
- position_names, position_values)
-
- def test_list_flow_classifier(self):
- """List available flow-classifiers."""
- resources = "flow_classifiers"
- cmd = fc.FlowClassifierList(test_cli20.MyApp(sys.stdout), None)
- self._test_list_resources(resources, cmd, True)
-
- def test_list_flow_classifier_sort(self):
-        """flow-classifier-list --sort-key name --sort-key id
-
-        --sort-dir asc --sort-dir desc.
-        """
- resources = "flow_classifiers"
- cmd = fc.FlowClassifierList(test_cli20.MyApp(sys.stdout), None)
- self._test_list_resources(resources, cmd,
- sort_key=["name", "id"],
- sort_dir=["asc", "desc"])
-
- def test_list_flow_classifier_limit(self):
- """flow-classifier-list -P."""
- resources = "flow_classifiers"
- cmd = fc.FlowClassifierList(test_cli20.MyApp(sys.stdout), None)
- self._test_list_resources(resources, cmd, page_size=1000)
-
- def test_show_flow_classifier_id(self):
- """flow-classifier-show test_id."""
- resource = 'flow_classifier'
- cmd = fc.FlowClassifierShow(test_cli20.MyApp(sys.stdout), None)
- args = ['--fields', 'id', self.test_id]
- self._test_show_resource(resource, cmd, self.test_id, args, ['id'])
-
- def test_show_flow_classifier_id_name(self):
-        """flow-classifier-show --fields id --fields name test_id."""
- resource = 'flow_classifier'
- cmd = fc.FlowClassifierShow(test_cli20.MyApp(sys.stdout), None)
- args = ['--fields', 'id', '--fields', 'name', self.test_id]
- self._test_show_resource(resource, cmd, self.test_id,
- args, ['id', 'name'])
-
- def test_update_flow_classifier_description(self):
-        """flow-classifier-update myid --description flow_classifier2."""
- resource = 'flow_classifier'
- cmd = fc.FlowClassifierUpdate(test_cli20.MyApp(sys.stdout), None)
- myid = 'myid'
- args = [myid, '--description', 'flow_classifier1', '--description',
- 'flow_classifier2']
- updatefields = {'description': 'flow_classifier2'}
- self._test_update_resource(resource, cmd, myid, args, updatefields)
-
- def test_update_flow_classifier_name(self):
-        """flow-classifier-update myid --name myname."""
- resource = 'flow_classifier'
- cmd = fc.FlowClassifierUpdate(test_cli20.MyApp(sys.stdout), None)
- self._test_update_resource(resource, cmd, 'myid',
- ['myid', '--name', 'myname'],
- {'name': 'myname'})
-
-    def test_delete_flow_classifier(self):
-        """flow-classifier-delete myid1."""
- resource = 'flow_classifier'
- cmd = fc.FlowClassifierDelete(test_cli20.MyApp(sys.stdout), None)
- my_id = 'myid1'
- args = [my_id]
- self._test_delete_resource(resource, cmd, my_id, args)
diff --git a/networking_sfc/tests/unit/cli/test_port_chain.py b/networking_sfc/tests/unit/cli/test_port_chain.py
deleted file mode 100644
index 0d834bd..0000000
--- a/networking_sfc/tests/unit/cli/test_port_chain.py
+++ /dev/null
@@ -1,186 +0,0 @@
-# Copyright 2015 Huawei Technologies India Pvt. Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import sys
-import uuid
-
-import mock
-
-from neutronclient import shell
-from neutronclient.tests.unit import test_cli20
-
-from networking_sfc.cli import port_chain as pc
-
-FAKE_port_pair_group1_UUID = str(uuid.uuid4())
-FAKE_port_pair_group2_UUID = str(uuid.uuid4())
-FAKE_FC1_UUID = str(uuid.uuid4())
-FAKE_FC2_UUID = str(uuid.uuid4())
-FAKE_PARAM1_UUID = str(uuid.uuid4())
-FAKE_PARAM2_UUID = str(uuid.uuid4())
-
-
-class CLITestV20PortChainExtensionJSON(test_cli20.CLITestV20Base):
- def setUp(self):
- self._mock_extension_loading()
- super(CLITestV20PortChainExtensionJSON, self).setUp()
- self.register_non_admin_status_resource('port_chain')
-
- def _create_patch(self, name, func=None):
- patcher = mock.patch(name)
- thing = patcher.start()
- self.addCleanup(patcher.stop)
- return thing
-
- def _mock_extension_loading(self):
- ext_pkg = 'neutronclient.common.extension'
- port_chain = self._create_patch(ext_pkg +
- '._discover_via_entry_points')
- port_chain.return_value = [("port_chain", pc)]
- return port_chain
-
- def test_ext_cmd_loaded(self):
- shell.NeutronShell('2.0')
- ext_cmd = {'port-chain-list': pc.PortChainList,
- 'port-chain-create': pc.PortChainCreate,
- 'port-chain-update': pc.PortChainUpdate,
- 'port-chain-delete': pc.PortChainDelete,
- 'port-chain-show': pc.PortChainShow}
- self.assertDictContainsSubset(ext_cmd, shell.COMMANDS['2.0'])
-
- def test_create_port_chain_with_mandatory_param(self):
- """Create port_chain: myname."""
- resource = 'port_chain'
- cmd = pc.PortChainCreate(test_cli20.MyApp(sys.stdout),
- None)
- name = 'myname'
- myid = 'myid'
- args = [name, '--port-pair-group', FAKE_port_pair_group1_UUID]
- position_names = ['name', 'port_pair_groups']
- position_values = [name, [FAKE_port_pair_group1_UUID]]
- self._test_create_resource(resource, cmd, name, myid, args,
- position_names, position_values)
-
- def test_create_port_chain_with_multiple_port_pair_group(self):
- """Create port_chain: myname."""
- resource = 'port_chain'
- cmd = pc.PortChainCreate(test_cli20.MyApp(sys.stdout), None)
- name = 'myname'
- myid = 'myid'
- args = [name, '--port-pair-group', FAKE_port_pair_group1_UUID,
- '--port-pair-group', FAKE_port_pair_group2_UUID]
- position_names = ['name', 'port_pair_groups']
- position_values = [name, [FAKE_port_pair_group1_UUID,
- FAKE_port_pair_group2_UUID]]
- self._test_create_resource(resource, cmd, name, myid, args,
- position_names, position_values)
-
- def test_create_port_chain_with_all_params(self):
- """Create port_chain: myname."""
- resource = 'port_chain'
- cmd = pc.PortChainCreate(test_cli20.MyApp(sys.stdout), None)
- name = 'myname'
- myid = 'myid'
- desc = 'check port chain cli'
- chain_parameter = "correlation=mpls"
- chain_parameter_expected = {"correlation": "mpls"}
- args = [name, '--description', desc, '--port-pair-group',
- FAKE_port_pair_group1_UUID, '--flow-classifier',
- FAKE_FC1_UUID, '--chain-parameters', chain_parameter]
- position_names = ['name', 'description', 'port_pair_groups',
- 'flow_classifiers', 'chain_parameters']
- position_values = [name, desc, [FAKE_port_pair_group1_UUID],
- [FAKE_FC1_UUID], chain_parameter_expected]
- self._test_create_resource(resource, cmd, name, myid, args,
- position_names, position_values)
-
- def test_create_port_chain_with_single_classifier(self):
- """Create port_chain: myname."""
- resource = 'port_chain'
- cmd = pc.PortChainCreate(test_cli20.MyApp(sys.stdout), None)
- name = 'myname'
- myid = 'myid'
- args = [name, '--port-pair-group', FAKE_port_pair_group1_UUID,
- '--flow-classifier', FAKE_FC1_UUID]
- position_names = ['name', 'port_pair_groups', 'flow_classifiers']
- position_values = [name, [FAKE_port_pair_group1_UUID], [FAKE_FC1_UUID]]
- self._test_create_resource(resource, cmd, name, myid, args,
- position_names, position_values)
-
- def test_create_port_chain_with_multiple_classifiers(self):
- """Create port_chain: myname."""
- resource = 'port_chain'
- cmd = pc.PortChainCreate(test_cli20.MyApp(sys.stdout), None)
- name = 'myname'
- myid = 'myid'
- args = [name, '--port-pair-group', FAKE_port_pair_group1_UUID,
- '--flow-classifier', FAKE_FC1_UUID,
- '--flow-classifier', FAKE_FC2_UUID]
- position_names = ['name', 'port_pair_groups', 'flow_classifiers']
- position_values = [name, [FAKE_port_pair_group1_UUID], [FAKE_FC1_UUID,
- FAKE_FC2_UUID]]
- self._test_create_resource(resource, cmd, name, myid, args,
- position_names, position_values)
-
- def test_update_port_chain(self):
- """Update port_chain: myid --name myname."""
- resource = 'port_chain'
- cmd = pc.PortChainUpdate(test_cli20.MyApp(sys.stdout), None)
- self._test_update_resource(resource, cmd, 'myid',
- ['myid', '--name', 'myname'],
- {'name': 'myname'})
-
- def test_update_port_chain_with_no_flow_classifier(self):
-        """Update port_chain: myid --name myname --no-flow-classifier."""
- resource = 'port_chain'
- cmd = pc.PortChainUpdate(test_cli20.MyApp(sys.stdout), None)
- self._test_update_resource(resource, cmd, 'myid',
- ['myid', '--name', 'myname',
- '--no-flow-classifier'],
- {'name': 'myname',
- 'flow_classifiers': []})
-
- def test_delete_port_chain(self):
- """Delete port-chain: myid."""
- resource = 'port_chain'
- cmd = pc.PortChainDelete(test_cli20.MyApp(sys.stdout), None)
- myid = 'myid'
- args = [myid]
- self._test_delete_resource(resource, cmd, myid, args)
-
- def test_list_port_chain(self):
- """List port_chain."""
- resources = 'port_chains'
- cmd = pc.PortChainList(test_cli20.MyApp(sys.stdout), None)
- self._test_list_resources(resources, cmd, True)
-
- def test_list_port_chains_sort(self):
-        """List port_chains: --sort-key name --sort-key id
-
-        --sort-dir asc --sort-dir desc.
-        """
- resources = "port_chains"
- cmd = pc.PortChainList(test_cli20.MyApp(sys.stdout), None)
- self._test_list_resources(resources, cmd,
- sort_key=["name", "id"],
- sort_dir=["asc", "desc"])
-
- def test_show_port_chain(self):
- """Show port-chain: --fields id --fields name myid."""
- resource = 'port_chain'
- cmd = pc.PortChainShow(test_cli20.MyApp(sys.stdout), None)
- args = ['--fields', 'id', '--fields', 'name', self.test_id]
- self._test_show_resource(resource, cmd, self.test_id,
- args, ['id', 'name'])
diff --git a/networking_sfc/tests/unit/cli/test_port_pair.py b/networking_sfc/tests/unit/cli/test_port_pair.py
deleted file mode 100644
index 9a302e2..0000000
--- a/networking_sfc/tests/unit/cli/test_port_pair.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# Copyright 2015 Huawei Technologies India Pvt. Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import sys
-import uuid
-
-import mock
-
-from neutronclient import shell
-from neutronclient.tests.unit import test_cli20
-
-from networking_sfc.cli import port_pair as pp
-
-ingress_port_UUID = str(uuid.uuid4())
-egress_port_UUID = str(uuid.uuid4())
-
-
-class CLITestV20PortPairExtensionJSON(test_cli20.CLITestV20Base):
- def setUp(self):
- self._mock_extension_loading()
- super(CLITestV20PortPairExtensionJSON, self).setUp()
- self.register_non_admin_status_resource('port_pair')
-
- def _create_patch(self, name, func=None):
- patcher = mock.patch(name)
- thing = patcher.start()
- self.addCleanup(patcher.stop)
- return thing
-
- def _mock_extension_loading(self):
- ext_pkg = 'neutronclient.common.extension'
- port_pair = self._create_patch(ext_pkg +
- '._discover_via_entry_points')
- port_pair.return_value = [("port_pair", pp)]
- return port_pair
-
- def test_ext_cmd_loaded(self):
- shell.NeutronShell('2.0')
- ext_cmd = {'port-pair-list': pp.PortPairList,
- 'port-pair-create': pp.PortPairCreate,
- 'port-pair-update': pp.PortPairUpdate,
- 'port-pair-delete': pp.PortPairDelete,
- 'port-pair-show': pp.PortPairShow}
- self.assertDictContainsSubset(ext_cmd, shell.COMMANDS['2.0'])
-
- def test_create_port_pair_with_mandatory_param(self):
- """Create port_pair: myname."""
- resource = 'port_pair'
- cmd = pp.PortPairCreate(test_cli20.MyApp(sys.stdout), None)
- name = 'myname'
- myid = 'myid'
- args = [name, '--ingress', ingress_port_UUID,
- '--egress', egress_port_UUID]
- position_names = ['name', 'ingress', 'egress']
- position_values = [name, ingress_port_UUID, egress_port_UUID]
- self._test_create_resource(resource, cmd, name, myid, args,
- position_names, position_values)
-
-    def test_create_port_pair_with_bidirectional_port(self):
- """Create port_pair: myname with bidirectional port."""
- resource = 'port_pair'
- cmd = pp.PortPairCreate(test_cli20.MyApp(sys.stdout), None)
- name = 'myname'
- myid = 'myid'
- args = [name, '--ingress', ingress_port_UUID,
- '--egress', ingress_port_UUID]
- position_names = ['name', 'ingress', 'egress']
- position_values = [name, ingress_port_UUID, ingress_port_UUID]
- self._test_create_resource(resource, cmd, name, myid, args,
- position_names, position_values)
-
- def test_create_port_pair_with_all_param(self):
-        """Create port_pair: myname with all parameters."""
- resource = 'port_pair'
- cmd = pp.PortPairCreate(test_cli20.MyApp(sys.stdout),
- None)
- name = 'myname'
- myid = 'myid'
- desc = "my_port_pair"
- service_fn_param = 'correlation=None'
- service_fn_param_exp = {"correlation": "None"}
- args = [name, '--ingress', ingress_port_UUID,
- '--egress', egress_port_UUID, '--description', desc,
- '--service-function-parameters', service_fn_param]
- position_names = ['name', 'ingress', 'egress', 'description',
- 'service_function_parameters']
- position_values = [name, ingress_port_UUID, egress_port_UUID, desc,
- service_fn_param_exp]
- self._test_create_resource(resource, cmd, name, myid, args,
- position_names, position_values)
-
- def test_update_port_pair_description(self):
-        """Update port_pair: myid --description My_New_Port_Pair."""
- resource = 'port_pair'
- desc1 = "My_New_Port_Pair"
- cmd = pp.PortPairUpdate(test_cli20.MyApp(sys.stdout), None)
- self._test_update_resource(resource, cmd, 'myid',
- ['myid', '--description', desc1],
- {'description': desc1})
-
- def test_update_port_pair_name(self):
-        """Update port_pair: myid --name My_New_Port_Pair."""
- resource = 'port_pair'
- my_name = "My_New_Port_Pair"
- cmd = pp.PortPairUpdate(test_cli20.MyApp(sys.stdout), None)
- self._test_update_resource(resource, cmd, 'myid',
- ['myid', '--name', my_name],
- {'name': my_name})
-
- def test_delete_port_pair(self):
- """Delete port-pair: myid."""
- resource = 'port_pair'
- cmd = pp.PortPairDelete(test_cli20.MyApp(sys.stdout), None)
- myid = 'myid'
- args = [myid]
- self._test_delete_resource(resource, cmd, myid, args)
-
- def test_list_port_pair(self):
- """List port_pairs."""
- resources = 'port_pairs'
- cmd = pp.PortPairList(test_cli20.MyApp(sys.stdout), None)
- self._test_list_resources(resources, cmd, True)
-
- def test_list_port_pair_limit(self):
- """size (1000) limited list: port-pair -P."""
- resources = "port_pairs"
- cmd = pp.PortPairList(test_cli20.MyApp(sys.stdout), None)
- self._test_list_resources(resources, cmd, page_size=1000)
-
- def test_list_port_pairs_sort(self):
-        """List port_pairs: --sort-key name --sort-key id
-
-        --sort-dir asc --sort-dir desc.
-        """
- resources = "port_pairs"
- cmd = pp.PortPairList(test_cli20.MyApp(sys.stdout), None)
- self._test_list_resources(resources, cmd,
- sort_key=["name", "id"],
- sort_dir=["asc", "desc"])
-
- def test_show_port_pair(self):
- """Show port-pairs: --fields id --fields name myid."""
- resource = 'port_pair'
- cmd = pp.PortPairShow(test_cli20.MyApp(sys.stdout), None)
- args = ['--fields', 'id', '--fields', 'name', self.test_id]
- self._test_show_resource(resource, cmd, self.test_id,
- args, ['id', 'name'])
diff --git a/networking_sfc/tests/unit/cli/test_port_pair_group.py b/networking_sfc/tests/unit/cli/test_port_pair_group.py
deleted file mode 100644
index f610ef0..0000000
--- a/networking_sfc/tests/unit/cli/test_port_pair_group.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# Copyright 2015 Huawei Technologies India Pvt. Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import sys
-import uuid
-
-import mock
-
-from neutronclient import shell
-from neutronclient.tests.unit import test_cli20
-
-from networking_sfc.cli import port_pair_group as pg
-
-pp1 = str(uuid.uuid4())
-pp2 = str(uuid.uuid4())
-pp3 = str(uuid.uuid4())
-pp4 = str(uuid.uuid4())
-
-
-class CLITestV20PortGroupExtensionJSON(test_cli20.CLITestV20Base):
- def setUp(self):
- self._mock_extension_loading()
- super(CLITestV20PortGroupExtensionJSON, self).setUp()
- self.register_non_admin_status_resource('port_pair_group')
-
- def _create_patch(self, name, func=None):
- patcher = mock.patch(name)
- thing = patcher.start()
- self.addCleanup(patcher.stop)
- return thing
-
- def _mock_extension_loading(self):
- ext_pkg = 'neutronclient.common.extension'
- port_pair_group = self._create_patch(ext_pkg +
- '._discover_via_entry_points')
- port_pair_group.return_value = [("port_pair_group", pg)]
- return port_pair_group
-
- def test_ext_cmd_loaded(self):
- shell.NeutronShell('2.0')
- ext_cmd = {'port-pair-group-list': pg.PortPairGroupList,
- 'port-pair-group-create': pg.PortPairGroupCreate,
- 'port-pair-group-update': pg.PortPairGroupUpdate,
- 'port-pair-group-delete': pg.PortPairGroupDelete,
- 'port-pair-group-show': pg.PortPairGroupShow}
- self.assertDictContainsSubset(ext_cmd, shell.COMMANDS['2.0'])
-
- def test_create_port_pair_group_with_mandatory_args(self):
- """Create port_pair_group: myname."""
- resource = 'port_pair_group'
- cmd = pg.PortPairGroupCreate(test_cli20.MyApp(sys.stdout), None)
- name = 'myname'
- myid = 'myid'
- args = [name, '--port-pair', pp1]
- position_names = ['name', 'port_pairs']
- position_values = [name, [pp1]]
- self._test_create_resource(resource, cmd, name, myid, args,
- position_names, position_values)
-
- def test_create_port_pair_group_with_ingress_egress_port_group(self):
-        """Create port_pair_group: myname with multiple port pairs."""
- resource = 'port_pair_group'
- cmd = pg.PortPairGroupCreate(test_cli20.MyApp(sys.stdout), None)
- name = 'myname'
- myid = 'myid'
- args = [name, '--port-pair', pp1, '--port-pair', pp2]
- position_names = ['name', 'port_pairs']
- position_values = [name, [pp1, pp2]]
- self._test_create_resource(resource, cmd, name, myid, args,
- position_names, position_values)
-
- def test_delete_port_pair_group(self):
- """Delete port_pair_group: myid."""
- resource = 'port_pair_group'
- cmd = pg.PortPairGroupDelete(test_cli20.MyApp(sys.stdout), None)
- myid = 'myid'
- args = [myid]
- self._test_delete_resource(resource, cmd, myid, args)
-
- def test_update_port_group_only_port_pair(self):
-        """Update the port pairs of port_pair_group: myid."""
- resource = 'port_pair_group'
- cmd = pg.PortPairGroupUpdate(test_cli20.MyApp(sys.stdout), None)
- myid = 'myid'
- args = [myid, '--port-pair', pp1,
- '--port-pair', pp2]
- updatefields = {'port_pairs': [pp1, pp2]}
- self._test_update_resource(resource, cmd, myid, args, updatefields)
-
- def test_update_port_group_with_all_desc(self):
-        """Update the port pairs and description of port_pair_group: myid."""
- resource = 'port_pair_group'
- cmd = pg.PortPairGroupUpdate(test_cli20.MyApp(sys.stdout), None)
- myid = 'myid'
- args = [myid, '--port-pair', pp1,
- '--port-pair', pp2, '--description', 'my_port_group',
- '--description', 'my_port_pair_group']
- updatefields = {'port_pairs': [pp1, pp2],
- 'description': 'my_port_pair_group'}
- self._test_update_resource(resource, cmd, myid, args, updatefields)
-
- def test_list_port_pair_group(self):
- """List port_pair_group."""
- resources = 'port_pair_groups'
- cmd = pg.PortPairGroupList(test_cli20.MyApp(sys.stdout), None)
- self._test_list_resources(resources, cmd, True)
-
- def test_list_port_pair_group_limit(self):
- """size (1000) limited list: port-pair-group -P."""
- resources = "port_pair_groups"
- cmd = pg.PortPairGroupList(test_cli20.MyApp(sys.stdout), None)
- self._test_list_resources(resources, cmd, page_size=1000)
-
- def test_list_port_group_sort(self):
-        """List port_pair_group: --sort-key name --sort-key id
-
-        --sort-dir asc --sort-dir desc.
-        """
- resources = "port_pair_groups"
- cmd = pg.PortPairGroupList(test_cli20.MyApp(sys.stdout), None)
- self._test_list_resources(resources, cmd,
- sort_key=["name", "id"],
- sort_dir=["asc", "desc"])
-
- def test_show_port_group(self):
-        """Show port-pair-group: --fields id --fields name myid."""
- resource = 'port_pair_group'
- cmd = pg.PortPairGroupShow(test_cli20.MyApp(sys.stdout), None)
- args = ['--fields', 'id', '--fields', 'name', self.test_id]
- self._test_show_resource(resource, cmd, self.test_id,
- args, ['id', 'name'])
diff --git a/networking_sfc/tests/unit/db/__init__.py b/networking_sfc/tests/unit/db/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/tests/unit/db/__init__.py
+++ /dev/null
diff --git a/networking_sfc/tests/unit/db/test_flowclassifier_db.py b/networking_sfc/tests/unit/db/test_flowclassifier_db.py
deleted file mode 100644
index 36c9af8..0000000
--- a/networking_sfc/tests/unit/db/test_flowclassifier_db.py
+++ /dev/null
@@ -1,677 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-import mock
-import six
-import webob.exc
-
-from oslo_config import cfg
-from oslo_utils import importutils
-from oslo_utils import uuidutils
-
-from neutron.api import extensions as api_ext
-from neutron.common import config
-from neutron.common import constants as const
-import neutron.extensions as nextensions
-
-from networking_sfc.db import flowclassifier_db as fdb
-from networking_sfc import extensions
-from networking_sfc.extensions import flowclassifier as fc_ext
-from networking_sfc.tests import base
-
-
-DB_FLOWCLASSIFIER_PLUGIN_CLASS = (
- "networking_sfc.db.flowclassifier_db.FlowClassifierDbPlugin"
-)
-extensions_path = ':'.join(extensions.__path__ + nextensions.__path__)
-
-
-class FlowClassifierDbPluginTestCaseBase(base.BaseTestCase):
- def _create_flow_classifier(
- self, fmt, flow_classifier=None, expected_res_status=None, **kwargs
- ):
- ctx = kwargs.get('context', None)
- tenant_id = kwargs.get('tenant_id', self._tenant_id)
- data = {'flow_classifier': flow_classifier or {}}
- if ctx is None:
- data['flow_classifier'].update({'tenant_id': tenant_id})
- req = self.new_create_request(
- 'flow_classifiers', data, fmt, context=ctx
- )
- res = req.get_response(self.ext_api)
- if expected_res_status:
- self.assertEqual(res.status_int, expected_res_status)
- return res
-
- @contextlib.contextmanager
- def flow_classifier(
- self, fmt=None, flow_classifier=None, do_delete=True, **kwargs
- ):
- if not fmt:
- fmt = self.fmt
- res = self._create_flow_classifier(fmt, flow_classifier, **kwargs)
- if res.status_int >= 400:
- raise webob.exc.HTTPClientError(code=res.status_int)
- flow_classifier = self.deserialize(fmt or self.fmt, res)
- yield flow_classifier
- if do_delete:
- self._delete('flow_classifiers',
- flow_classifier['flow_classifier']['id'])
-
- def _get_expected_flow_classifier(self, flow_classifier):
- expected_flow_classifier = {
- 'name': flow_classifier.get('name') or '',
- 'description': flow_classifier.get('description') or '',
- 'source_port_range_min': flow_classifier.get(
- 'source_port_range_min'),
- 'source_port_range_max': flow_classifier.get(
- 'source_port_range_max'),
- 'destination_port_range_min': flow_classifier.get(
- 'destination_port_range_min'),
- 'destination_port_range_max': flow_classifier.get(
- 'destination_port_range_max'),
- 'ethertype': flow_classifier.get(
- 'ethertype') or 'IPv4',
- 'protocol': flow_classifier.get(
- 'protocol'),
- 'l7_parameters': flow_classifier.get(
- 'l7_parameters') or {}
- }
- if (
- 'source_ip_prefix' in flow_classifier and
- flow_classifier['source_ip_prefix']
- ):
- expected_flow_classifier['source_ip_prefix'] = (
- flow_classifier['source_ip_prefix'])
- if (
- 'destination_ip_prefix' in flow_classifier and
- flow_classifier['destination_ip_prefix']
- ):
- expected_flow_classifier['destination_ip_prefix'] = (
- flow_classifier['destination_ip_prefix'])
- return expected_flow_classifier
-
- def _test_create_flow_classifier(
- self, flow_classifier, expected_flow_classifier=None
- ):
- if expected_flow_classifier is None:
- expected_flow_classifier = self._get_expected_flow_classifier(
- flow_classifier)
- with self.flow_classifier(flow_classifier=flow_classifier) as fc:
- for k, v in six.iteritems(expected_flow_classifier):
- self.assertIn(k, fc['flow_classifier'])
- self.assertEqual(fc['flow_classifier'][k], v)
-
-
-class FlowClassifierDbPluginTestCase(
- base.NeutronDbPluginV2TestCase,
- FlowClassifierDbPluginTestCaseBase
-):
- resource_prefix_map = dict(
- (k, fc_ext.FLOW_CLASSIFIER_PREFIX)
- for k in fc_ext.RESOURCE_ATTRIBUTE_MAP.keys()
- )
-
- def setUp(self, core_plugin=None, flowclassifier_plugin=None,
- ext_mgr=None):
- mock_log_p = mock.patch.object(fdb, 'LOG')
- self.mock_log = mock_log_p.start()
- cfg.CONF.register_opts(fc_ext.flow_classifier_quota_opts, 'QUOTAS')
- if not flowclassifier_plugin:
- flowclassifier_plugin = DB_FLOWCLASSIFIER_PLUGIN_CLASS
- service_plugins = {
- fc_ext.FLOW_CLASSIFIER_EXT: flowclassifier_plugin
- }
- fdb.FlowClassifierDbPlugin.supported_extension_aliases = [
- fc_ext.FLOW_CLASSIFIER_EXT]
- fdb.FlowClassifierDbPlugin.path_prefix = (
- fc_ext.FLOW_CLASSIFIER_PREFIX
- )
- super(FlowClassifierDbPluginTestCase, self).setUp(
- ext_mgr=ext_mgr,
- plugin=core_plugin,
- service_plugins=service_plugins
- )
- if not ext_mgr:
- self.flowclassifier_plugin = importutils.import_object(
- flowclassifier_plugin)
- ext_mgr = api_ext.PluginAwareExtensionManager(
- extensions_path,
- {fc_ext.FLOW_CLASSIFIER_EXT: self.flowclassifier_plugin}
- )
- app = config.load_paste_app('extensions_test_app')
- self.ext_api = api_ext.ExtensionMiddleware(app, ext_mgr=ext_mgr)
-
- def test_create_flow_classifier(self):
- self._test_create_flow_classifier({})
-
- def test_quota_create_flow_classifier(self):
- cfg.CONF.set_override('quota_flow_classifier', 3, group='QUOTAS')
- self._create_flow_classifier(self.fmt, {}, expected_res_status=201)
- self._create_flow_classifier(self.fmt, {}, expected_res_status=201)
- self._create_flow_classifier(self.fmt, {}, expected_res_status=201)
- self._create_flow_classifier(self.fmt, {}, expected_res_status=409)
-
- def test_create_flow_classifier_with_all_fields(self):
- self._test_create_flow_classifier({
- 'name': 'test1',
- 'ethertype': const.IPv4,
- 'protocol': const.PROTO_NAME_TCP,
- 'source_port_range_min': 100,
- 'source_port_range_max': 200,
- 'destination_port_range_min': 101,
- 'destination_port_range_max': 201,
- 'source_ip_prefix': '10.100.0.0/16',
- 'destination_ip_prefix': '10.200.0.0/16',
- 'logical_source_port': None,
- 'logical_destination_port': None,
- 'l7_parameters': {}
- })
-
- def test_create_flow_classifier_with_all_supported_ethertype(self):
- self._test_create_flow_classifier({
- 'ethertype': None
- })
- self._test_create_flow_classifier({
- 'ethertype': 'IPv4'
- })
- self._test_create_flow_classifier({
- 'ethertype': 'IPv6'
- })
-
- def test_create_flow_classifier_with_invalid_ethertype(self):
- self._create_flow_classifier(
- self.fmt, {
- 'ethertype': 'unsupported',
- },
- expected_res_status=400
- )
-
- def test_create_flow_classifier_with_all_supported_protocol(self):
- self._test_create_flow_classifier({
- 'protocol': None
- })
- self._test_create_flow_classifier({
- 'protocol': const.PROTO_NAME_TCP
- })
- self._test_create_flow_classifier({
- 'protocol': const.PROTO_NAME_UDP
- })
- self._test_create_flow_classifier({
- 'protocol': const.PROTO_NAME_ICMP
- })
-
- def test_create_flow_classifier_with_invalid_protocol(self):
- self._create_flow_classifier(
- self.fmt, {
- 'protocol': 'unsupported',
- },
- expected_res_status=400
- )
-
- def test_create_flow_classifier_with_all_supported_port_protocol(self):
- self._test_create_flow_classifier({
- 'source_port_range_min': None,
- 'source_port_range_max': None,
- 'destination_port_range_min': None,
- 'destination_port_range_max': None
- })
- self._test_create_flow_classifier({
- 'source_port_range_min': 100,
- 'source_port_range_max': 200,
- 'destination_port_range_min': 100,
- 'destination_port_range_max': 200,
- 'protocol': const.PROTO_NAME_TCP
- })
- self._test_create_flow_classifier({
- 'source_port_range_min': 100,
- 'source_port_range_max': 100,
- 'destination_port_range_min': 100,
- 'destination_port_range_max': 100,
- 'protocol': const.PROTO_NAME_TCP
- })
- self._test_create_flow_classifier({
- 'source_port_range_min': '100',
- 'source_port_range_max': '200',
- 'destination_port_range_min': '100',
- 'destination_port_range_max': '200',
- 'protocol': const.PROTO_NAME_UDP
- }, {
- 'source_port_range_min': 100,
- 'source_port_range_max': 200,
- 'destination_port_range_min': 100,
- 'destination_port_range_max': 200,
- 'protocol': const.PROTO_NAME_UDP
- })
-
-    def test_create_flow_classifier_with_invalid_port_protocol(self):
- self._create_flow_classifier(
- self.fmt, {
- 'source_port_range_min': 'abc',
- 'protocol': const.PROTO_NAME_TCP
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'source_port_range_max': 'abc',
- 'protocol': const.PROTO_NAME_TCP
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'source_port_range_min': 100,
- 'source_port_range_max': 99,
- 'protocol': const.PROTO_NAME_TCP
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'source_port_range_min': 65536,
- 'protocol': const.PROTO_NAME_TCP
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'source_port_range_max': 65536,
- 'protocol': const.PROTO_NAME_TCP
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'source_port_range_min': -1,
- 'protocol': const.PROTO_NAME_TCP
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'source_port_range_max': -1,
- 'protocol': const.PROTO_NAME_TCP
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'destination_port_range_min': 'abc',
- 'protocol': const.PROTO_NAME_TCP
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'destination_port_range_max': 'abc',
- 'protocol': const.PROTO_NAME_TCP
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'destination_port_range_min': 100,
- 'destination_port_range_max': 99,
- 'protocol': const.PROTO_NAME_TCP
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'destination_port_range_min': 65536,
- 'protocol': const.PROTO_NAME_TCP
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'destination_port_range_max': 65536,
- 'protocol': const.PROTO_NAME_TCP
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'destination_port_range_min': -1,
- 'protocol': const.PROTO_NAME_TCP
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'destination_port_range_max': -1,
- 'protocol': const.PROTO_NAME_TCP
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'source_port_range_min': 100
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'source_port_range_max': 100
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'source_port_range_min': 100,
- 'source_port_range_max': 200
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'source_port_range_min': 100,
- 'source_port_range_max': 200,
- 'protocol': const.PROTO_NAME_ICMP
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'destination_port_range_min': 100
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'destination_port_range_max': 100
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'destination_port_range_min': 100,
- 'destination_port_range_max': 200
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'destination_port_range_min': 100,
- 'destination_port_range_max': 200,
- 'protocol': const.PROTO_NAME_ICMP
- },
- expected_res_status=400
- )
-
- def test_create_flow_classifier_with_all_supported_ip_prefix(self):
- self._test_create_flow_classifier({
- 'source_ip_prefix': None,
- 'destination_ip_prefix': None
- })
- self._test_create_flow_classifier({
- 'source_ip_prefix': '10.0.0.0/8',
- 'destination_ip_prefix': '10.0.0.0/8'
- })
-
- def test_create_flow_classifier_with_invalid_ip_prefix(self):
- self._create_flow_classifier(
- self.fmt, {
- 'source_ip_prefix': '10.0.0.0/34'
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'source_ip_prefix': '10.0.0.0.0/8'
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'source_ip_prefix': '256.0.0.0/8'
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'source_ip_prefix': '10.0.0.0'
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'destination_ip_prefix': '10.0.0.0/34'
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'destination_ip_prefix': '10.0.0.0.0/8'
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'destination_ip_prefix': '256.0.0.0/8'
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'destination_ip_prefix': '10.0.0.0'
- },
- expected_res_status=400
- )
-
- def test_create_flow_classifier_with_all_supported_l7_parameters(self):
- self._test_create_flow_classifier({
- 'l7_parameters': None
- })
- self._test_create_flow_classifier({
- 'l7_parameters': {}
- })
-
- def test_create_flow_classifier_with_invalid_l7_parameters(self):
- self._create_flow_classifier(
- self.fmt, {
- 'l7_parameters': {'abc': 'def'}
- },
- expected_res_status=400
- )
-
- def test_create_flow_classifier_with_port_id(self):
- self._test_create_flow_classifier({
- 'logical_source_port': None,
- 'logical_destination_port': None,
- })
- with self.port(
- name='test1'
- ) as port:
- self._test_create_flow_classifier({
- 'logical_source_port': port['port']['id'],
- 'logical_destination_port': port['port']['id'],
- })
-
- def test_create_flow_classifier_with_nouuid_port_id(self):
- self._create_flow_classifier(
- self.fmt, {
- 'logical_source_port': 'abc'
- },
- expected_res_status=400
- )
- self._create_flow_classifier(
- self.fmt, {
- 'logical_destination_port': 'abc'
- },
- expected_res_status=400
- )
-
- def test_create_flow_classifier_with_unknown_port_id(self):
- self._create_flow_classifier(
- self.fmt, {
- 'logical_source_port': uuidutils.generate_uuid()
- },
- expected_res_status=404
- )
- self._create_flow_classifier(
- self.fmt, {
- 'logical_destination_port': uuidutils.generate_uuid()
- },
- expected_res_status=404
- )
-
- def test_list_flow_classifiers(self):
- with self.flow_classifier(flow_classifier={
- 'name': 'test1'
- }) as fc1, self.flow_classifier(flow_classifier={
- 'name': 'test2',
- }) as fc2:
- fcs = [fc1, fc2]
- self._test_list_resources(
- 'flow_classifier', fcs
- )
-
- def test_list_flow_classifiers_with_params(self):
- with self.flow_classifier(flow_classifier={
- 'name': 'test1'
- }) as fc1, self.flow_classifier(flow_classifier={
- 'name': 'test2',
- }) as fc2:
- self._test_list_resources(
- 'flow_classifier', [fc1],
- query_params='name=test1'
- )
- self._test_list_resources(
- 'flow_classifier', [fc2],
- query_params='name=test2'
- )
- self._test_list_resources(
- 'flow_classifier', [],
- query_params='name=test3'
- )
-
- def test_list_flow_classifiers_with_unknown_params(self):
- with self.flow_classifier(flow_classifier={
- 'name': 'test1'
- }) as fc1, self.flow_classifier(flow_classifier={
- 'name': 'test2',
- }) as fc2:
- self._test_list_resources(
- 'flow_classifier', [fc1, fc2],
- query_params='hello=test3'
- )
-
- def test_show_flow_classifier(self):
- with self.flow_classifier(flow_classifier={
- 'name': 'test1'
- }) as fc:
- req = self.new_show_request(
- 'flow_classifiers', fc['flow_classifier']['id']
- )
- res = self.deserialize(
- self.fmt, req.get_response(self.ext_api)
- )
- for k, v in six.iteritems(fc['flow_classifier']):
- self.assertEqual(res['flow_classifier'][k], v)
-
- def test_show_flow_classifier_noexist(self):
- req = self.new_show_request(
- 'flow_classifiers', '1'
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 404)
-
- def test_update_flow_classifier(self):
- with self.flow_classifier(flow_classifier={
- 'name': 'test1',
- 'description': 'desc1'
- }) as fc:
- updates = {
- 'name': 'test2',
- 'description': 'desc2',
- }
- req = self.new_update_request(
- 'flow_classifiers', {'flow_classifier': updates},
- fc['flow_classifier']['id']
- )
- res = self.deserialize(
- self.fmt,
- req.get_response(self.ext_api)
- )
- expected = fc['flow_classifier']
- expected.update(updates)
- for k, v in six.iteritems(expected):
- self.assertEqual(res['flow_classifier'][k], v)
- req = self.new_show_request(
- 'flow_classifiers', fc['flow_classifier']['id']
- )
- res = self.deserialize(
- self.fmt, req.get_response(self.ext_api)
- )
- for k, v in six.iteritems(expected):
- self.assertEqual(res['flow_classifier'][k], v)
-
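-    # Helper: PUT a single-field update and assert only the response status.
-    # A minimal convenience used below to verify that classifier match
-    # fields are rejected (400) on update.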
- def _test_update_with_field(
- self, fc, updates, expected_status_code
- ):
- req = self.new_update_request(
- 'flow_classifiers', {'flow_classifier': updates},
- fc['flow_classifier']['id']
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, expected_status_code)
-
-    def test_update_flow_classifier_unsupported_fields(self):
- with self.flow_classifier(flow_classifier={
- 'name': 'test1',
- 'description': 'desc1'
- }) as fc:
- self._test_update_with_field(
- fc, {'ethertype': None}, 400)
- self._test_update_with_field(
- fc, {'protocol': None}, 400)
- self._test_update_with_field(
- fc, {'source_port_range_min': None}, 400)
- self._test_update_with_field(
- fc, {'source_port_range_max': None}, 400)
- self._test_update_with_field(
- fc, {'destination_port_range_min': None}, 400)
- self._test_update_with_field(
- fc, {'destination_port_range_max': None}, 400)
- self._test_update_with_field(
- fc, {'source_ip_prefix': None}, 400)
- self._test_update_with_field(
- fc, {'destination_ip_prefix': None}, 400)
- self._test_update_with_field(
- fc, {'l7_parameters': None}, 400)
-
- def test_delete_flow_classifier(self):
- with self.flow_classifier(flow_classifier={
- 'name': 'test1'
- }, do_delete=False) as fc:
- req = self.new_delete_request(
- 'flow_classifiers', fc['flow_classifier']['id']
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 204)
- req = self.new_show_request(
- 'flow_classifiers', fc['flow_classifier']['id']
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 404)
-
- def test_delete_flow_classifier_noexist(self):
- req = self.new_delete_request(
- 'flow_classifiers', '1'
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 404)
diff --git a/networking_sfc/tests/unit/db/test_sfc_db.py b/networking_sfc/tests/unit/db/test_sfc_db.py
deleted file mode 100644
index 70d57e3..0000000
--- a/networking_sfc/tests/unit/db/test_sfc_db.py
+++ /dev/null
@@ -1,1490 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-import mock
-import six
-import webob.exc
-
-from oslo_config import cfg
-from oslo_utils import importutils
-from oslo_utils import uuidutils
-
-from neutron.api import extensions as api_ext
-from neutron.common import config
-import neutron.extensions as nextensions
-
-from networking_sfc.db import flowclassifier_db as fdb
-from networking_sfc.db import sfc_db
-from networking_sfc import extensions
-from networking_sfc.extensions import flowclassifier as fc_ext
-from networking_sfc.extensions import sfc
-from networking_sfc.tests import base
-from networking_sfc.tests.unit.db import test_flowclassifier_db
-
-
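-# Dotted path of the SFC database plugin under test; setUp() imports it
-# lazily unless a subclass supplies an alternative sfc_plugin.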
-DB_SFC_PLUGIN_CLASS = (
- "networking_sfc.db.sfc_db.SfcDbPlugin"
-)
-extensions_path = ':'.join(extensions.__path__ + nextensions.__path__)
-
-
-class SfcDbPluginTestCaseBase(
- base.BaseTestCase
-):
- def _create_port_chain(
- self, fmt, port_chain=None, expected_res_status=None, **kwargs
- ):
- ctx = kwargs.get('context', None)
- tenant_id = kwargs.get('tenant_id', self._tenant_id)
- data = {'port_chain': port_chain or {}}
- if ctx is None:
- data['port_chain'].update({'tenant_id': tenant_id})
- req = self.new_create_request(
- 'port_chains', data, fmt, context=ctx
- )
- res = req.get_response(self.ext_api)
- if expected_res_status:
- self.assertEqual(res.status_int, expected_res_status)
- return res
-
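-    # Context manager wrapping _create_port_chain: raises HTTPClientError on
-    # a 4xx/5xx response, yields the deserialized chain, and removes it on
-    # exit unless do_delete=False (delete tests clean up themselves).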
- @contextlib.contextmanager
- def port_chain(self, fmt=None, port_chain=None, do_delete=True, **kwargs):
- if not fmt:
- fmt = self.fmt
- res = self._create_port_chain(fmt, port_chain, **kwargs)
- if res.status_int >= 400:
- raise webob.exc.HTTPClientError(code=res.status_int)
-        port_chain = self.deserialize(fmt, res)
- yield port_chain
- if do_delete:
- self._delete('port_chains', port_chain['port_chain']['id'])
-
- def _create_port_pair_group(
- self, fmt, port_pair_group=None, expected_res_status=None, **kwargs
- ):
- ctx = kwargs.get('context', None)
- tenant_id = kwargs.get('tenant_id', self._tenant_id)
- data = {'port_pair_group': port_pair_group or {}}
- if ctx is None:
- data['port_pair_group'].update({'tenant_id': tenant_id})
- req = self.new_create_request(
- 'port_pair_groups', data, fmt, context=ctx
- )
- res = req.get_response(self.ext_api)
- if expected_res_status:
- self.assertEqual(res.status_int, expected_res_status)
- return res
-
- @contextlib.contextmanager
- def port_pair_group(
- self, fmt=None, port_pair_group=None, do_delete=True, **kwargs
- ):
- if not fmt:
- fmt = self.fmt
- res = self._create_port_pair_group(fmt, port_pair_group, **kwargs)
- if res.status_int >= 400:
- raise webob.exc.HTTPClientError(code=res.status_int)
-        port_pair_group = self.deserialize(fmt, res)
- yield port_pair_group
- if do_delete:
- self._delete(
- 'port_pair_groups',
- port_pair_group['port_pair_group']['id'])
-
- def _create_port_pair(
- self, fmt, port_pair=None, expected_res_status=None, **kwargs
- ):
- ctx = kwargs.get('context', None)
- tenant_id = kwargs.get('tenant_id', self._tenant_id)
- data = {'port_pair': port_pair or {}}
- if ctx is None:
- data['port_pair'].update({'tenant_id': tenant_id})
- req = self.new_create_request(
- 'port_pairs', data, fmt, context=ctx
- )
- res = req.get_response(self.ext_api)
- if expected_res_status:
- self.assertEqual(res.status_int, expected_res_status)
- return res
-
- @contextlib.contextmanager
- def port_pair(self, fmt=None, port_pair=None, do_delete=True, **kwargs):
- if not fmt:
- fmt = self.fmt
- res = self._create_port_pair(fmt, port_pair, **kwargs)
- if res.status_int >= 400:
- raise webob.exc.HTTPClientError(code=res.status_int)
-        port_pair = self.deserialize(fmt, res)
- yield port_pair
- if do_delete:
- self._delete('port_pairs', port_pair['port_pair']['id'])
-
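-    # Expected attribute defaults for a port pair: empty name/description,
-    # and service_function_parameters falling back to {'correlation': None}.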
- def _get_expected_port_pair(self, port_pair):
- return {
- 'name': port_pair.get('name') or '',
- 'description': port_pair.get('description') or '',
- 'egress': port_pair.get('egress'),
- 'ingress': port_pair.get('ingress'),
- 'service_function_parameters': port_pair.get(
- 'service_function_parameters') or {'correlation': None}
- }
-
- def _test_create_port_pair(self, port_pair, expected_port_pair=None):
- if expected_port_pair is None:
- expected_port_pair = self._get_expected_port_pair(port_pair)
- with self.port_pair(port_pair=port_pair) as pp:
- for k, v in six.iteritems(expected_port_pair):
- self.assertEqual(pp['port_pair'][k], v)
-
- def _test_create_port_pairs(
- self, port_pairs, expected_port_pairs=None
- ):
- if port_pairs:
- port_pair = port_pairs.pop()
- if expected_port_pairs:
- expected_port_pair = expected_port_pairs.pop()
- else:
- expected_port_pair = self._get_expected_port_pair(port_pair)
- with self.port_pair(port_pair=port_pair) as pp:
- for k, v in six.iteritems(expected_port_pair):
- self.assertEqual(pp['port_pair'][k], v)
-
- def _get_expected_port_pair_group(self, port_pair_group):
- return {
- 'name': port_pair_group.get('name') or '',
- 'description': port_pair_group.get('description') or '',
- 'port_pairs': port_pair_group.get('port_pairs') or []
- }
-
- def _test_create_port_pair_group(
- self, port_pair_group, expected_port_pair_group=None
- ):
- if expected_port_pair_group is None:
- expected_port_pair_group = self._get_expected_port_pair_group(
- port_pair_group)
- with self.port_pair_group(port_pair_group=port_pair_group) as pg:
- for k, v in six.iteritems(expected_port_pair_group):
- self.assertEqual(pg['port_pair_group'][k], v)
-
- def _test_create_port_pair_groups(
- self, port_pair_groups, expected_port_pair_groups=None
- ):
- if port_pair_groups:
- port_pair_group = port_pair_groups.pop()
- if expected_port_pair_groups:
- expected_port_pair_group = expected_port_pair_groups.pop()
- else:
- expected_port_pair_group = self._get_expected_port_pair_group(
- port_pair_group)
- with self.port_pair_group(port_pair_group=port_pair_group) as pg:
- for k, v in six.iteritems(expected_port_pair_group):
- self.assertEqual(pg['port_pair_group'][k], v)
-
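-    # Expected defaults for a port chain: empty flow classifier list and
-    # chain_parameters falling back to {'correlation': 'mpls'}.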
- def _get_expected_port_chain(self, port_chain):
- return {
- 'name': port_chain.get('name') or '',
- 'description': port_chain.get('description') or '',
- 'port_pair_groups': port_chain['port_pair_groups'],
- 'flow_classifiers': port_chain.get('flow_classifiers') or [],
- 'chain_parameters': port_chain.get(
- 'chain_parameters') or {'correlation': 'mpls'}
- }
-
- def _test_create_port_chain(self, port_chain, expected_port_chain=None):
- if expected_port_chain is None:
- expected_port_chain = self._get_expected_port_chain(port_chain)
- with self.port_chain(port_chain=port_chain) as pc:
- for k, v in six.iteritems(expected_port_chain):
- self.assertEqual(pc['port_chain'][k], v)
-
- def _test_create_port_chains(
- self, port_chains, expected_port_chains=None
- ):
- if port_chains:
- port_chain = port_chains.pop()
- if expected_port_chains:
- expected_port_chain = expected_port_chains.pop()
- else:
- expected_port_chain = self._get_expected_port_chain(
- port_chain)
- with self.port_chain(port_chain=port_chain) as pc:
- for k, v in six.iteritems(expected_port_chain):
- self.assertEqual(pc['port_chain'][k], v)
-
-
-class SfcDbPluginTestCase(
- base.NeutronDbPluginV2TestCase,
- test_flowclassifier_db.FlowClassifierDbPluginTestCaseBase,
- SfcDbPluginTestCaseBase
-):
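-    # Maps every SFC and flow-classifier resource to its API prefix so the
-    # base test case builds request paths such as /sfc/port_chains.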
- resource_prefix_map = dict([
- (k, sfc.SFC_PREFIX)
- for k in sfc.RESOURCE_ATTRIBUTE_MAP.keys()
- ] + [
- (k, fc_ext.FLOW_CLASSIFIER_PREFIX)
- for k in fc_ext.RESOURCE_ATTRIBUTE_MAP.keys()
- ])
-
- def setUp(self, core_plugin=None, sfc_plugin=None,
- flowclassifier_plugin=None, ext_mgr=None):
- mock_log_p = mock.patch.object(sfc_db, 'LOG')
- self.mock_log = mock_log_p.start()
- cfg.CONF.register_opts(sfc.sfc_quota_opts, 'QUOTAS')
- if not sfc_plugin:
- sfc_plugin = DB_SFC_PLUGIN_CLASS
- if not flowclassifier_plugin:
- flowclassifier_plugin = (
- test_flowclassifier_db.DB_FLOWCLASSIFIER_PLUGIN_CLASS)
-
- service_plugins = {
- sfc.SFC_EXT: sfc_plugin,
- fc_ext.FLOW_CLASSIFIER_EXT: flowclassifier_plugin
- }
- sfc_db.SfcDbPlugin.supported_extension_aliases = [
- "sfc"]
- sfc_db.SfcDbPlugin.path_prefix = sfc.SFC_PREFIX
- fdb.FlowClassifierDbPlugin.supported_extension_aliases = [
- "flow_classifier"]
- fdb.FlowClassifierDbPlugin.path_prefix = (
- fc_ext.FLOW_CLASSIFIER_PREFIX
- )
- super(SfcDbPluginTestCase, self).setUp(
- ext_mgr=ext_mgr,
- plugin=core_plugin,
- service_plugins=service_plugins
- )
- if not ext_mgr:
- self.sfc_plugin = importutils.import_object(sfc_plugin)
- self.flowclassifier_plugin = importutils.import_object(
- flowclassifier_plugin)
- ext_mgr = api_ext.PluginAwareExtensionManager(
- extensions_path,
- {
- sfc.SFC_EXT: self.sfc_plugin,
- fc_ext.FLOW_CLASSIFIER_EXT: self.flowclassifier_plugin
- }
- )
- app = config.load_paste_app('extensions_test_app')
- self.ext_api = api_ext.ExtensionMiddleware(app, ext_mgr=ext_mgr)
-
- def test_create_port_chain(self):
- with self.port_pair_group(port_pair_group={}) as pg:
- self._test_create_port_chain({
- 'port_pair_groups': [pg['port_pair_group']['id']]})
-
- def test_quota_create_port_chain(self):
- cfg.CONF.set_override('quota_port_chain', 3, group='QUOTAS')
- with self.port_pair_group(
- port_pair_group={}, do_delete=False
- ) as pg1, self.port_pair_group(
- port_pair_group={}, do_delete=False
- ) as pg2, self.port_pair_group(
- port_pair_group={}, do_delete=False
- ) as pg3, self.port_pair_group(
- port_pair_group={}, do_delete=False
- ) as pg4:
- self._create_port_chain(
- self.fmt, {
- 'port_pair_groups': [pg1['port_pair_group']['id']]
- }, expected_res_status=201)
- self._create_port_chain(
- self.fmt, {
- 'port_pair_groups': [pg2['port_pair_group']['id']]
- }, expected_res_status=201)
- self._create_port_chain(
- self.fmt, {
- 'port_pair_groups': [pg3['port_pair_group']['id']]
- }, expected_res_status=201)
- self._create_port_chain(
- self.fmt, {
- 'port_pair_groups': [pg4['port_pair_group']['id']]
- }, expected_res_status=409)
-
- def test_create_port_chain_all_fields(self):
- with self.port_pair_group(port_pair_group={}) as pg:
- self._test_create_port_chain({
- 'port_pair_groups': [pg['port_pair_group']['id']],
- 'flow_classifiers': [],
- 'name': 'abc',
- 'description': 'def',
- 'chain_parameters': {'correlation': 'mpls'}
- })
-
- def test_create_port_chain_multi_port_pair_groups(self):
- with self.port_pair_group(
- port_pair_group={}
- ) as pg1, self.port_pair_group(
- port_pair_group={}
- ) as pg2:
- self._test_create_port_chain({
- 'port_pair_groups': [
- pg1['port_pair_group']['id'],
- pg2['port_pair_group']['id']
- ]
- })
-
- def test_create_port_chain_shared_port_pair_groups(self):
- with self.port_pair_group(
- port_pair_group={}
- ) as pg1, self.port_pair_group(
- port_pair_group={}
- ) as pg2, self.port_pair_group(
- port_pair_group={}
- ) as pg3:
- self._test_create_port_chains([{
- 'port_pair_groups': [
- pg1['port_pair_group']['id'],
- pg2['port_pair_group']['id']
- ]
- }, {
- 'port_pair_groups': [
- pg1['port_pair_group']['id'],
- pg3['port_pair_group']['id']
- ]
- }])
-
- def test_create_port_chain_shared_port_pair_groups_different_order(self):
- with self.port_pair_group(
- port_pair_group={}
- ) as pg1, self.port_pair_group(
- port_pair_group={}
- ) as pg2:
- self._test_create_port_chains([{
- 'port_pair_groups': [
- pg1['port_pair_group']['id'],
- pg2['port_pair_group']['id']
- ]
- }, {
- 'port_pair_groups': [
- pg2['port_pair_group']['id'],
- pg1['port_pair_group']['id']
- ]
- }])
-
- def test_create_port_chain_with_empty_chain_parameters(self):
- with self.port_pair_group(port_pair_group={}) as pg:
- self._test_create_port_chain({
- 'chain_parameters': {},
- 'port_pair_groups': [pg['port_pair_group']['id']]
- })
-
- def test_create_port_chain_with_none_chain_parameters(self):
- with self.port_pair_group(port_pair_group={}) as pg:
- self._test_create_port_chain({
- 'chain_parameters': None,
- 'port_pair_groups': [pg['port_pair_group']['id']]
- })
-
- def test_create_port_chain_with_default_chain_parameters(self):
- with self.port_pair_group(port_pair_group={}) as pg:
- self._test_create_port_chain({
- 'chain_parameters': {'correlation': 'mpls'},
- 'port_pair_groups': [pg['port_pair_group']['id']]
- })
-
- def test_create_port_chain_with_none_flow_classifiers(self):
- with self.port_pair_group(port_pair_group={}) as pg:
- self._test_create_port_chain({
- 'flow_classifiers': None,
- 'port_pair_groups': [pg['port_pair_group']['id']]
- })
-
- def test_create_port_chain_with_empty_flow_classifiers(self):
- with self.port_pair_group(port_pair_group={}) as pg:
- self._test_create_port_chain({
- 'flow_classifiers': [],
- 'port_pair_groups': [pg['port_pair_group']['id']]
- })
-
- def test_create_port_chain_with_flow_classifiers(self):
- with self.flow_classifier(flow_classifier={}) as fc:
- with self.port_pair_group(port_pair_group={}) as pg:
- self._test_create_port_chain({
- 'flow_classifiers': [fc['flow_classifier']['id']],
- 'port_pair_groups': [pg['port_pair_group']['id']]
- })
-
- def test_create_port_chain_with_multi_flow_classifiers(self):
- with self.flow_classifier(
- flow_classifier={}
- ) as fc1, self.flow_classifier(
- flow_classifier={}
- ) as fc2:
- with self.port_pair_group(port_pair_group={}) as pg:
- self._test_create_port_chain({
- 'flow_classifiers': [
- fc1['flow_classifier']['id'],
- fc2['flow_classifier']['id']
- ],
- 'port_pair_groups': [pg['port_pair_group']['id']]
- })
-
- def test_create_port_chain_with_port_pairs(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port, self.port(
- name='port2',
- device_id='default'
- ) as dst_port:
- with self.port_pair(port_pair={
- 'ingress': src_port['port']['id'],
- 'egress': dst_port['port']['id']
- }) as pp1, self.port_pair(port_pair={
- 'ingress': dst_port['port']['id'],
- 'egress': src_port['port']['id']
- }) as pp2:
- with self.port_pair_group(port_pair_group={
- 'port_pairs': [
- pp1['port_pair']['id']
- ]
- }) as pg1, self.port_pair_group(port_pair_group={
- 'port_pairs': [
- pp2['port_pair']['id']
- ]
- }) as pg2:
- self._test_create_port_chain({
- 'port_pair_groups': [
- pg1['port_pair_group']['id'],
- pg2['port_pair_group']['id']
- ]
- })
-
- def test_create_port_chain_with_empty_port_pair_groups(self):
- self._create_port_chain(
- self.fmt, {'port_pair_groups': []},
- expected_res_status=400
- )
-
- def test_create_port_chain_with_nonuuid_port_pair_group_id(self):
- self._create_port_chain(
- self.fmt, {'port_pair_groups': ['unknown']},
- expected_res_status=400
- )
-
- def test_create_port_chain_with_unknown_port_pair_group_id(self):
- self._create_port_chain(
- self.fmt, {'port_pair_groups': [uuidutils.generate_uuid()]},
- expected_res_status=404
- )
-
- def test_create_port_chain_with_same_port_pair_groups(self):
- with self.port_pair_group(
- port_pair_group={}
- ) as pg:
- with self.port_chain(
- port_chain={
- 'port_pair_groups': [pg['port_pair_group']['id']]
- }
- ):
- self._create_port_chain(
- self.fmt, {
- 'port_pair_groups': [pg['port_pair_group']['id']]
- }, expected_res_status=409
- )
-
- def test_create_port_chain_with_no_port_pair_groups(self):
- self._create_port_chain(
- self.fmt, {}, expected_res_status=400
- )
-
- def test_create_port_chain_with_invalid_chain_parameters(self):
- with self.port_pair_group(port_pair_group={}) as pg:
- self._create_port_chain(
- self.fmt, {
- 'chain_parameters': {'correlation': 'unknown'},
- 'port_pair_groups': [pg['port_pair_group']['id']]
- }, expected_res_status=400
- )
-
- def test_create_port_chain_unknown_flow_classifiers(self):
- with self.port_pair_group(port_pair_group={}) as pg:
- self._create_port_chain(
- self.fmt, {
- 'flow_classifiers': [uuidutils.generate_uuid()],
- 'port_pair_groups': [pg['port_pair_group']['id']]
- }, expected_res_status=404
- )
-
- def test_create_port_chain_nouuid_flow_classifiers(self):
- with self.port_pair_group(port_pair_group={}) as pg:
- self._create_port_chain(
- self.fmt, {
- 'flow_classifiers': ['unknown'],
- 'port_pair_groups': [pg['port_pair_group']['id']]
- }, expected_res_status=400
- )
-
- def test_list_port_chains(self):
- with self.port_pair_group(
- port_pair_group={}
- ) as pg1, self.port_pair_group(
- port_pair_group={}
- ) as pg2:
- with self.port_chain(port_chain={
- 'port_pair_groups': [pg1['port_pair_group']['id']]
- }) as pc1, self.port_chain(port_chain={
- 'port_pair_groups': [pg2['port_pair_group']['id']]
- }) as pc2:
- port_chains = [pc1, pc2]
- self._test_list_resources(
- 'port_chain', port_chains
- )
-
- def test_list_port_chains_with_params(self):
- with self.port_pair_group(
- port_pair_group={}
- ) as pg1, self.port_pair_group(
- port_pair_group={}
- ) as pg2:
- with self.port_chain(port_chain={
- 'name': 'test1',
- 'port_pair_groups': [pg1['port_pair_group']['id']]
- }) as pc1, self.port_chain(port_chain={
- 'name': 'test2',
- 'port_pair_groups': [pg2['port_pair_group']['id']]
- }) as pc2:
- self._test_list_resources(
- 'port_chain', [pc1],
- query_params='name=test1'
- )
- self._test_list_resources(
- 'port_chain', [pc2],
- query_params='name=test2'
- )
- self._test_list_resources(
- 'port_chain', [],
- query_params='name=test3'
- )
-
- def test_list_port_chains_with_unknown_params(self):
- with self.port_pair_group(
- port_pair_group={}
- ) as pg1, self.port_pair_group(
- port_pair_group={}
- ) as pg2:
- with self.port_chain(port_chain={
- 'name': 'test1',
- 'port_pair_groups': [pg1['port_pair_group']['id']]
- }) as pc1, self.port_chain(port_chain={
- 'name': 'test2',
- 'port_pair_groups': [pg2['port_pair_group']['id']]
- }) as pc2:
- self._test_list_resources(
- 'port_chain', [pc1, pc2],
- query_params='hello=test3'
- )
-
- def test_show_port_chain(self):
- with self.port_pair_group(
- port_pair_group={}
- ) as pg:
- with self.port_chain(port_chain={
- 'name': 'test1',
- 'description': 'portchain',
- 'port_pair_groups': [pg['port_pair_group']['id']]
- }) as pc:
- req = self.new_show_request(
- 'port_chains', pc['port_chain']['id']
- )
- res = self.deserialize(
- self.fmt, req.get_response(self.ext_api)
- )
- expected = self._get_expected_port_chain(pc['port_chain'])
- for k, v in six.iteritems(expected):
- self.assertEqual(res['port_chain'][k], v)
-
- def test_show_port_chain_noexist(self):
- req = self.new_show_request(
- 'port_chains', '1'
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 404)
-
- def test_update_port_chain(self):
- with self.flow_classifier(
- flow_classifier={}
- ) as fc1, self.flow_classifier(
- flow_classifier={}
- ) as fc2:
- with self.port_pair_group(
- port_pair_group={}
- ) as pg:
- with self.port_chain(port_chain={
- 'name': 'test1',
- 'description': 'desc1',
- 'port_pair_groups': [pg['port_pair_group']['id']],
- 'flow_classifiers': [fc1['flow_classifier']['id']]
- }) as pc:
- updates = {
- 'name': 'test2',
- 'description': 'desc2',
- 'flow_classifiers': [fc2['flow_classifier']['id']]
- }
- req = self.new_update_request(
- 'port_chains', {'port_chain': updates},
- pc['port_chain']['id']
- )
- res = self.deserialize(
- self.fmt,
- req.get_response(self.ext_api)
- )
- expected = pc['port_chain']
- expected.update(updates)
- for k, v in six.iteritems(expected):
- self.assertEqual(res['port_chain'][k], v)
- req = self.new_show_request(
- 'port_chains', pc['port_chain']['id']
- )
- res = self.deserialize(
- self.fmt, req.get_response(self.ext_api)
- )
- for k, v in six.iteritems(expected):
- self.assertEqual(res['port_chain'][k], v)
-
- def test_update_port_chain_port_pair_groups(self):
- with self.port_pair_group(
- port_pair_group={}
- ) as pg1, self.port_pair_group(
- port_pair_group={}
- ) as pg2:
- with self.port_chain(port_chain={
- 'port_pair_groups': [pg1['port_pair_group']['id']],
- }) as pc:
- updates = {
- 'port_pair_groups': [pg2['port_pair_group']['id']]
- }
- req = self.new_update_request(
- 'port_chains', {'port_chain': updates},
- pc['port_chain']['id']
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 400)
-
- def test_update_port_chain_chain_parameters(self):
- with self.port_pair_group(
- port_pair_group={}
- ) as pg:
- with self.port_chain(port_chain={
- 'port_pair_groups': [pg['port_pair_group']['id']],
- }) as pc:
- updates = {
- 'chain_parameters': {'correlation': 'mpls'}
- }
- req = self.new_update_request(
- 'port_chains', {'port_chain': updates},
- pc['port_chain']['id']
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 400)
-
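-    # Deleting a chain must not cascade to its port pair groups: the group
-    # is expected to remain retrievable (200) after the chain is gone.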
- def test_delete_port_chain(self):
- with self.port_pair_group(
- port_pair_group={}
- ) as pg:
- with self.port_chain(port_chain={
- 'port_pair_groups': [pg['port_pair_group']['id']]
- }, do_delete=False) as pc:
- req = self.new_delete_request(
- 'port_chains', pc['port_chain']['id']
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 204)
- req = self.new_show_request(
- 'port_chains', pc['port_chain']['id']
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 404)
- req = self.new_show_request(
- 'port_pair_groups', pg['port_pair_group']['id']
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 200)
-
- def test_delete_port_chain_noexist(self):
- req = self.new_delete_request(
- 'port_chains', '1'
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 404)
-
- def test_delete_flow_classifier_port_chain_exist(self):
- with self.flow_classifier(flow_classifier={
- }) as fc:
- with self.port_pair_group(port_pair_group={
- }) as pg:
- with self.port_chain(port_chain={
- 'port_pair_groups': [pg['port_pair_group']['id']],
- 'flow_classifiers': [fc['flow_classifier']['id']]
- }):
- req = self.new_delete_request(
- 'flow_classifiers', fc['flow_classifier']['id']
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 409)
-
- def test_create_port_pair_group(self):
- self._test_create_port_pair_group({})
-
-    def test_quota_create_port_pair_group(self):
- cfg.CONF.set_override('quota_port_pair_group', 3, group='QUOTAS')
- self._create_port_pair_group(
- self.fmt, {'port_pairs': []}, expected_res_status=201
- )
- self._create_port_pair_group(
- self.fmt, {'port_pairs': []}, expected_res_status=201
- )
- self._create_port_pair_group(
- self.fmt, {'port_pairs': []}, expected_res_status=201
- )
- self._create_port_pair_group(
- self.fmt, {'port_pairs': []}, expected_res_status=409
- )
-
- def test_create_port_pair_group_all_fields(self):
- self._test_create_port_pair_group({
- 'name': 'test1',
- 'description': 'desc1',
- 'port_pairs': []
- })
-
- def test_create_port_pair_group_with_port_pairs(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port, self.port(
- name='port2',
- device_id='default'
- ) as dst_port:
- with self.port_pair(port_pair={
- 'ingress': src_port['port']['id'],
- 'egress': dst_port['port']['id']
- }) as pp1, self.port_pair(port_pair={
- 'ingress': dst_port['port']['id'],
- 'egress': src_port['port']['id']
- }) as pp2:
- self._test_create_port_pair_group({
- 'port_pairs': [
- pp1['port_pair']['id'],
- pp2['port_pair']['id']
- ]
- })
-
- def test_create_port_pair_group_with_nouuid_port_pair_id(self):
- self._create_port_pair_group(
- self.fmt, {'port_pairs': ['unknown']},
- expected_res_status=400
- )
-
- def test_create_port_pair_group_with_unknown_port_pair_id(self):
- self._create_port_pair_group(
- self.fmt, {'port_pairs': [uuidutils.generate_uuid()]},
- expected_res_status=404
- )
-
- def test_create_port_pair_group_share_port_pair_id(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port, self.port(
- name='port2',
- device_id='default'
- ) as dst_port:
- with self.port_pair(port_pair={
- 'ingress': src_port['port']['id'],
- 'egress': dst_port['port']['id']
- }) as pp:
- with self.port_pair_group(port_pair_group={
- 'port_pairs': [pp['port_pair']['id']]
- }):
- self._create_port_pair_group(
- self.fmt, {'port_pairs': [pp['port_pair']['id']]},
- expected_res_status=409
- )
-
- def test_list_port_pair_groups(self):
- with self.port_pair_group(port_pair_group={
- 'name': 'test1'
- }) as pc1, self.port_pair_group(port_pair_group={
- 'name': 'test2'
- }) as pc2:
- port_pair_groups = [pc1, pc2]
- self._test_list_resources(
- 'port_pair_group', port_pair_groups
- )
-
- def test_list_port_pair_groups_with_params(self):
- with self.port_pair_group(port_pair_group={
- 'name': 'test1'
- }) as pc1, self.port_pair_group(port_pair_group={
- 'name': 'test2'
- }) as pc2:
- self._test_list_resources(
- 'port_pair_group', [pc1],
- query_params='name=test1'
- )
- self._test_list_resources(
- 'port_pair_group', [pc2],
- query_params='name=test2'
- )
- self._test_list_resources(
- 'port_pair_group', [],
- query_params='name=test3'
- )
-
- def test_list_port_pair_groups_with_unknown_params(self):
- with self.port_pair_group(port_pair_group={
- 'name': 'test1'
- }) as pc1, self.port_pair_group(port_pair_group={
- 'name': 'test2'
- }) as pc2:
- self._test_list_resources(
- 'port_pair_group', [pc1, pc2],
- query_params='hello=test3'
- )
-
- def test_show_port_pair_group(self):
- with self.port_pair_group(port_pair_group={
- 'name': 'test1'
- }) as pc:
- req = self.new_show_request(
- 'port_pair_groups', pc['port_pair_group']['id']
- )
- res = self.deserialize(
- self.fmt, req.get_response(self.ext_api)
- )
- for k, v in six.iteritems(pc['port_pair_group']):
- self.assertEqual(res['port_pair_group'][k], v)
-
- def test_show_port_pair_group_noexist(self):
- req = self.new_show_request(
- 'port_pair_groups', '1'
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 404)
-
- def test_update_port_pair_group(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port, self.port(
- name='port2',
- device_id='default'
- ) as dst_port:
- with self.port_pair(port_pair={
- 'ingress': src_port['port']['id'],
- 'egress': dst_port['port']['id']
- }) as pp1, self.port_pair(port_pair={
- 'ingress': dst_port['port']['id'],
- 'egress': src_port['port']['id']
- }) as pp2:
- with self.port_pair_group(port_pair_group={
- 'name': 'test1',
- 'description': 'desc1',
- 'port_pairs': [pp1['port_pair']['id']]
- }) as pg:
- updates = {
- 'name': 'test2',
- 'description': 'desc2',
- 'port_pairs': [pp2['port_pair']['id']]
- }
- req = self.new_update_request(
- 'port_pair_groups', {'port_pair_group': updates},
- pg['port_pair_group']['id']
- )
- res = self.deserialize(
- self.fmt,
- req.get_response(self.ext_api)
- )
- expected = pg['port_pair_group']
- expected.update(updates)
- for k, v in six.iteritems(expected):
- self.assertEqual(res['port_pair_group'][k], v)
- req = self.new_show_request(
- 'port_pair_groups', pg['port_pair_group']['id']
- )
- res = self.deserialize(
- self.fmt, req.get_response(self.ext_api)
- )
- for k, v in six.iteritems(expected):
- self.assertEqual(res['port_pair_group'][k], v)
-
- def test_delete_port_pair_group(self):
- with self.port_pair_group(port_pair_group={
- 'name': 'test1'
- }, do_delete=False) as pc:
- req = self.new_delete_request(
- 'port_pair_groups', pc['port_pair_group']['id']
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 204)
- req = self.new_show_request(
- 'port_pair_groups', pc['port_pair_group']['id']
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 404)
-
- def test_delete_port_pair_group_port_chain_exist(self):
- with self.port_pair_group(port_pair_group={
- 'name': 'test1'
- }) as pg:
- with self.port_chain(port_chain={
- 'port_pair_groups': [pg['port_pair_group']['id']]
- }):
- req = self.new_delete_request(
- 'port_pair_groups', pg['port_pair_group']['id']
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 409)
-
- def test_delete_port_pair_group_noexist(self):
- req = self.new_delete_request(
- 'port_pair_groups', '1'
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 404)
-
- def test_create_port_pair(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port, self.port(
- name='port2',
- device_id='default'
- ) as dst_port:
- self._test_create_port_pair({
- 'ingress': src_port['port']['id'],
- 'egress': dst_port['port']['id']
- })
-
-    def test_quota_create_port_pair(self):
- cfg.CONF.set_override('quota_port_pair', 3, group='QUOTAS')
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port1, self.port(
- name='port2',
- device_id='default'
- ) as dst_port1, self.port(
- name='port3',
- device_id='default'
- ) as src_port2, self.port(
- name='port4',
- device_id='default'
- ) as dst_port2, self.port(
- name='port5',
- device_id='default'
- ) as src_port3, self.port(
- name='port6',
- device_id='default'
- ) as dst_port3, self.port(
- name='port7',
- device_id='default'
- ) as src_port4, self.port(
- name='port8',
- device_id='default'
- ) as dst_port4:
- self._create_port_pair(
- self.fmt, {
- 'ingress': src_port1['port']['id'],
- 'egress': dst_port1['port']['id']
- }, expected_res_status=201)
- self._create_port_pair(
- self.fmt, {
- 'ingress': src_port2['port']['id'],
- 'egress': dst_port2['port']['id']
- }, expected_res_status=201)
- self._create_port_pair(
- self.fmt, {
- 'ingress': src_port3['port']['id'],
- 'egress': dst_port3['port']['id']
- }, expected_res_status=201)
- self._create_port_pair(
- self.fmt, {
- 'ingress': src_port4['port']['id'],
- 'egress': dst_port4['port']['id']
- }, expected_res_status=409)
-
- def test_create_port_pair_all_fields(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port, self.port(
- name='port2',
- device_id='default'
- ) as dst_port:
- self._test_create_port_pair({
- 'name': 'test1',
- 'description': 'desc1',
- 'ingress': src_port['port']['id'],
- 'egress': dst_port['port']['id'],
- 'service_function_parameters': {'correlation': None}
- })
-
- def test_create_port_pair_none_service_function_parameters(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port, self.port(
- name='port2',
- device_id='default'
- ) as dst_port:
- self._test_create_port_pair({
- 'ingress': src_port['port']['id'],
- 'egress': dst_port['port']['id'],
- 'service_function_parameters': None
- })
-
- def test_create_port_pair_empty_service_function_parameters(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port, self.port(
- name='port2',
- device_id='default'
- ) as dst_port:
- self._test_create_port_pair({
- 'ingress': src_port['port']['id'],
- 'egress': dst_port['port']['id'],
- 'service_function_parameters': {}
- })
-
- def test_create_port_pair_with_src_dst_same_port(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as src_dst_port:
- self._test_create_port_pair({
- 'ingress': src_dst_port['port']['id'],
- 'egress': src_dst_port['port']['id']
- })
-
- def test_create_port_pair_empty_input(self):
- self._create_port_pair(self.fmt, {}, expected_res_status=400)
-
- def test_create_port_pair_with_no_ingress(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as dst_port:
- self._create_port_pair(
- self.fmt,
- {
- 'egress': dst_port['port']['id']
- },
- expected_res_status=400
- )
-
- def test_create_port_pair_with_no_egress(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port:
- self._create_port_pair(
- self.fmt,
- {
- 'ingress': src_port['port']['id']
- },
- expected_res_status=400
- )
-
- def test_create_port_pair_with_nouuid_ingress(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as dst_port:
- self._create_port_pair(
- self.fmt,
- {
- 'ingress': '1',
- 'egress': dst_port['port']['id']
- },
- expected_res_status=400
- )
-
- def test_create_port_pair_with_unknown_ingress(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as dst_port:
- self._create_port_pair(
- self.fmt,
- {
- 'ingress': uuidutils.generate_uuid(),
- 'egress': dst_port['port']['id']
- },
- expected_res_status=404
- )
-
- def test_create_port_pair_with_nouuid_egress(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port:
- self._create_port_pair(
- self.fmt,
- {
- 'ingress': src_port['port']['id'],
- 'egress': '1'
- },
- expected_res_status=400
- )
-
-    def test_create_port_pair_with_unknown_egress(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port:
- self._create_port_pair(
- self.fmt,
- {
- 'ingress': src_port['port']['id'],
- 'egress': uuidutils.generate_uuid()
- },
- expected_res_status=404
- )
-
- def test_create_port_pair_ingress_egress_different_hosts(self):
- with self.port(
- name='port1',
- device_id='device1'
- ) as src_port, self.port(
- name='port2',
- device_id='device2'
- ) as dst_port:
- self._create_port_pair(
- self.fmt,
- {
- 'ingress': src_port['port']['id'],
- 'egress': dst_port['port']['id']
- },
- expected_res_status=400
- )
-
- def test_create_port_pair_with_invalid_service_function_parameters(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as src_dst_port:
- self._create_port_pair(
- self.fmt,
- {
- 'ingress': src_dst_port['port']['id'],
- 'egress': src_dst_port['port']['id'],
- 'service_function_parameters': {'abc': 'def'}
- },
- expected_res_status=400
- )
-
- def test_list_port_pairs(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port, self.port(
- name='port2',
- device_id='default'
- ) as dst_port:
- with self.port_pair(port_pair={
- 'ingress': src_port['port']['id'],
- 'egress': dst_port['port']['id']
- }) as pc1, self.port_pair(port_pair={
- 'ingress': dst_port['port']['id'],
- 'egress': src_port['port']['id']
- }) as pc2:
- port_pairs = [pc1, pc2]
- self._test_list_resources(
- 'port_pair', port_pairs
- )
-
- def test_list_port_pairs_with_params(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port, self.port(
- name='port2',
- device_id='default'
- ) as dst_port:
- with self.port_pair(port_pair={
- 'name': 'test1',
- 'ingress': src_port['port']['id'],
- 'egress': dst_port['port']['id']
- }) as pc1, self.port_pair(port_pair={
- 'name': 'test2',
- 'ingress': dst_port['port']['id'],
- 'egress': src_port['port']['id']
- }) as pc2:
- self._test_list_resources(
- 'port_pair', [pc1],
- query_params='name=test1'
- )
- self._test_list_resources(
- 'port_pair', [pc2],
- query_params='name=test2'
- )
- self._test_list_resources(
- 'port_pair', [],
- query_params='name=test3'
- )
-
- def test_list_port_pairs_with_unknown_params(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port, self.port(
- name='port2',
- device_id='default'
- ) as dst_port:
- with self.port_pair(port_pair={
- 'name': 'test1',
- 'ingress': src_port['port']['id'],
- 'egress': dst_port['port']['id']
- }) as pc1, self.port_pair(port_pair={
- 'name': 'test2',
- 'ingress': dst_port['port']['id'],
- 'egress': src_port['port']['id']
- }) as pc2:
- port_pairs = [pc1, pc2]
- self._test_list_resources(
- 'port_pair', port_pairs,
- query_params='hello=test3'
- )
-
- def test_show_port_pair(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port, self.port(
- name='port2',
- device_id='default'
- ) as dst_port:
- with self.port_pair(port_pair={
- 'ingress': src_port['port']['id'],
- 'egress': dst_port['port']['id']
- }) as pc:
- req = self.new_show_request(
- 'port_pairs', pc['port_pair']['id']
- )
- res = self.deserialize(
- self.fmt, req.get_response(self.ext_api)
- )
- for k, v in six.iteritems(pc['port_pair']):
- self.assertEqual(res['port_pair'][k], v)
-
- def test_show_port_pair_noexist(self):
- req = self.new_show_request(
- 'port_pairs', '1'
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 404)
-
- def test_update_port_pair(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port, self.port(
- name='port2',
- device_id='default'
- ) as dst_port:
- with self.port_pair(port_pair={
- 'name': 'test1',
- 'description': 'desc1',
- 'ingress': src_port['port']['id'],
- 'egress': dst_port['port']['id']
- }) as pc:
- updates = {
- 'name': 'test2',
- 'description': 'desc2'
- }
- req = self.new_update_request(
- 'port_pairs', {'port_pair': updates},
- pc['port_pair']['id']
- )
- res = self.deserialize(
- self.fmt,
- req.get_response(self.ext_api)
- )
- expected = pc['port_pair']
- expected.update(updates)
- for k, v in six.iteritems(expected):
- self.assertEqual(res['port_pair'][k], v)
- req = self.new_show_request(
- 'port_pairs', pc['port_pair']['id']
- )
- res = self.deserialize(
- self.fmt, req.get_response(self.ext_api)
- )
- for k, v in six.iteritems(expected):
- self.assertEqual(res['port_pair'][k], v)
-
- def test_update_port_pair_service_function_parameters(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port, self.port(
- name='port2',
- device_id='default'
- ) as dst_port:
- with self.port_pair(port_pair={
- 'name': 'test1',
- 'description': 'desc1',
- 'ingress': src_port['port']['id'],
- 'egress': dst_port['port']['id']
- }) as pc:
- updates = {
- 'service_function_parameters': {
- 'correlation': None
- }
- }
- req = self.new_update_request(
- 'port_pairs', {'port_pair': updates},
- pc['port_pair']['id']
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 400)
-
- def test_update_port_pair_ingress(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port, self.port(
- name='port2',
- device_id='default'
- ) as dst_port:
- with self.port_pair(port_pair={
- 'name': 'test1',
- 'description': 'desc1',
- 'ingress': src_port['port']['id'],
- 'egress': dst_port['port']['id']
- }) as pc:
- updates = {
- 'ingress': dst_port['port']['id']
- }
- req = self.new_update_request(
- 'port_pairs', {'port_pair': updates},
- pc['port_pair']['id']
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 400)
-
- def test_update_port_pair_egress(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port, self.port(
- name='port2',
- device_id='default'
- ) as dst_port:
- with self.port_pair(port_pair={
- 'name': 'test1',
- 'description': 'desc1',
- 'ingress': src_port['port']['id'],
- 'egress': dst_port['port']['id']
- }) as pc:
- updates = {
- 'egress': src_port['port']['id']
- }
- req = self.new_update_request(
- 'port_pairs', {'port_pair': updates},
- pc['port_pair']['id']
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 400)
-
- def test_delete_port_pair(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port, self.port(
- name='port2',
- device_id='default'
- ) as dst_port:
- with self.port_pair(port_pair={
- 'ingress': src_port['port']['id'],
- 'egress': dst_port['port']['id']
- }, do_delete=False) as pc:
- req = self.new_delete_request(
- 'port_pairs', pc['port_pair']['id']
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 204)
- req = self.new_show_request(
- 'port_pairs', pc['port_pair']['id']
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 404)
-
- def test_delete_port_pair_noexist(self):
- req = self.new_delete_request(
- 'port_pairs', '1'
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 404)
-
- def test_delete_port_pair_port_pair_group_exist(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port, self.port(
- name='port2',
- device_id='default'
- ) as dst_port:
- with self.port_pair(port_pair={
- 'ingress': src_port['port']['id'],
- 'egress': dst_port['port']['id']
- }) as pp:
- with self.port_pair_group(port_pair_group={
- 'port_pairs': [pp['port_pair']['id']]
- }):
- req = self.new_delete_request(
- 'port_pairs', pp['port_pair']['id']
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 409)
-
- def test_delete_ingress_port_pair_exist(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port, self.port(
- name='port2',
- device_id='default'
- ) as dst_port:
- with self.port_pair(port_pair={
- 'ingress': src_port['port']['id'],
- 'egress': dst_port['port']['id']
- }):
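-                # Core port deletion goes through self.api rather than
-                # self.ext_api; it is expected to fail while a port pair
-                # still references the port (the egress variant below
-                # behaves the same).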
- req = self.new_delete_request(
- 'ports', src_port['port']['id']
- )
- res = req.get_response(self.api)
- self.assertEqual(res.status_int, 500)
-
- def test_delete_egress_port_pair_exist(self):
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port, self.port(
- name='port2',
- device_id='default'
- ) as dst_port:
- with self.port_pair(port_pair={
- 'ingress': src_port['port']['id'],
- 'egress': dst_port['port']['id']
- }):
- req = self.new_delete_request(
- 'ports', dst_port['port']['id']
- )
- res = req.get_response(self.api)
- self.assertEqual(res.status_int, 500)
diff --git a/networking_sfc/tests/unit/extensions/__init__.py b/networking_sfc/tests/unit/extensions/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/tests/unit/extensions/__init__.py
+++ /dev/null
diff --git a/networking_sfc/tests/unit/extensions/test_flowclassifier.py b/networking_sfc/tests/unit/extensions/test_flowclassifier.py
deleted file mode 100644
index 7026ac5..0000000
--- a/networking_sfc/tests/unit/extensions/test_flowclassifier.py
+++ /dev/null
@@ -1,603 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-import mock
-from webob import exc
-import webtest
-
-from oslo_utils import uuidutils
-
-from neutron.api.v2 import resource as api_res_log
-from neutron.common import config
-from neutron import manager
-from neutron.notifiers import nova as nova_log
-
-from neutron.tests.unit.api.v2 import test_base as test_api_v2
-from neutron.tests.unit.extensions import base as test_api_v2_extension
-
-from networking_sfc.extensions import flowclassifier as fc_ext
-
-_uuid = uuidutils.generate_uuid
-_get_path = test_api_v2._get_path
-
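-# Collection path built from the extension prefix (leading '/' stripped) and
-# the pluralized resource name, presumably 'sfc/flow_classifiers'.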
-FLOW_CLASSIFIER_PATH = (fc_ext.FLOW_CLASSIFIER_PREFIX[1:] + '/' +
- fc_ext.FLOW_CLASSIFIER_EXT + 's')
-
-
-class FlowClassifierExtensionTestCase(
- test_api_v2_extension.ExtensionTestCase
-):
- fmt = 'json'
-
- def setUp(self):
-        self._mock_unnecessary_logging()
- super(FlowClassifierExtensionTestCase, self).setUp()
- self._setUpExtension(
- 'networking_sfc.extensions.flowclassifier.'
- 'FlowClassifierPluginBase',
- fc_ext.FLOW_CLASSIFIER_EXT,
- fc_ext.RESOURCE_ATTRIBUTE_MAP,
- fc_ext.Flowclassifier,
- fc_ext.FLOW_CLASSIFIER_PREFIX[1:],
- plural_mappings={}
- )
-
-    def _mock_unnecessary_logging(self):
- mock_log_cfg_p = mock.patch.object(config, 'LOG')
- self.mock_log_cfg = mock_log_cfg_p.start()
-
- mock_log_manager_p = mock.patch.object(manager, 'LOG')
- self.mock_log_manager = mock_log_manager_p.start()
-
- mock_log_nova_p = mock.patch.object(nova_log, 'LOG')
- self.mock_log_nova = mock_log_nova_p.start()
-
- mock_log_api_res_log_p = mock.patch.object(api_res_log, 'LOG')
- self.mock_log_api_res_log = mock_log_api_res_log_p.start()
-
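-    # Builds the attribute dict the plugin should be called with: port range
-    # values posted as strings are normalized to ints, and unset fields fall
-    # back to their defaults (empty name/description, IPv4 ethertype, empty
-    # l7_parameters).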
- def _get_expected_flow_classifier(self, data):
- source_port_range_min = data['flow_classifier'].get(
- 'source_port_range_min')
- if source_port_range_min is not None:
- source_port_range_min = int(source_port_range_min)
- source_port_range_max = data['flow_classifier'].get(
- 'source_port_range_max')
- if source_port_range_max is not None:
- source_port_range_max = int(source_port_range_max)
- destination_port_range_min = data['flow_classifier'].get(
- 'destination_port_range_min')
- if destination_port_range_min is not None:
- destination_port_range_min = int(destination_port_range_min)
- destination_port_range_max = data['flow_classifier'].get(
- 'destination_port_range_max')
- if destination_port_range_max is not None:
- destination_port_range_max = int(destination_port_range_max)
-
- return {'flow_classifier': {
- 'name': data['flow_classifier'].get('name') or '',
- 'description': data['flow_classifier'].get('description') or '',
- 'tenant_id': data['flow_classifier']['tenant_id'],
- 'source_port_range_min': source_port_range_min,
- 'source_port_range_max': source_port_range_max,
- 'destination_port_range_min': destination_port_range_min,
- 'destination_port_range_max': destination_port_range_max,
- 'l7_parameters': data['flow_classifier'].get(
- 'l7_parameters') or {},
- 'destination_ip_prefix': data['flow_classifier'].get(
- 'destination_ip_prefix'),
- 'source_ip_prefix': data['flow_classifier'].get(
- 'source_ip_prefix'),
- 'logical_source_port': data['flow_classifier'].get(
- 'logical_source_port'),
- 'logical_destination_port': data['flow_classifier'].get(
- 'logical_destination_port'),
- 'ethertype': data['flow_classifier'].get(
- 'ethertype') or 'IPv4',
- 'protocol': data['flow_classifier'].get(
- 'protocol')
- }}
-
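-    # Drops the logical port keys from a classifier dict; the create tests
-    # apply this to the mocked return value before comparing responses.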
- def _clean_expected_flow_classifier(self, expected_flow_classifier):
- if 'logical_source_port' in expected_flow_classifier:
- del expected_flow_classifier['logical_source_port']
- if 'logical_destination_port' in expected_flow_classifier:
- del expected_flow_classifier['logical_destination_port']
-
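-    # The plugin is mocked, so each create test checks two things: the
-    # extension layer passes normalized attributes to the plugin, and the
-    # API response echoes the plugin's return value with a 201 status.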
- def test_create_flow_classifier(self):
- flowclassifier_id = _uuid()
- data = {'flow_classifier': {
- 'tenant_id': _uuid(),
- }}
- expected_data = self._get_expected_flow_classifier(data)
- return_value = copy.copy(expected_data['flow_classifier'])
- return_value.update({'id': flowclassifier_id})
- self._clean_expected_flow_classifier(return_value)
- instance = self.plugin.return_value
- instance.create_flow_classifier.return_value = return_value
- res = self.api.post(
- _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
- instance.create_flow_classifier.assert_called_with(
- mock.ANY,
- flow_classifier=expected_data)
- self.assertEqual(res.status_int, exc.HTTPCreated.code)
- res = self.deserialize(res)
- self.assertIn('flow_classifier', res)
- self.assertEqual(return_value, res['flow_classifier'])
-
- def test_create_flow_classifier_port_string(self):
- flowclassifier_id = _uuid()
- data = {'flow_classifier': {
- 'source_port_range_min': '100',
- 'source_port_range_max': '200',
- 'destination_port_range_min': '100',
- 'destination_port_range_max': '200',
- 'tenant_id': _uuid(),
- }}
- expected_data = self._get_expected_flow_classifier(data)
- return_value = copy.copy(expected_data['flow_classifier'])
- return_value.update({'id': flowclassifier_id})
- self._clean_expected_flow_classifier(return_value)
- instance = self.plugin.return_value
- instance.create_flow_classifier.return_value = return_value
- res = self.api.post(
- _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
- instance.create_flow_classifier.assert_called_with(
- mock.ANY,
- flow_classifier=expected_data)
- self.assertEqual(res.status_int, exc.HTTPCreated.code)
- res = self.deserialize(res)
- self.assertIn('flow_classifier', res)
- self.assertEqual(return_value, res['flow_classifier'])
-
- def test_create_flow_classifier_ip_prefix_with_mask(self):
- flowclassifier_id = _uuid()
- data = {'flow_classifier': {
- 'source_ip_prefix': '10.0.0.0/8',
- 'tenant_id': _uuid(),
- }}
- expected_data = self._get_expected_flow_classifier(data)
- return_value = copy.copy(expected_data['flow_classifier'])
- return_value.update({'id': flowclassifier_id})
- self._clean_expected_flow_classifier(return_value)
- instance = self.plugin.return_value
- instance.create_flow_classifier.return_value = return_value
- res = self.api.post(
- _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
- instance.create_flow_classifier.assert_called_with(
- mock.ANY,
- flow_classifier=expected_data)
- self.assertEqual(res.status_int, exc.HTTPCreated.code)
- res = self.deserialize(res)
- self.assertIn('flow_classifier', res)
- self.assertEqual(return_value, res['flow_classifier'])
-
- def test_create_flow_classifier_non_l7_parameters(self):
- flowclassifier_id = _uuid()
- data = {'flow_classifier': {
- 'tenant_id': _uuid(),
- 'l7_parameters': None
- }}
- expected_data = self._get_expected_flow_classifier(data)
- return_value = copy.copy(expected_data['flow_classifier'])
- return_value.update({'id': flowclassifier_id})
- self._clean_expected_flow_classifier(return_value)
- instance = self.plugin.return_value
- instance.create_flow_classifier.return_value = return_value
- res = self.api.post(
- _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
- instance.create_flow_classifier.assert_called_with(
- mock.ANY,
- flow_classifier=expected_data)
- self.assertEqual(res.status_int, exc.HTTPCreated.code)
- res = self.deserialize(res)
- self.assertIn('flow_classifier', res)
- self.assertEqual(return_value, res['flow_classifier'])
-
- def test_create_flow_classifier_default_ethertype(self):
- flowclassifier_id = _uuid()
- data = {'flow_classifier': {
- 'tenant_id': _uuid(),
- 'ethertype': 'IPv4'
- }}
- expected_data = self._get_expected_flow_classifier(data)
- return_value = copy.copy(expected_data['flow_classifier'])
- return_value.update({'id': flowclassifier_id})
- self._clean_expected_flow_classifier(return_value)
- instance = self.plugin.return_value
- instance.create_flow_classifier.return_value = return_value
- res = self.api.post(
- _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
- instance.create_flow_classifier.assert_called_with(
- mock.ANY,
- flow_classifier=expected_data)
- self.assertEqual(res.status_int, exc.HTTPCreated.code)
- res = self.deserialize(res)
- self.assertIn('flow_classifier', res)
- self.assertEqual(return_value, res['flow_classifier'])
-
- def test_create_flow_classifier_all_fields(self):
- flowclassifier_id = _uuid()
- data = {'flow_classifier': {
- 'name': 'test1',
- 'description': 'desc',
- 'tenant_id': _uuid(),
- 'source_port_range_min': 100,
- 'source_port_range_max': 200,
- 'destination_port_range_min': 100,
- 'destination_port_range_max': 200,
- 'l7_parameters': {},
- 'destination_ip_prefix': '10.0.0.0/8',
- 'source_ip_prefix': '10.0.0.0/8',
- 'logical_source_port': _uuid(),
- 'logical_destination_port': _uuid(),
- 'ethertype': None,
- 'protocol': None
- }}
- expected_data = self._get_expected_flow_classifier(data)
- return_value = copy.copy(expected_data['flow_classifier'])
- return_value.update({'id': flowclassifier_id})
- self._clean_expected_flow_classifier(return_value)
- instance = self.plugin.return_value
- instance.create_flow_classifier.return_value = return_value
- res = self.api.post(
- _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
- instance.create_flow_classifier.assert_called_with(
- mock.ANY,
- flow_classifier=expected_data)
- self.assertEqual(res.status_int, exc.HTTPCreated.code)
- res = self.deserialize(res)
- self.assertIn('flow_classifier', res)
- self.assertEqual(return_value, res['flow_classifier'])
-
- def test_create_flow_classifier_invalid_l7_parameters(self):
- data = {'flow_classifier': {
- 'l7_parameters': {'abc': 'def'},
- 'tenant_id': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.post,
- _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_create_flow_classifier_invalid_protocol(self):
- data = {'flow_classifier': {
- 'protocol': 'unknown',
- 'tenant_id': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.post,
- _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_create_flow_classifier_invalid_ethertype(self):
- data = {'flow_classifier': {
- 'ethertype': 'unknown',
- 'tenant_id': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.post,
- _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_create_flow_classifier_port_small(self):
- data = {'flow_classifier': {
- 'source_port_range_min': -1,
- 'tenant_id': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.post,
- _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_create_flow_classifier_port_large(self):
- data = {'flow_classifier': {
- 'source_port_range_min': 65536,
- 'tenant_id': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.post,
- _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_create_flow_classifier_ip_prefix_no_cidr(self):
- data = {'flow_classifier': {
- 'source_ip_prefix': '10.0.0.0',
- 'tenant_id': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.post,
- _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_create_flow_classifier_ip_prefix_invalid_cidr(self):
- data = {'flow_classifier': {
- 'source_ip_prefix': '10.0.0.0/33',
- 'tenant_id': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.post,
- _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_create_flow_classifier_port_id_nouuid(self):
- data = {'flow_classifier': {
- 'logical_source_port': 'unknown',
- 'tenant_id': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.post,
- _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_flow_classifier_list(self):
- flowclassifier_id = _uuid()
- return_value = [{
- 'tenant_id': _uuid(),
- 'id': flowclassifier_id
- }]
- instance = self.plugin.return_value
- instance.get_flow_classifiers.return_value = return_value
-
- res = self.api.get(
- _get_path(FLOW_CLASSIFIER_PATH, fmt=self.fmt))
-
- instance.get_flow_classifiers.assert_called_with(
- mock.ANY,
- fields=mock.ANY,
- filters=mock.ANY
- )
- self.assertEqual(res.status_int, exc.HTTPOk.code)
- res = self.deserialize(res)
- self.assertIn('flow_classifiers', res)
- self.assertEqual(res['flow_classifiers'], return_value)
-
- def test_flow_classifier_get(self):
- flowclassifier_id = _uuid()
- return_value = {
- 'tenant_id': _uuid(),
- 'id': flowclassifier_id
- }
-
- instance = self.plugin.return_value
- instance.get_flow_classifier.return_value = return_value
-
- res = self.api.get(
- _get_path(
- FLOW_CLASSIFIER_PATH,
- id=flowclassifier_id, fmt=self.fmt
- )
- )
-
- instance.get_flow_classifier.assert_called_with(
- mock.ANY,
- flowclassifier_id,
- fields=mock.ANY
- )
- self.assertEqual(res.status_int, exc.HTTPOk.code)
- res = self.deserialize(res)
- self.assertIn('flow_classifier', res)
- self.assertEqual(return_value, res['flow_classifier'])
-
- def test_flow_classifier_update(self):
- flowclassifier_id = _uuid()
- update_data = {'flow_classifier': {
- 'name': 'new_name',
- 'description': 'new_desc',
- }}
- return_value = {
- 'tenant_id': _uuid(),
- 'id': flowclassifier_id
- }
-
- instance = self.plugin.return_value
- instance.update_flow_classifier.return_value = return_value
-
- res = self.api.put(
- _get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id,
- fmt=self.fmt),
- self.serialize(update_data))
-
- instance.update_flow_classifier.assert_called_with(
- mock.ANY, flowclassifier_id,
- flow_classifier=update_data)
- self.assertEqual(res.status_int, exc.HTTPOk.code)
- res = self.deserialize(res)
- self.assertIn('flow_classifier', res)
- self.assertEqual(res['flow_classifier'], return_value)
-
- def test_flow_classifier_update_source_port_range_min(self):
- flowclassifier_id = _uuid()
- data = {'flow_classifier': {
- 'source_port_range_min': 100,
- 'tenant_id': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.put,
- _get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id,
- fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_flow_classifier_update_source_port_range_max(self):
- flowclassifier_id = _uuid()
- data = {'flow_classifier': {
- 'source_port_range_max': 100,
- 'tenant_id': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.put,
- _get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id,
- fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_flow_classifier_update_destination_port_range_min(self):
- flowclassifier_id = _uuid()
- data = {'flow_classifier': {
- 'destination_port_range_min': 100,
- 'tenant_id': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.put,
- _get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id,
- fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_flow_classifier_update_destination_port_range_max(self):
- flowclassifier_id = _uuid()
- data = {'flow_classifier': {
- 'destination_port_range_max': 100,
- 'tenant_id': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.put,
- _get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id,
- fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_flow_classifier_update_source_ip_prefix(self):
- flowclassifier_id = _uuid()
- data = {'flow_classifier': {
- 'source_ip_prefix': '10.0.0.0/8',
- 'tenant_id': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.put,
- _get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id,
- fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_flow_classifier_update_destination_ip_prefix(self):
- flowclassifier_id = _uuid()
- data = {'flow_classifier': {
- 'destination_ip_prefix': '10.0.0.0/8',
- 'tenant_id': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.put,
- _get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id,
- fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_flow_classifier_update_logical_source_port(self):
- flowclassifier_id = _uuid()
- data = {'flow_classifier': {
- 'logical_source_port': _uuid(),
- 'tenant_id': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.put,
- _get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id,
- fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_flow_classifier_update_logical_destination_port(self):
- flowclassifier_id = _uuid()
- data = {'flow_classifier': {
- 'logical_destination_port': _uuid(),
- 'tenant_id': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.put,
- _get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id,
- fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_flow_classifier_update_ethertype(self):
- flowclassifier_id = _uuid()
- data = {'flow_classifier': {
- 'ethertype': None,
- 'tenant_id': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.put,
- _get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id,
- fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_flow_classifier_update_protocol(self):
- flowclassifier_id = _uuid()
- data = {'flow_classifier': {
- 'protocol': None,
- 'tenant_id': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.put,
- _get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id,
- fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_flow_classifier_update_l7_parameters(self):
- flowclassifier_id = _uuid()
- data = {'flow_classifier': {
- 'l7_parameters': {},
- 'tenant_id': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.put,
- _get_path(FLOW_CLASSIFIER_PATH, id=flowclassifier_id,
- fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_flow_classifier_delete(self):
- self._test_entity_delete('flow_classifier')
diff --git a/networking_sfc/tests/unit/extensions/test_sfc.py b/networking_sfc/tests/unit/extensions/test_sfc.py
deleted file mode 100644
index 01b7d8c..0000000
--- a/networking_sfc/tests/unit/extensions/test_sfc.py
+++ /dev/null
@@ -1,751 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-import mock
-from webob import exc
-import webtest
-
-from oslo_utils import uuidutils
-
-from neutron.api.v2 import resource as api_res_log
-from neutron.common import config as cfg
-from neutron import manager
-from neutron.notifiers import nova as nova_log
-
-from neutron.tests.unit.api.v2 import test_base as test_api_v2
-from neutron.tests.unit.extensions import base as test_api_v2_extension
-
-from networking_sfc.extensions import sfc as sfc_ext
-
-_uuid = uuidutils.generate_uuid
-_get_path = test_api_v2._get_path
-
-PORT_CHAIN_PATH = (sfc_ext.SFC_PREFIX[1:] + '/port_chains')
-PORT_PAIR_PATH = (sfc_ext.SFC_PREFIX[1:] + '/port_pairs')
-PORT_PAIR_GROUP_PATH = (sfc_ext.SFC_PREFIX[1:] + '/port_pair_groups')
-
-
-class SfcExtensionTestCase(test_api_v2_extension.ExtensionTestCase):
- fmt = 'json'
-
- def setUp(self):
- self._mock_unnecessary_logging()
- super(SfcExtensionTestCase, self).setUp()
- self._setUpExtension(
- 'networking_sfc.extensions.sfc.SfcPluginBase',
- sfc_ext.SFC_EXT,
- sfc_ext.RESOURCE_ATTRIBUTE_MAP,
- sfc_ext.Sfc,
- sfc_ext.SFC_PREFIX[1:],
- plural_mappings={}
- )
-
- def _mock_unnecessary_logging(self):
- mock_log_cfg_p = mock.patch.object(cfg, 'LOG')
- self.mock_log_cfg = mock_log_cfg_p.start()
-
- mock_log_manager_p = mock.patch.object(manager, 'LOG')
- self.mock_log_manager = mock_log_manager_p.start()
-
- mock_log_nova_p = mock.patch.object(nova_log, 'LOG')
- self.mock_log_nova = mock_log_nova_p.start()
-
- mock_log_api_res_log_p = mock.patch.object(api_res_log, 'LOG')
- self.mock_log_api_res_log = mock_log_api_res_log_p.start()
-
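- # Expected port chain body after API defaults: empty name, description and
- # flow_classifiers, with chain_parameters defaulting to MPLS correlation.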
- def _get_expected_port_chain(self, data):
- return {'port_chain': {
- 'description': data['port_chain'].get('description') or '',
- 'name': data['port_chain'].get('name') or '',
- 'port_pair_groups': data['port_chain']['port_pair_groups'],
- 'chain_parameters': data['port_chain'].get(
- 'chain_parameters') or {'correlation': 'mpls'},
- 'flow_classifiers': data['port_chain'].get(
- 'flow_classifiers') or [],
- 'tenant_id': data['port_chain']['tenant_id']
- }}
-
- def test_create_port_chain(self):
- portchain_id = _uuid()
- data = {'port_chain': {
- 'port_pair_groups': [_uuid()],
- 'tenant_id': _uuid()
- }}
- expected_data = self._get_expected_port_chain(data)
- return_value = copy.copy(expected_data['port_chain'])
- return_value.update({'id': portchain_id})
- instance = self.plugin.return_value
- instance.create_port_chain.return_value = return_value
- res = self.api.post(_get_path(PORT_CHAIN_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
- instance.create_port_chain.assert_called_with(
- mock.ANY,
- port_chain=expected_data)
- self.assertEqual(res.status_int, exc.HTTPCreated.code)
- res = self.deserialize(res)
- self.assertIn('port_chain', res)
- self.assertEqual(return_value, res['port_chain'])
-
- def test_create_port_chain_all_fields(self):
- portchain_id = _uuid()
- data = {'port_chain': {
- 'description': 'desc',
- 'name': 'test1',
- 'port_pair_groups': [_uuid()],
- 'chain_parameters': {'correlation': 'mpls'},
- 'flow_classifiers': [],
- 'tenant_id': _uuid()
- }}
- expected_data = self._get_expected_port_chain(data)
- return_value = copy.copy(expected_data['port_chain'])
- return_value.update({'id': portchain_id})
- instance = self.plugin.return_value
- instance.create_port_chain.return_value = return_value
- res = self.api.post(_get_path(PORT_CHAIN_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
- instance.create_port_chain.assert_called_with(
- mock.ANY,
- port_chain=expected_data)
- self.assertEqual(res.status_int, exc.HTTPCreated.code)
- res = self.deserialize(res)
- self.assertIn('port_chain', res)
- self.assertEqual(return_value, res['port_chain'])
-
- def test_create_port_chain_none_chain_parameters(self):
- portchain_id = _uuid()
- data = {'port_chain': {
- 'port_pair_groups': [_uuid()],
- 'chain_parameters': None,
- 'tenant_id': _uuid()
- }}
- expected_data = self._get_expected_port_chain(data)
- return_value = copy.copy(expected_data['port_chain'])
- return_value.update({'id': portchain_id})
- instance = self.plugin.return_value
- instance.create_port_chain.return_value = return_value
- res = self.api.post(_get_path(PORT_CHAIN_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
- instance.create_port_chain.assert_called_with(
- mock.ANY,
- port_chain=expected_data)
- self.assertEqual(res.status_int, exc.HTTPCreated.code)
- res = self.deserialize(res)
- self.assertIn('port_chain', res)
- self.assertEqual(return_value, res['port_chain'])
-
- def test_create_port_chain_empty_chain_parameters(self):
- portchain_id = _uuid()
- data = {'port_chain': {
- 'port_pair_groups': [_uuid()],
- 'chain_parameters': {},
- 'tenant_id': _uuid()
- }}
- expected_data = self._get_expected_port_chain(data)
- return_value = copy.copy(expected_data['port_chain'])
- return_value.update({'id': portchain_id})
- instance = self.plugin.return_value
- instance.create_port_chain.return_value = return_value
- res = self.api.post(_get_path(PORT_CHAIN_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
- instance.create_port_chain.assert_called_with(
- mock.ANY,
- port_chain=expected_data)
- self.assertEqual(res.status_int, exc.HTTPCreated.code)
- res = self.deserialize(res)
- self.assertIn('port_chain', res)
- self.assertEqual(return_value, res['port_chain'])
-
- def test_create_port_chain_empty_port_pair_groups(self):
- data = {'port_chain': {
- 'port_pair_groups': [],
- 'tenant_id': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.post,
- _get_path(PORT_CHAIN_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_create_port_chain_nonuuid_port_pair_groups(self):
- data = {'port_chain': {
- 'port_pair_groups': ['nouuid'],
- 'tenant_id': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.post,
- _get_path(PORT_CHAIN_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_create_port_chain_nonuuid_flow_classifiers(self):
- data = {'port_chain': {
- 'port_pair_groups': [_uuid()],
- 'flow_classifiers': ['nouuid'],
- 'tenant_id': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.post,
- _get_path(PORT_CHAIN_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_create_port_chain_invalid_chain_parameters(self):
- data = {'port_chain': {
- 'port_pair_groups': [_uuid()],
- 'chain_parameters': {'abc': 'def'},
- 'tenant_id': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.post,
- _get_path(PORT_CHAIN_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_port_chain_list(self):
- portchain_id = _uuid()
- return_value = [{
- 'tenant_id': _uuid(),
- 'id': portchain_id
- }]
- instance = self.plugin.return_value
- instance.get_port_chains.return_value = return_value
-
- res = self.api.get(_get_path(PORT_CHAIN_PATH, fmt=self.fmt))
-
- instance.get_port_chains.assert_called_with(
- mock.ANY,
- fields=mock.ANY,
- filters=mock.ANY
- )
- self.assertEqual(res.status_int, exc.HTTPOk.code)
- res = self.deserialize(res)
- self.assertIn('port_chains', res)
- self.assertEqual(res['port_chains'], return_value)
-
- def test_port_chain_get(self):
- portchain_id = _uuid()
- return_value = {
- 'tenant_id': _uuid(),
- 'id': portchain_id
- }
-
- instance = self.plugin.return_value
- instance.get_port_chain.return_value = return_value
-
- res = self.api.get(_get_path(PORT_CHAIN_PATH,
- id=portchain_id, fmt=self.fmt))
-
- instance.get_port_chain.assert_called_with(
- mock.ANY,
- portchain_id,
- fields=mock.ANY
- )
- self.assertEqual(res.status_int, exc.HTTPOk.code)
- res = self.deserialize(res)
- self.assertIn('port_chain', res)
- self.assertEqual(return_value, res['port_chain'])
-
- def test_port_chain_update(self):
- portchain_id = _uuid()
- update_data = {'port_chain': {
- 'name': 'new_name',
- 'description': 'new_desc',
- 'flow_classifiers': [_uuid()]
- }}
- return_value = {
- 'tenant_id': _uuid(),
- 'id': portchain_id
- }
-
- instance = self.plugin.return_value
- instance.update_port_chain.return_value = return_value
-
- res = self.api.put(_get_path(PORT_CHAIN_PATH, id=portchain_id,
- fmt=self.fmt),
- self.serialize(update_data))
-
- instance.update_port_chain.assert_called_with(
- mock.ANY, portchain_id,
- port_chain=update_data)
- self.assertEqual(res.status_int, exc.HTTPOk.code)
- res = self.deserialize(res)
- self.assertIn('port_chain', res)
- self.assertEqual(res['port_chain'], return_value)
-
- def test_port_chain_update_nonuuid_flow_classifiers(self):
- portchain_id = _uuid()
- data = {'port_chain': {
- 'flow_classifiers': ['nouuid'],
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.put,
- _get_path(PORT_CHAIN_PATH, id=portchain_id, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_port_chain_update_port_pair_groups(self):
- portchain_id = _uuid()
- update_data = {'port_chain': {
- 'port_pair_groups': [_uuid()]
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.put,
- _get_path(PORT_CHAIN_PATH, id=portchain_id,
- fmt=self.fmt),
- self.serialize(update_data)
- )
-
- def test_port_chain_update_chain_parameters(self):
- portchain_id = _uuid()
- update_data = {'port_chain': {
- 'chain_parameters': {}
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.put,
- _get_path(PORT_CHAIN_PATH, id=portchain_id,
- fmt=self.fmt),
- self.serialize(update_data)
- )
-
- def test_port_chain_delete(self):
- self._test_entity_delete('port_chain')
-
- def _get_expected_port_pair_group(self, data):
- return {'port_pair_group': {
- 'description': data['port_pair_group'].get('description') or '',
- 'name': data['port_pair_group'].get('name') or '',
- 'port_pairs': data['port_pair_group'].get('port_pairs') or [],
- 'tenant_id': data['port_pair_group']['tenant_id']
- }}
-
- def test_create_port_pair_group(self):
- portpairgroup_id = _uuid()
- data = {'port_pair_group': {
- 'tenant_id': _uuid()
- }}
- expected_data = self._get_expected_port_pair_group(data)
- return_value = copy.copy(expected_data['port_pair_group'])
- return_value.update({'id': portpairgroup_id})
- instance = self.plugin.return_value
- instance.create_port_pair_group.return_value = return_value
- res = self.api.post(
- _get_path(PORT_PAIR_GROUP_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
- instance.create_port_pair_group.assert_called_with(
- mock.ANY,
- port_pair_group=expected_data)
- self.assertEqual(res.status_int, exc.HTTPCreated.code)
- res = self.deserialize(res)
- self.assertIn('port_pair_group', res)
- self.assertEqual(return_value, res['port_pair_group'])
-
- def test_create_port_pair_group_all_fields(self):
- portpairgroup_id = _uuid()
- data = {'port_pair_group': {
- 'description': 'desc',
- 'name': 'test1',
- 'port_pairs': [],
- 'tenant_id': _uuid()
- }}
- expected_data = self._get_expected_port_pair_group(data)
- return_value = copy.copy(expected_data['port_pair_group'])
- return_value.update({'id': portpairgroup_id})
- instance = self.plugin.return_value
- instance.create_port_pair_group.return_value = return_value
- res = self.api.post(
- _get_path(PORT_PAIR_GROUP_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
- instance.create_port_pair_group.assert_called_with(
- mock.ANY,
- port_pair_group=expected_data)
- self.assertEqual(res.status_int, exc.HTTPCreated.code)
- res = self.deserialize(res)
- self.assertIn('port_pair_group', res)
- self.assertEqual(return_value, res['port_pair_group'])
-
- def test_create_port_pair_group_nonuuid_port_pairs(self):
- data = {'port_pair_group': {
- 'port_pairs': ['nouuid'],
- 'tenant_id': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.post,
- _get_path(PORT_PAIR_GROUP_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_port_pair_group_list(self):
- portpairgroup_id = _uuid()
- return_value = [{
- 'tenant_id': _uuid(),
- 'id': portpairgroup_id
- }]
- instance = self.plugin.return_value
- instance.get_port_pair_groups.return_value = return_value
-
- res = self.api.get(
- _get_path(PORT_PAIR_GROUP_PATH, fmt=self.fmt))
-
- instance.get_port_pair_groups.assert_called_with(
- mock.ANY,
- fields=mock.ANY,
- filters=mock.ANY
- )
- self.assertEqual(res.status_int, exc.HTTPOk.code)
- res = self.deserialize(res)
- self.assertIn('port_pair_groups', res)
- self.assertEqual(res['port_pair_groups'], return_value)
-
- def test_port_pair_group_get(self):
- portpairgroup_id = _uuid()
- return_value = {
- 'tenant_id': _uuid(),
- 'id': portpairgroup_id
- }
-
- instance = self.plugin.return_value
- instance.get_port_pair_group.return_value = return_value
-
- res = self.api.get(_get_path(PORT_PAIR_GROUP_PATH,
- id=portpairgroup_id, fmt=self.fmt))
-
- instance.get_port_pair_group.assert_called_with(
- mock.ANY,
- portpairgroup_id,
- fields=mock.ANY
- )
- self.assertEqual(res.status_int, exc.HTTPOk.code)
- res = self.deserialize(res)
- self.assertIn('port_pair_group', res)
- self.assertEqual(return_value, res['port_pair_group'])
-
- def test_port_pair_group_update(self):
- portpairgroup_id = _uuid()
- update_data = {'port_pair_group': {
- 'name': 'new_name',
- 'description': 'new_desc',
- 'port_pairs': [_uuid()]
- }}
- return_value = {
- 'tenant_id': _uuid(),
- 'id': portpairgroup_id
- }
-
- instance = self.plugin.return_value
- instance.update_port_pair_group.return_value = return_value
-
- res = self.api.put(
- _get_path(
- PORT_PAIR_GROUP_PATH, id=portpairgroup_id,
- fmt=self.fmt),
- self.serialize(update_data))
-
- instance.update_port_pair_group.assert_called_with(
- mock.ANY, portpairgroup_id,
- port_pair_group=update_data)
- self.assertEqual(res.status_int, exc.HTTPOk.code)
- res = self.deserialize(res)
- self.assertIn('port_pair_group', res)
- self.assertEqual(res['port_pair_group'], return_value)
-
- def test_port_pair_group_update_nonuuid_port_pairs(self):
- portpairgroup_id = _uuid()
- data = {'port_pair_group': {
- 'port_pairs': ['nouuid']
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.put,
- _get_path(PORT_PAIR_GROUP_PATH, id=portpairgroup_id,
- fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_port_pair_group_delete(self):
- self._test_entity_delete('port_pair_group')
-
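- # Expected port pair body after API defaults; service_function_parameters
- # defaults to {'correlation': None}.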
- def _get_expected_port_pair(self, data):
- return {'port_pair': {
- 'name': data['port_pair'].get('name') or '',
- 'description': data['port_pair'].get('description') or '',
- 'ingress': data['port_pair']['ingress'],
- 'egress': data['port_pair']['egress'],
- 'service_function_parameters': data['port_pair'].get(
- 'service_function_parameters') or {'correlation': None},
- 'tenant_id': data['port_pair']['tenant_id']
- }}
-
- def test_create_port_pair(self):
- portpair_id = _uuid()
- data = {'port_pair': {
- 'ingress': _uuid(),
- 'egress': _uuid(),
- 'tenant_id': _uuid()
- }}
- expected_data = self._get_expected_port_pair(data)
- return_value = copy.copy(expected_data['port_pair'])
- return_value.update({'id': portpair_id})
- instance = self.plugin.return_value
- instance.create_port_pair.return_value = return_value
- res = self.api.post(_get_path(PORT_PAIR_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
- instance.create_port_pair.assert_called_with(
- mock.ANY,
- port_pair=expected_data)
- self.assertEqual(res.status_int, exc.HTTPCreated.code)
- res = self.deserialize(res)
- self.assertIn('port_pair', res)
- self.assertEqual(return_value, res['port_pair'])
-
- def test_create_port_pair_all_fields(self):
- portpair_id = _uuid()
- data = {'port_pair': {
- 'description': 'desc',
- 'name': 'test1',
- 'ingress': _uuid(),
- 'egress': _uuid(),
- 'service_function_parameters': {'correlation': None},
- 'tenant_id': _uuid()
- }}
- expected_data = self._get_expected_port_pair(data)
- return_value = copy.copy(expected_data['port_pair'])
- return_value.update({'id': portpair_id})
- instance = self.plugin.return_value
- instance.create_port_pair.return_value = return_value
- res = self.api.post(_get_path(PORT_PAIR_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
- instance.create_port_pair.assert_called_with(
- mock.ANY,
- port_pair=expected_data)
- self.assertEqual(res.status_int, exc.HTTPCreated.code)
- res = self.deserialize(res)
- self.assertIn('port_pair', res)
- self.assertEqual(return_value, res['port_pair'])
-
- def test_create_port_pair_non_service_function_parameters(self):
- portpair_id = _uuid()
- data = {'port_pair': {
- 'ingress': _uuid(),
- 'egress': _uuid(),
- 'service_function_parameters': None,
- 'tenant_id': _uuid()
- }}
- expected_data = self._get_expected_port_pair(data)
- return_value = copy.copy(expected_data['port_pair'])
- return_value.update({'id': portpair_id})
- instance = self.plugin.return_value
- instance.create_port_pair.return_value = return_value
- res = self.api.post(_get_path(PORT_PAIR_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
- instance.create_port_pair.assert_called_with(
- mock.ANY,
- port_pair=expected_data)
- self.assertEqual(res.status_int, exc.HTTPCreated.code)
- res = self.deserialize(res)
- self.assertIn('port_pair', res)
- self.assertEqual(return_value, res['port_pair'])
-
- def test_create_port_pair_empty_service_function_parameters(self):
- portpair_id = _uuid()
- data = {'port_pair': {
- 'ingress': _uuid(),
- 'egress': _uuid(),
- 'service_function_parameters': {},
- 'tenant_id': _uuid()
- }}
- expected_data = self._get_expected_port_pair(data)
- return_value = copy.copy(expected_data['port_pair'])
- return_value.update({'id': portpair_id})
- instance = self.plugin.return_value
- instance.create_port_pair.return_value = return_value
- res = self.api.post(_get_path(PORT_PAIR_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
- instance.create_port_pair.assert_called_with(
- mock.ANY,
- port_pair=expected_data)
- self.assertEqual(res.status_int, exc.HTTPCreated.code)
- res = self.deserialize(res)
- self.assertIn('port_pair', res)
- self.assertEqual(return_value, res['port_pair'])
-
- def test_create_port_pair_invalid_service_function_parameters(self):
- data = {'port_pair': {
- 'ingress': _uuid(),
- 'egress': _uuid(),
- 'service_function_parameters': {'abc': 'def'},
- 'tenant_id': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.post,
- _get_path(PORT_PAIR_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_create_port_pair_nouuid_ingress(self):
- data = {'port_pair': {
- 'ingress': 'abc',
- 'egress': _uuid(),
- 'tenant_id': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.post,
- _get_path(PORT_PAIR_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_create_port_pair_nouuid_egress(self):
- data = {'port_pair': {
- 'egress': 'abc',
- 'ingress': _uuid(),
- 'tenant_id': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.post,
- _get_path(PORT_PAIR_PATH, fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_port_pair_list(self):
- portpair_id = _uuid()
- return_value = [{
- 'tenant_id': _uuid(),
- 'id': portpair_id
- }]
- instance = self.plugin.return_value
- instance.get_port_pairs.return_value = return_value
-
- res = self.api.get(_get_path(PORT_PAIR_PATH, fmt=self.fmt))
-
- instance.get_port_pairs.assert_called_with(
- mock.ANY,
- fields=mock.ANY,
- filters=mock.ANY
- )
- self.assertEqual(res.status_int, exc.HTTPOk.code)
- res = self.deserialize(res)
- self.assertIn('port_pairs', res)
- self.assertEqual(res['port_pairs'], return_value)
-
- def test_port_pair_get(self):
- portpair_id = _uuid()
- return_value = {
- 'tenant_id': _uuid(),
- 'id': portpair_id
- }
-
- instance = self.plugin.return_value
- instance.get_port_pair.return_value = return_value
-
- res = self.api.get(_get_path(PORT_PAIR_PATH,
- id=portpair_id, fmt=self.fmt))
-
- instance.get_port_pair.assert_called_with(
- mock.ANY,
- portpair_id,
- fields=mock.ANY
- )
- self.assertEqual(res.status_int, exc.HTTPOk.code)
- res = self.deserialize(res)
- self.assertIn('port_pair', res)
- self.assertEqual(return_value, res['port_pair'])
-
- def test_port_pair_update(self):
- portpair_id = _uuid()
- update_data = {'port_pair': {
- 'name': 'new_name',
- 'description': 'new_desc'
- }}
- return_value = {
- 'tenant_id': _uuid(),
- 'id': portpair_id
- }
-
- instance = self.plugin.return_value
- instance.update_port_pair.return_value = return_value
-
- res = self.api.put(_get_path(PORT_PAIR_PATH, id=portpair_id,
- fmt=self.fmt),
- self.serialize(update_data))
-
- instance.update_port_pair.assert_called_with(
- mock.ANY, portpair_id,
- port_pair=update_data)
- self.assertEqual(res.status_int, exc.HTTPOk.code)
- res = self.deserialize(res)
- self.assertIn('port_pair', res)
- self.assertEqual(res['port_pair'], return_value)
-
- def test_port_pair_update_service_function_parameters(self):
- portpair_id = _uuid()
- data = {'port_pair': {
- 'service_function_parameters': None
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.put,
- _get_path(PORT_PAIR_PATH, id=portpair_id,
- fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_port_pair_update_ingress(self):
- portpair_id = _uuid()
- data = {'port_pair': {
- 'ingress': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.put,
- _get_path(PORT_PAIR_PATH, id=portpair_id,
- fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_port_pair_update_egress(self):
- portpair_id = _uuid()
- data = {'port_pair': {
- 'egress': _uuid()
- }}
- self.assertRaises(
- webtest.app.AppError,
- self.api.put,
- _get_path(PORT_PAIR_PATH, id=portpair_id,
- fmt=self.fmt),
- self.serialize(data),
- content_type='application/%s' % self.fmt)
-
- def test_port_pair_delete(self):
- self._test_entity_delete('port_pair')
diff --git a/networking_sfc/tests/unit/services/__init__.py b/networking_sfc/tests/unit/services/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/tests/unit/services/__init__.py
+++ /dev/null
diff --git a/networking_sfc/tests/unit/services/flowclassifier/__init__.py b/networking_sfc/tests/unit/services/flowclassifier/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/tests/unit/services/flowclassifier/__init__.py
+++ /dev/null
diff --git a/networking_sfc/tests/unit/services/flowclassifier/test_driver_manager.py b/networking_sfc/tests/unit/services/flowclassifier/test_driver_manager.py
deleted file mode 100644
index 56dd991..0000000
--- a/networking_sfc/tests/unit/services/flowclassifier/test_driver_manager.py
+++ /dev/null
@@ -1,158 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-import mock
-import pkg_resources
-import six
-import stevedore
-
-from oslo_config import cfg
-
-from neutron.tests import base
-
-from networking_sfc.services.flowclassifier.common import config as fc_config
-from networking_sfc.services.flowclassifier.common import exceptions as fc_exc
-from networking_sfc.services.flowclassifier import driver_manager as fc_driver
-
-
-class DriverManagerTestCase(base.BaseTestCase):
- def setUp(self):
- super(DriverManagerTestCase, self).setUp()
-
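- # Temporarily point stevedore/pkg_resources entry-point discovery at the
- # supplied mock drivers so FlowClassifierDriverManager loads them, then
- # restore the original config and lookup functions on exit.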
- @contextlib.contextmanager
- def driver_manager_context(self, drivers):
- cfg.CONF.register_opts(fc_config.FLOWCLASSIFIER_DRIVER_OPTS,
- 'flowclassifier')
- backup_driver_names = cfg.CONF.flowclassifier.drivers
- driver_names = [
- driver_name for driver_name in six.iterkeys(drivers)
- ]
- cfg.CONF.set_override('drivers', driver_names, 'flowclassifier')
- iter_entry_points = pkg_resources.iter_entry_points
- find_entry_points = stevedore.ExtensionManager._find_entry_points
- pkg_resources.iter_entry_points = mock.Mock()
- stevedore.ExtensionManager._find_entry_points = mock.Mock()
- driver_entry_points = []
- for driver_name in driver_names:
- driver_class = mock.Mock()
- ep = mock.Mock()
- ep.name = driver_name
- ep.resolve.return_value = driver_class
- driver_class.return_value = drivers[driver_name]
- drivers[driver_name].native_bulk_support = True
- driver_entry_points.append(ep)
- pkg_resources.iter_entry_points.return_value = driver_entry_points
- stevedore.ExtensionManager._find_entry_points.return_value = (
- driver_entry_points
- )
- yield fc_driver.FlowClassifierDriverManager()
- cfg.CONF.set_override('drivers', backup_driver_names, 'flowclassifier')
- pkg_resources.iter_entry_points = iter_entry_points
- stevedore.ExtensionManager._find_entry_points = find_entry_points
-
- def test_initialize_called(self):
- mock_driver1 = mock.Mock()
- mock_driver2 = mock.Mock()
- with self.driver_manager_context({
- 'dummy1': mock_driver1,
- 'dummy2': mock_driver2
- }) as manager:
- manager.initialize()
- mock_driver1.initialize.assert_called_once_with()
- mock_driver2.initialize.assert_called_once_with()
-
- def test_create_flow_classifier_called(self):
- mock_driver1 = mock.Mock()
- mock_driver2 = mock.Mock()
- with self.driver_manager_context({
- 'dummy1': mock_driver1,
- 'dummy2': mock_driver2
- }) as manager:
- mocked_context = mock.Mock()
- manager.create_flow_classifier(mocked_context)
- mock_driver1.create_flow_classifier.assert_called_once_with(
- mocked_context)
- mock_driver2.create_flow_classifier.assert_called_once_with(
- mocked_context)
-
- def test_create_flow_classifier_exception(self):
- mock_driver = mock.Mock()
- mock_driver.create_flow_classifier = mock.Mock(
- side_effect=fc_exc.FlowClassifierException
- )
- with self.driver_manager_context({
- 'dummy': mock_driver,
- }) as manager:
- mocked_context = mock.Mock()
- self.assertRaises(
- fc_exc.FlowClassifierDriverError,
- manager.create_flow_classifier, mocked_context
- )
-
- def test_update_flow_classifier_called(self):
- mock_driver1 = mock.Mock()
- mock_driver2 = mock.Mock()
- with self.driver_manager_context({
- 'dummy1': mock_driver1,
- 'dummy2': mock_driver2
- }) as manager:
- mocked_context = mock.Mock()
- manager.update_flow_classifier(mocked_context)
- mock_driver1.update_flow_classifier.assert_called_once_with(
- mocked_context)
- mock_driver2.update_flow_classifier.assert_called_once_with(
- mocked_context)
-
- def test_update_flow_classifier_exception(self):
- mock_driver = mock.Mock()
- mock_driver.update_flow_classifier = mock.Mock(
- side_effect=fc_exc.FlowClassifierException
- )
- with self.driver_manager_context({
- 'dummy': mock_driver,
- }) as manager:
- mocked_context = mock.Mock()
- self.assertRaises(
- fc_exc.FlowClassifierDriverError,
- manager.update_flow_classifier, mocked_context
- )
-
- def test_delete_flow_classifier_called(self):
- mock_driver1 = mock.Mock()
- mock_driver2 = mock.Mock()
- with self.driver_manager_context({
- 'dummy1': mock_driver1,
- 'dummy2': mock_driver2
- }) as manager:
- mocked_context = mock.Mock()
- manager.delete_flow_classifier(mocked_context)
- mock_driver1.delete_flow_classifier.assert_called_once_with(
- mocked_context)
- mock_driver2.delete_flow_classifier.assert_called_once_with(
- mocked_context)
-
- def test_delete_flow_classifier_exception(self):
- mock_driver = mock.Mock()
- mock_driver.delete_flow_classifier = mock.Mock(
- side_effect=fc_exc.FlowClassifierException
- )
- with self.driver_manager_context({
- 'dummy': mock_driver,
- }) as manager:
- mocked_context = mock.Mock()
- self.assertRaises(
- fc_exc.FlowClassifierDriverError,
- manager.delete_flow_classifier, mocked_context
- )
diff --git a/networking_sfc/tests/unit/services/flowclassifier/test_plugin.py b/networking_sfc/tests/unit/services/flowclassifier/test_plugin.py
deleted file mode 100644
index 2c814f8..0000000
--- a/networking_sfc/tests/unit/services/flowclassifier/test_plugin.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-import mock
-
-from networking_sfc.services.flowclassifier.common import context as fc_ctx
-from networking_sfc.services.flowclassifier.common import exceptions as fc_exc
-from networking_sfc.tests.unit.db import test_flowclassifier_db
-
-FLOWCLASSIFIER_PLUGIN_KLASS = (
- "networking_sfc.services.flowclassifier."
- "plugin.FlowClassifierPlugin"
-)
-
-
-class FlowClassifierPluginTestCase(
- test_flowclassifier_db.FlowClassifierDbPluginTestCase
-):
- def setUp(
- self, core_plugin=None, flowclassifier_plugin=None, ext_mgr=None
- ):
- if not flowclassifier_plugin:
- flowclassifier_plugin = FLOWCLASSIFIER_PLUGIN_KLASS
- self.driver_manager_p = mock.patch(
- 'networking_sfc.services.flowclassifier.driver_manager.'
- 'FlowClassifierDriverManager'
- )
- self.fake_driver_manager_class = self.driver_manager_p.start()
- self.fake_driver_manager = mock.Mock()
- self.fake_driver_manager_class.return_value = self.fake_driver_manager
- self.plugin_context = None
- super(FlowClassifierPluginTestCase, self).setUp(
- core_plugin=core_plugin,
- flowclassifier_plugin=flowclassifier_plugin,
- ext_mgr=ext_mgr
- )
-
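- # Capture the driver context the plugin hands to the driver manager so
- # tests can assert on its current/original attributes.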
- def _record_context(self, plugin_context):
- self.plugin_context = plugin_context
-
- def test_create_flow_classifier_driver_manager_called(self):
- self.fake_driver_manager.create_flow_classifier = mock.Mock(
- side_effect=self._record_context)
- with self.flow_classifier(flow_classifier={}) as fc:
- driver_manager = self.fake_driver_manager
- driver_manager.create_flow_classifier.assert_called_once_with(
- mock.ANY
- )
- self.assertIsInstance(
- self.plugin_context, fc_ctx.FlowClassifierContext
- )
- self.assertIn('flow_classifier', fc)
- self.assertEqual(
- self.plugin_context.current, fc['flow_classifier'])
-
- def test_create_flow_classifier_driver_manager_exception(self):
- self.fake_driver_manager.create_flow_classifier = mock.Mock(
- side_effect=fc_exc.FlowClassifierDriverError(
- method='create_flow_classifier'
- )
- )
- self._create_flow_classifier(
- self.fmt, {}, expected_res_status=500)
- self._test_list_resources('flow_classifier', [])
- driver_manager = self.fake_driver_manager
- driver_manager.delete_flow_classifier.assert_called_once_with(
- mock.ANY
- )
-
- def test_update_flow_classifier_driver_manager_called(self):
- self.fake_driver_manager.update_flow_classifier = mock.Mock(
- side_effect=self._record_context)
- with self.flow_classifier(flow_classifier={'name': 'test1'}) as fc:
- req = self.new_update_request(
- 'flow_classifiers', {'flow_classifier': {'name': 'test2'}},
- fc['flow_classifier']['id']
- )
- res = self.deserialize(
- self.fmt,
- req.get_response(self.ext_api)
- )
- driver_manager = self.fake_driver_manager
- driver_manager.update_flow_classifier.assert_called_once_with(
- mock.ANY
- )
- self.assertIsInstance(
- self.plugin_context, fc_ctx.FlowClassifierContext
- )
- self.assertIn('flow_classifier', fc)
- self.assertIn('flow_classifier', res)
- self.assertEqual(
- self.plugin_context.current, res['flow_classifier'])
- self.assertEqual(
- self.plugin_context.original, fc['flow_classifier'])
-
- def test_update_flow_classifier_driver_manager_exception(self):
- self.fake_driver_manager.update_flow_classifier = mock.Mock(
- side_effect=fc_exc.FlowClassifierDriverError(
- method='update_flow_classifier'
- )
- )
- with self.flow_classifier(flow_classifier={
- 'name': 'test1'
- }) as fc:
- self.assertIn('flow_classifier', fc)
- original_flow_classifier = fc['flow_classifier']
- req = self.new_update_request(
- 'flow_classifiers', {'flow_classifier': {'name': 'test2'}},
- fc['flow_classifier']['id']
- )
- updated_flow_classifier = copy.copy(original_flow_classifier)
- updated_flow_classifier['name'] = 'test2'
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 500)
- res = self._list('flow_classifiers')
- self.assertIn('flow_classifiers', res)
- self.assertItemsEqual(
- res['flow_classifiers'], [updated_flow_classifier])
-
- def test_delete_flow_classifier_driver_manager_called(self):
- self.fake_driver_manager.delete_flow_classifier = mock.Mock(
- side_effect=self._record_context)
- with self.flow_classifier(
- flow_classifier={}, do_delete=False
- ) as fc:
- req = self.new_delete_request(
- 'flow_classifiers', fc['flow_classifier']['id']
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 204)
- driver_manager = self.fake_driver_manager
- driver_manager.delete_flow_classifier.assert_called_once_with(
- mock.ANY
- )
- self.assertIsInstance(
- self.plugin_context, fc_ctx.FlowClassifierContext
- )
- self.assertIn('flow_classifier', fc)
- self.assertEqual(
- self.plugin_context.current, fc['flow_classifier'])
-
- def test_delete_flow_classifier_driver_manager_exception(self):
- self.fake_driver_manager.delete_flow_classifier = mock.Mock(
- side_effect=fc_exc.FlowClassifierDriverError(
- method='delete_flow_classifier'
- )
- )
- with self.flow_classifier(flow_classifier={
- 'name': 'test1'
- }, do_delete=False) as fc:
- req = self.new_delete_request(
- 'flow_classifiers', fc['flow_classifier']['id']
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 500)
- self._test_list_resources('flow_classifier', [fc])
diff --git a/networking_sfc/tests/unit/services/sfc/__init__.py b/networking_sfc/tests/unit/services/sfc/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/tests/unit/services/sfc/__init__.py
+++ /dev/null
diff --git a/networking_sfc/tests/unit/services/sfc/agent/__init__.py b/networking_sfc/tests/unit/services/sfc/agent/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/tests/unit/services/sfc/agent/__init__.py
+++ /dev/null
diff --git a/networking_sfc/tests/unit/services/sfc/agent/test-agent.py b/networking_sfc/tests/unit/services/sfc/agent/test-agent.py
deleted file mode 100644
index 113c343..0000000
--- a/networking_sfc/tests/unit/services/sfc/agent/test-agent.py
+++ /dev/null
@@ -1,4012 +0,0 @@
-# Copyright 2015 Huawei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-import six
-
-from neutron.agent.common import ovs_lib
-from neutron.agent.common import utils
-from neutron.agent import rpc as agent_rpc
-from neutron import context
-from neutron.tests import base
-
-from networking_sfc.services.sfc.agent import (
- ovs_sfc_agent as agent)
-from networking_sfc.services.sfc.agent import br_int
-from networking_sfc.services.sfc.agent import br_phys
-from networking_sfc.services.sfc.agent import br_tun
-from networking_sfc.services.sfc.common import ovs_ext_lib
-
-
-class OVSSfcAgentTestCase(base.BaseTestCase):
- def setUp(self):
- super(OVSSfcAgentTestCase, self).setUp()
- mock.patch(
- 'neutron.agent.common.ovs_lib.OVSBridge.get_ports_attributes',
- return_value=[]
- ).start()
- mock.patch(
- 'neutron.agent.common.ovs_lib.BaseOVS.config',
- new_callable=mock.PropertyMock,
- return_value={}
- ).start()
- self.executed_cmds = []
- self.node_flowrules = []
- self.backup_plugin_rpc = agent.SfcPluginApi
- self.plugin_rpc = mock.Mock()
- self.plugin_rpc.get_flowrules_by_host_portid = mock.Mock(
- side_effect=self.mock_get_flowrules_by_host_portid
- )
- self.plugin_rpc.get_all_src_node_flowrules = mock.Mock(
- side_effect=self.mock_get_all_src_node_flowrules
- )
- agent.SfcPluginApi = mock.Mock(
- return_value=self.plugin_rpc
- )
- self.create_consumers = mock.patch.object(
- agent_rpc, "create_consumers",
- self.mock_create_consumers
- )
- self.create_consumers.start()
- self.execute = mock.patch.object(
- utils, "execute", self.mock_execute,
- spec=utils.execute)
- self.execute.start()
- self.added_flows = []
- self.add_flow = mock.patch.object(
- ovs_lib.OVSBridge, "add_flow", self.mock_add_flow
- )
- self.add_flow.start()
- self.deleted_flows = []
- self.delete_flows = mock.patch.object(
- ovs_lib.OVSBridge, "delete_flows", self.mock_delete_flows
- )
- self.delete_flows.start()
- self.int_patch = 1
- self.tun_patch = 2
- self.default_port_mapping = {
- 'patch-int': {
- 'ofport': self.int_patch
- },
- 'patch-tun': {
- 'ofport': self.tun_patch
- }
- }
- self.port_mapping = {}
- self.get_vif_port_by_id = mock.patch.object(
- ovs_lib.OVSBridge, "get_vif_port_by_id",
- self.mock_get_vif_port_by_id
- )
- self.get_vif_port_by_id.start()
- self.get_port_ofport = mock.patch.object(
- ovs_lib.OVSBridge, "get_port_ofport",
- self.mock_get_port_ofport
- )
- self.get_port_ofport.start()
- self.set_secure_mode = mock.patch.object(
- ovs_lib.OVSBridge, "set_secure_mode",
- self.mock_set_secure_mode
- )
- self.set_secure_mode.start()
- self.protocols = []
- self.set_protocols = mock.patch.object(
- ovs_lib.OVSBridge, "set_protocols",
- self.mock_set_protocols
- )
- self.set_protocols.start()
- self.del_controller = mock.patch.object(
- ovs_lib.OVSBridge, "del_controller",
- self.mock_del_controller
- )
- self.del_controller.start()
- self.get_bridges = mock.patch.object(
- ovs_lib.BaseOVS, "get_bridges",
- self.mock_get_bridges
- )
- self.get_bridges.start()
- self.get_vif_ports = mock.patch.object(
- ovs_lib.OVSBridge, "get_vif_ports",
- self.mock_get_vif_ports
- )
- self.get_vif_ports.start()
- self.get_ports_attributes = mock.patch.object(
- ovs_lib.OVSBridge, "get_ports_attributes",
- self.mock_get_ports_attributes
- )
- self.get_ports_attributes.start()
- self.delete_port = mock.patch.object(
- ovs_lib.OVSBridge, "delete_port",
- self.mock_delete_port
- )
- self.delete_port.start()
- self.create = mock.patch.object(
- ovs_lib.OVSBridge, "create",
- self.mock_create
- )
- self.create.start()
- self.add_port = mock.patch.object(
- ovs_lib.OVSBridge, "add_port",
- self.mock_add_port
- )
- self.add_port.start()
- self.bridge_exists = mock.patch.object(
- ovs_lib.BaseOVS, "bridge_exists",
- self.mock_bridge_exists
- )
- self.bridge_exists.start()
- self.port_exists = mock.patch.object(
- ovs_lib.BaseOVS, "port_exists",
- self.mock_port_exists
- )
- self.port_exists.start()
- self.apply_flows = mock.patch.object(
- ovs_lib.DeferredOVSBridge, "apply_flows",
- self.mock_apply_flows
- )
- self.apply_flows.start()
- self.group_mapping = {}
- self.deleted_groups = []
- self.dump_group_for_id = mock.patch.object(
- ovs_ext_lib.OVSBridgeExt, "dump_group_for_id",
- self.mock_dump_group_for_id
- )
- self.dump_group_for_id.start()
- self.add_group = mock.patch.object(
- ovs_ext_lib.OVSBridgeExt, "add_group",
- self.mock_add_group
- )
- self.add_group.start()
- self.mod_group = mock.patch.object(
- ovs_ext_lib.OVSBridgeExt, "mod_group",
- self.mock_mod_group
- )
- self.mod_group.start()
- self.delete_group = mock.patch.object(
- ovs_ext_lib.OVSBridgeExt, "delete_group",
- self.mock_delete_group
- )
- self.delete_group.start()
- self.local_ip = '10.0.0.1'
- self.bridge_classes = {
- 'br_int': br_int.OVSIntegrationBridge,
- 'br_phys': br_phys.OVSPhysicalBridge,
- 'br_tun': br_tun.OVSTunnelBridge,
- }
- self.context = context.get_admin_context_without_session()
- self.init_agent()
-
- def init_agent(self):
- self.added_flows = []
- self.deleted_flows = []
- self.group_mapping = {}
- self.deleted_groups = []
- self.agent = agent.OVSSfcAgent(
- self.bridge_classes,
- 'br-int',
- 'br-tun',
- self.local_ip,
- {},
- 2,
- tunnel_types=['gre', 'vxlan']
- )
-
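- # The mock_* helpers below stand in for OVS and RPC calls, recording
- # flows, groups and port state in plain lists and dicts for assertions.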
- def mock_create_consumers(
- self, endpoints, prefix, topic_details, start_listening=True
- ):
- self.added_flows = []
- self.deleted_flows = []
- return mock.Mock()
-
- def mock_delete_group(self, group_id):
- if group_id == 'all':
- self.group_mapping = {}
- else:
- if group_id in self.group_mapping:
- del self.group_mapping[group_id]
- else:
- self.deleted_groups.append(group_id)
-
- def mock_mod_group(self, group_id, **kwargs):
- kwargs['group_id'] = group_id
- self.group_mapping[group_id] = kwargs
-
- def mock_add_group(self, group_id, **kwargs):
- kwargs['group_id'] = group_id
- self.group_mapping[group_id] = kwargs
-
- def mock_dump_group_for_id(self, group_id):
- if group_id in self.group_mapping:
- group_list = []
- group = self.group_mapping[group_id]
- for group_key, group_value in six.iteritems(group):
- group_list.append('%s=%s' % (group_key, group_value))
- return ' '.join(group_list)
- else:
- return ''
-
- def mock_set_secure_mode(self):
- pass
-
- def mock_set_protocols(self, protocols):
- self.protocols = protocols
-
- def mock_del_controller(self):
- pass
-
- def mock_get_bridges(self):
- return ['br-int', 'br-tun']
-
- def mock_get_port_ofport(self, port_name):
- for port_id, port_values in six.iteritems(self.port_mapping):
- if port_values['port_name'] == port_name:
- return port_values['ofport']
- if port_name in self.default_port_mapping:
- return self.default_port_mapping[port_name]['ofport']
- return ovs_lib.INVALID_OFPORT
-
- def mock_add_port(self, port_name, *interface_attr_tuples):
- return self.mock_get_port_ofport(port_name)
-
- def mock_bridge_exists(self, bridge_name):
- return True
-
- def mock_port_exists(self, port_name):
- return True
-
- def mock_apply_flows(self):
- pass
-
- def mock_get_vif_port_by_id(self, port_id):
- if port_id in self.port_mapping:
- port_values = self.port_mapping[port_id]
- return ovs_lib.VifPort(
- port_values['port_name'],
- port_values['ofport'],
- port_id,
- port_values['vif_mac'],
- self.agent.int_br
- )
-
- def mock_get_vif_ports(self):
- vif_ports = []
- for port_id, port_values in six.iteritems(self.port_mapping):
- vif_ports.append(
- ovs_lib.VifPort(
- port_values['port_name'],
- port_values['ofport'],
- port_id,
- port_values['vif_mac'],
- self.agent.int_br
- )
- )
- return vif_ports
-
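- # Emulates an OVSDB Interface-table read, honouring the 'columns' and
- # 'ports' filters that the agent passes in.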
- def mock_get_ports_attributes(
- self, table, columns=None, ports=None,
- check_error=True, log_errors=True,
- if_exists=False
- ):
- port_infos = []
- for port_id, port_values in six.iteritems(self.port_mapping):
- port_info = {}
- if columns:
- if 'name' in columns:
- port_info['name'] = port_values['port_name']
- if 'ofport' in columns:
- port_info['ofport'] = port_values['ofport']
- if 'external_ids' in columns:
- port_info['external_ids'] = {
- 'iface-id': port_id,
- 'attached-mac': port_values['vif_mac']
- }
- if 'other_config' in columns:
- port_info['other_config'] = {}
- if 'tag' in columns:
- port_info['tag'] = []
- else:
- port_info = {
- 'name': port_values['port_name'],
- 'ofport': port_values['ofport'],
- 'external_ids': {
- 'iface-id': port_id,
- 'attached-mac': port_values['vif_mac']
- },
- 'other_config': {},
- 'tag': []
- }
- if ports:
- if port_values['port_name'] in ports:
- port_infos.append(port_info)
- else:
- port_infos.append(port_info)
- return port_infos
-
- def mock_delete_port(self, port_name):
- found_port_id = None
- for port_id, port_values in six.iteritems(self.port_mapping):
- if port_values['port_name'] == port_name:
- found_port_id = port_id
- if found_port_id:
- del self.port_mapping[found_port_id]
-
- def mock_create(self, secure_mode=False):
- pass
-
- def mock_add_flow(self, *args, **kwargs):
- if kwargs not in self.added_flows:
- self.added_flows.append(kwargs)
-
- def mock_delete_flows(self, *args, **kwargs):
- if kwargs not in self.deleted_flows:
- self.deleted_flows.append(kwargs)
-
- def mock_get_flowrules_by_host_portid(self, context, port_id):
- return [
- flowrule
- for flowrule in self.node_flowrules
- if (
- flowrule['ingress'] == port_id or
- flowrule['egress'] == port_id
- )
- ]
-
- def mock_get_all_src_node_flowrules(self, context):
- return [
- flowrule
- for flowrule in self.node_flowrules
- if (
- flowrule['node_type'] == 'src_node' and
- flowrule['egress'] is None
- )
- ]
-
- def mock_execute(self, cmd, *args, **kwargs):
- self.executed_cmds.append(' '.join(cmd))
-
- def tearDown(self):
- agent.SfcPluginApi = self.backup_plugin_rpc
- self.create_consumers.stop()
- self.execute.stop()
- self.add_flow.stop()
- self.delete_flows.stop()
- self.get_vif_port_by_id.stop()
- self.get_port_ofport.stop()
- self.set_secure_mode.stop()
- self.set_protocols.stop()
- self.del_controller.stop()
- self.get_bridges.stop()
- self.get_vif_ports.stop()
- self.get_ports_attributes.stop()
- self.delete_port.stop()
- self.create.stop()
- self.add_port.stop()
- self.bridge_exists.stop()
- self.port_exists.stop()
- self.apply_flows.stop()
- self.dump_group_for_id.stop()
- self.add_group.stop()
- self.mod_group.stop()
- self.delete_group.stop()
- self.node_flowrules = []
- self.added_flows = []
- self.deleted_flows = []
- self.group_mapping = {}
- self.deleted_groups = []
- self.port_mapping = {}
- super(OVSSfcAgentTestCase, self).tearDown()
-
- def test_update_empty_flow_rules(self):
- self.port_mapping = {
- 'dd7374b9-a6ac-4a66-a4a6-7d3dee2a1579': {
- 'port_name': 'src_port',
- 'ofport': 6,
- 'vif_mac': '00:01:02:03:05:07'
- },
- '2f1d2140-42ce-4979-9542-7ef25796e536': {
- 'port_name': 'dst_port',
- 'ofport': 42,
- 'vif_mac': '00:01:02:03:06:08'
- }
- }
- self.agent.update_flow_rules(
- self.context, flowrule_entries={
- }
- )
- self.assertEqual(
- self.executed_cmds, [
- ]
- )
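- # Even with no flow rules, the agent installs its default pipeline:
- # MPLS traffic (dl_type 34887 == 0x8847) is steered through the
- # SF/FWD/GRP selector tables.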
- self.assertEqual(
- self.added_flows, [{
- 'actions': 'resubmit(,%d)' % agent.SF_SELECTOR,
- 'dl_type': 34887,
- 'priority': agent.PC_DEF_PRI,
- 'table': 0
- }, {
- 'actions': 'resubmit(,%d)' % agent.FWD_SELECTOR,
- 'dl_type': 34887,
- 'priority': agent.PC_DEF_PRI
- }, {
- 'actions': 'output:%d' % self.int_patch,
- 'priority': 0,
- 'table': agent.FWD_SELECTOR
- }, {
- 'actions': 'resubmit(,%d)' % agent.GRP_SELECTOR,
- 'in_port': self.int_patch,
- 'priority': agent.PC_DEF_PRI,
- 'table': agent.FWD_SELECTOR
- }]
- )
- self.assertEqual(
- self.group_mapping, {}
- )
-
- def test_update_flow_rules_port_pair(self):
- self.port_mapping = {
- 'dd7374b9-a6ac-4a66-a4a6-7d3dee2a1579': {
- 'port_name': 'src_port',
- 'ofport': 6,
- 'vif_mac': '00:01:02:03:05:07'
- },
- '2f1d2140-42ce-4979-9542-7ef25796e536': {
- 'port_name': 'dst_port',
- 'ofport': 42,
- 'vif_mac': '00:01:02:03:06:08'
- }
- }
- self.agent.update_flow_rules(
- self.context, flowrule_entries={
- 'nsi': 254,
- 'ingress': u'dd7374b9-a6ac-4a66-a4a6-7d3dee2a1579',
- 'next_hops': None,
- 'del_fcs': [],
- 'segment_id': 75,
- 'group_refcnt': 1,
- 'mac_address': u'12:34:56:78:fd:b2',
- 'network_type': u'gre',
- 'local_endpoint': u'10.0.0.2',
- 'node_type': 'sf_node',
- 'egress': u'2f1d2140-42ce-4979-9542-7ef25796e536',
- 'next_group_id': None,
- 'host_id': u'test1',
- 'nsp': 256,
- 'portchain_id': u'84c1411f-7a94-4b4f-9a8b-ad9607c67c76',
- 'add_fcs': [],
- 'id': '611bdc42-12b3-4639-8faf-83da4e6403f7'
- }
- )
- self.assertEqual(
- self.executed_cmds, [
- ]
- )
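- # The MPLS label encodes the chain position: 65791 == (256 << 8) | 255,
- # i.e. nsp << 8 | (nsi + 1), the label carried by the incoming hop.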
- self.assertEqual(
- self.added_flows, [{
- 'actions': 'resubmit(,%d)' % agent.SF_SELECTOR,
- 'dl_type': 34887,
- 'priority': agent.PC_DEF_PRI,
- 'table': 0
- }, {
- 'actions': 'resubmit(,%d)' % agent.FWD_SELECTOR,
- 'dl_type': 34887,
- 'priority': agent.PC_DEF_PRI
- }, {
- 'actions': 'output:%d' % self.int_patch,
- 'priority': 0,
- 'table': agent.FWD_SELECTOR
- }, {
- 'actions': 'resubmit(,%d)' % agent.GRP_SELECTOR,
- 'in_port': self.int_patch,
- 'priority': agent.PC_DEF_PRI,
- 'table': agent.FWD_SELECTOR
- }, {
- 'actions': 'pop_mpls:0x0800,output:6',
- 'dl_dst': '00:01:02:03:05:07',
- 'dl_type': 34887,
- 'mpls_label': 65791,
- 'priority': 1,
- 'table': agent.SF_SELECTOR
- }]
- )
- self.assertEqual(
- self.group_mapping, {}
- )
-
- def test_update_flow_rules_flow_classifiers(self):
- self.port_mapping = {
- 'e1229670-2a07-450d-bdc9-34e71c301206': {
- 'port_name': 'src_port',
- 'ofport': 6,
- 'vif_mac': '00:01:02:03:05:07'
- },
- '9bedd01e-c216-4dfd-b48e-fbd5c8212ba4': {
- 'port_name': 'dst_port',
- 'ofport': 42,
- 'vif_mac': '00:01:02:03:06:08'
- }
- }
-
- self.agent.update_flow_rules(
- self.context, flowrule_entries={
- 'nsi': 255,
- 'ingress': None,
- 'next_hops': None,
- 'del_fcs': [],
- 'segment_id': 43,
- 'group_refcnt': 1,
- 'mac_address': u'12:34:56:78:72:05',
- 'network_type': u'gre',
- 'local_endpoint': u'10.0.0.2',
- 'node_type': 'src_node',
- 'egress': u'9bedd01e-c216-4dfd-b48e-fbd5c8212ba4',
- 'next_group_id': 1,
- 'host_id': u'test1',
- 'nsp': 256,
- 'portchain_id': u'8cba323e-5e67-4df0-a4b0-7e1ef486a656',
- 'add_fcs': [{
- 'source_port_range_min': 100,
- 'destination_ip_prefix': u'10.200.0.0/16',
- 'protocol': u'tcp',
- 'logical_destination_port': (
- 'e1229670-2a07-450d-bdc9-34e71c301206'),
- 'l7_parameters': {},
- 'source_port_range_max': 200,
- 'source_ip_prefix': u'10.100.0.0/16',
- 'destination_port_range_min': 300,
- 'ethertype': u'IPv4',
- 'destination_port_range_max': 400,
- 'logical_source_port': (
- '9bedd01e-c216-4dfd-b48e-fbd5c8212ba4')
- }],
- 'id': '611bdc42-12b3-4639-8faf-83da4e6403f7'
- }
- )
- self.agent.update_flow_rules(
- self.context, flowrule_entries={
- 'nsi': 253,
- 'ingress': 'e1229670-2a07-450d-bdc9-34e71c301206',
- 'next_hops': None,
- 'del_fcs': [],
- 'segment_id': 43,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:c5:f3',
- 'network_type': 'gre',
- 'local_endpoint': u'10.0.0.2',
- 'node_type': 'dst_node',
- 'egress': None,
- 'next_group_id': None,
- 'host_id': u'test2',
- 'nsp': 256,
- 'portchain_id': '8cba323e-5e67-4df0-a4b0-7e1ef486a656',
- 'add_fcs': [{
- 'source_port_range_min': 100,
- 'destination_ip_prefix': u'10.200.0.0/16',
- 'protocol': 'tcp',
- 'logical_destination_port': (
- 'e1229670-2a07-450d-bdc9-34e71c301206'),
- 'l7_parameters': {},
- 'source_port_range_max': 200,
- 'source_ip_prefix': u'10.100.0.0/16',
- 'destination_port_range_min': 300,
- 'ethertype': 'IPv4',
- 'destination_port_range_max': 400,
- 'logical_source_port': (
- '9bedd01e-c216-4dfd-b48e-fbd5c8212ba4')
- }],
- 'id': '611bdc42-12b3-4639-8faf-83da4e6403f8'
- }
- )
- self.assertEqual(
- self.executed_cmds, [
- ]
- )
- self.assertEqual(
- self.added_flows, [{
- 'actions': 'resubmit(,%d)' % agent.SF_SELECTOR,
- 'dl_type': 34887,
- 'priority': agent.PC_DEF_PRI,
- 'table': 0
- }, {
- 'actions': 'resubmit(,%d)' % agent.FWD_SELECTOR,
- 'dl_type': 34887,
- 'priority': agent.PC_DEF_PRI
- }, {
- 'actions': 'output:%d' % self.int_patch,
- 'priority': 0,
- 'table': agent.FWD_SELECTOR
- }, {
- 'actions': 'resubmit(,%d)' % agent.GRP_SELECTOR,
- 'in_port': self.int_patch,
- 'priority': agent.PC_DEF_PRI,
- 'table': agent.FWD_SELECTOR
- }, {
- 'actions': 'pop_mpls:0x0800,output:6',
- 'dl_dst': '00:01:02:03:05:07',
- 'dl_type': 34887,
- 'mpls_label': 65790,
- 'priority': 1,
- 'table': agent.SF_SELECTOR
- }]
- )
- self.assertEqual(
- self.group_mapping, {}
- )
-
- def test_update_flow_rules_flow_classifiers_port_pairs(self):
- self.port_mapping = {
- '8768d2b3-746d-4868-ae0e-e81861c2b4e6': {
- 'port_name': 'port1',
- 'ofport': 6,
- 'vif_mac': '00:01:02:03:05:07'
- },
- '29e38fb2-a643-43b1-baa8-a86596461cd5': {
- 'port_name': 'port2',
- 'ofport': 42,
- 'vif_mac': '00:01:02:03:06:08'
- },
- '82a575e0-6a6e-46ba-a5fc-692407839a85': {
- 'port_name': 'port3',
- 'ofport': 60,
- 'vif_mac': '00:01:02:03:06:09'
- },
- '93466f5d-252e-4552-afc6-5fb3f6019f76': {
- 'port_name': 'port4',
- 'ofport': 25,
- 'vif_mac': '00:01:02:03:06:10'
- }
- }
- self.agent.update_flow_rules(
- self.context, flowrule_entries={
- 'nsi': 255,
- 'ingress': None,
- 'next_hops': [{
- 'local_endpoint': '10.0.0.2',
- 'ingress': '8768d2b3-746d-4868-ae0e-e81861c2b4e6',
- 'weight': 1,
- 'mac_address': '12:34:56:78:cf:23'
- }],
- 'del_fcs': [],
- 'segment_id': 33,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:ed:01',
- 'network_type': 'gre',
- 'local_endpoint': u'10.0.0.2',
- 'node_type': 'src_node',
- 'egress': '29e38fb2-a643-43b1-baa8-a86596461cd5',
- 'next_group_id': 1,
- 'host_id': 'test1',
- 'nsp': 256,
- 'portchain_id': 'b9570dc9-822b-41fc-a27c-d915a21a3fe8',
- 'add_fcs': [{
- 'source_port_range_min': 100,
- 'destination_ip_prefix': u'10.200.0.0/16',
- 'protocol': u'tcp',
- 'logical_destination_port': (
- '82a575e0-6a6e-46ba-a5fc-692407839a85'),
- 'l7_parameters': {},
- 'source_port_range_max': 100,
- 'source_ip_prefix': '10.100.0.0/16',
- 'destination_port_range_min': 300,
- 'ethertype': 'IPv4',
- 'destination_port_range_max': 300,
- 'logical_source_port': (
- '29e38fb2-a643-43b1-baa8-a86596461cd5')
- }],
- 'id': '73e97aad-8c0f-44e3-bee0-c0a641b00b66'
- }
- )
- self.agent.update_flow_rules(
- self.context, flowrule_entries={
- 'nsi': 253,
- 'ingress': '82a575e0-6a6e-46ba-a5fc-692407839a85',
- 'next_hops': None,
- 'del_fcs': [],
- 'segment_id': 33,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:a6:84',
- 'network_type': 'gre',
- 'local_endpoint': '10.0.0.2',
- 'node_type': 'dst_node',
- 'egress': None,
- 'next_group_id': None,
- 'host_id': 'test2',
- 'nsp': 256,
- 'portchain_id': 'b9570dc9-822b-41fc-a27c-d915a21a3fe8',
- 'add_fcs': [{
- 'source_port_range_min': 100,
- 'destination_ip_prefix': '10.200.0.0/16',
- 'protocol': u'tcp',
- 'logical_destination_port': (
- '82a575e0-6a6e-46ba-a5fc-692407839a85'),
- 'l7_parameters': {},
- 'source_port_range_max': 100,
- 'source_ip_prefix': u'10.100.0.0/16',
- 'destination_port_range_min': 300,
- 'ethertype': u'IPv4',
- 'destination_port_range_max': 300,
- 'logical_source_port': (
- '29e38fb2-a643-43b1-baa8-a86596461cd5')
- }],
- 'id': 'fa385d84-7d78-44e7-aa8d-7b4a279a14d7'
- }
- )
- self.agent.update_flow_rules(
- self.context, flowrule_entries={
- 'nsi': 254,
- 'ingress': '8768d2b3-746d-4868-ae0e-e81861c2b4e6',
- 'next_hops': [{
- 'local_endpoint': '10.0.0.2',
- 'ingress': '82a575e0-6a6e-46ba-a5fc-692407839a85',
- 'weight': 1,
- 'mac_address': '12:34:56:78:a6:84'
- }],
- 'del_fcs': [],
- 'segment_id': 33,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:cf:23',
- 'network_type': 'gre',
- 'local_endpoint': '10.0.0.2',
- 'node_type': 'sf_node',
- 'egress': '93466f5d-252e-4552-afc6-5fb3f6019f76',
- 'next_group_id': None,
- 'host_id': 'test3',
- 'nsp': 256,
- 'portchain_id': 'b9570dc9-822b-41fc-a27c-d915a21a3fe8',
- 'add_fcs': [{
- 'source_port_range_min': 100,
- 'destination_ip_prefix': '10.200.0.0/16',
- 'protocol': u'tcp',
- 'logical_destination_port': (
- '82a575e0-6a6e-46ba-a5fc-692407839a85'),
- 'l7_parameters': {},
- 'source_port_range_max': 100,
- 'source_ip_prefix': u'10.100.0.0/16',
- 'destination_port_range_min': 300,
- 'ethertype': u'IPv4',
- 'destination_port_range_max': 300,
- 'logical_source_port': (
- '29e38fb2-a643-43b1-baa8-a86596461cd5')
- }],
- 'id': '07cc65a8-e99b-4175-a2f1-69b87eb8090a'
- }
- )
- self.assertEqual(
- self.executed_cmds, [
- ]
- )
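- # tp_src '0x64/0xffff' is an exact match on port 100 and tp_dst
- # '0x12c/0xffff' on port 300, matching the classifier ranges above.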
- self.assertEqual(
- self.added_flows, [{
- 'actions': 'resubmit(,%d)' % agent.SF_SELECTOR,
- 'dl_type': 34887,
- 'priority': agent.PC_DEF_PRI,
- 'table': 0
- }, {
- 'actions': 'resubmit(,%d)' % agent.FWD_SELECTOR,
- 'dl_type': 34887,
- 'priority': agent.PC_DEF_PRI
- }, {
- 'actions': 'output:%d' % self.int_patch,
- 'priority': 0,
- 'table': agent.FWD_SELECTOR
- }, {
- 'actions': 'resubmit(,%d)' % agent.GRP_SELECTOR,
- 'in_port': self.int_patch,
- 'priority': agent.PC_DEF_PRI,
- 'table': agent.FWD_SELECTOR
- }, {
- 'actions': (
- 'push_mpls:0x8847,'
- 'set_mpls_label:65791,'
- 'set_mpls_ttl:255,output:%d' % self.tun_patch
- ),
- 'dl_type': 2048,
- 'in_port': 42,
- 'nw_dst': u'10.200.0.0/16',
- 'nw_proto': 6,
- 'nw_src': u'10.100.0.0/16',
- 'priority': 10,
- 'table': 0,
- 'tp_dst': '0x12c/0xffff',
- 'tp_src': '0x64/0xffff'
- }, {
- 'actions': 'group:1',
- 'dl_type': 34887,
- 'mpls_label': 65791,
- 'priority': 0,
- 'table': agent.GRP_SELECTOR
- }, {
- 'actions': 'pop_mpls:0x0800,output:60',
- 'dl_dst': '00:01:02:03:06:09',
- 'dl_type': 34887,
- 'mpls_label': 65790,
- 'priority': 1,
- 'table': agent.SF_SELECTOR
- }, {
- 'actions': (
- 'mod_dl_dst:12:34:56:78:a6:84,'
- 'set_field:33->tun_id,output:[]'
- ),
- 'dl_type': 34887,
- 'mpls_label': 65790,
- 'nw_dst': u'10.200.0.0/16',
- 'nw_proto': 6,
- 'nw_src': u'10.100.0.0/16',
- 'priority': 0,
- 'table': agent.GRP_SELECTOR,
- 'tp_dst': '0x12c/0xffff',
- 'tp_src': '0x64/0xffff'
- }, {
- 'actions': (
- 'push_mpls:0x8847,'
- 'set_mpls_label:65790,'
- 'set_mpls_ttl:254,output:%d' % self.tun_patch
- ),
- 'dl_type': 2048,
- 'in_port': 25,
- 'nw_dst': u'10.200.0.0/16',
- 'nw_proto': 6,
- 'nw_src': u'10.100.0.0/16',
- 'priority': agent.PC_DEF_PRI,
- 'table': 0,
- 'tp_dst': '0x12c/0xffff',
- 'tp_src': '0x64/0xffff'
- }, {
- 'actions': 'pop_mpls:0x0800,output:6',
- 'dl_dst': '00:01:02:03:05:07',
- 'dl_type': 34887,
- 'mpls_label': 65791,
- 'priority': 1,
- 'table': agent.SF_SELECTOR
- }]
- )
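- # Next hops are load-balanced with an OpenFlow 'select' group; each
- # hop becomes one weighted bucket.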
- self.assertEqual(
- self.group_mapping, {
- 1: {
- 'buckets': (
- 'bucket=weight=1,'
- 'mod_dl_dst:12:34:56:78:cf:23,'
- 'set_field:33->tun_id,output:[]'
- ),
- 'group_id': 1,
- 'type': 'select'
- }
- }
- )
-
- def test_update_flow_rules_flow_classifiers_multi_port_groups(self):
- self.port_mapping = {
- '6331a00d-779b-462b-b0e4-6a65aa3164ef': {
- 'port_name': 'port1',
- 'ofport': 6,
- 'vif_mac': '00:01:02:03:05:07'
- },
- '1ebf82cf-70f9-43fd-8b90-6546f7d13040': {
- 'port_name': 'port2',
- 'ofport': 42,
- 'vif_mac': '00:01:02:03:06:08'
- },
- '34032c43-5207-43bb-95cb-cf426266fa11': {
- 'port_name': 'port3',
- 'ofport': 60,
- 'vif_mac': '00:01:02:03:06:09'
- },
- 'eaeec782-4ee8-4c7f-8ecb-f759dab4c723': {
- 'port_name': 'port4',
- 'ofport': 25,
- 'vif_mac': '00:01:02:03:06:10'
- },
- 'f56df7aa-e521-41ce-9001-ed7bedb65c9e': {
- 'port_name': 'port5',
- 'ofport': 5,
- 'vif_mac': '00:01:02:03:06:11'
- },
- '15dc026d-0520-4f92-9841-1056e62fdcaa': {
- 'port_name': 'port6',
- 'ofport': 50,
- 'vif_mac': '00:01:02:03:06:12'
- },
- 'd98a48fe-4ef7-4aa6-89fa-02312e54c1bd': {
- 'port_name': 'port7',
- 'ofport': 4,
- 'vif_mac': '00:01:02:03:06:13'
- },
- 'd412d042-d8bc-4dd9-b2be-b29c7e8b2c1b': {
- 'port_name': 'port8',
- 'ofport': 8,
- 'vif_mac': '00:01:02:03:06:14'
- }
- }
- self.agent.update_flow_rules(
- self.context, flowrule_entries={
- 'nsi': 255,
- 'ingress': None,
- 'next_hops': [{
- 'local_endpoint': '10.0.0.2',
- 'ingress': '34032c43-5207-43bb-95cb-cf426266fa11',
- 'weight': 1,
- 'mac_address': '12:34:56:78:b0:88'
- }],
- 'del_fcs': [],
- 'segment_id': 37,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:74:91',
- 'network_type': 'gre',
- 'local_endpoint': '10.0.0.2',
- 'node_type': 'src_node',
- 'egress': '6331a00d-779b-462b-b0e4-6a65aa3164ef',
- 'next_group_id': 1,
- 'host_id': 'test1',
- 'nsp': 256,
- 'portchain_id': 'd0b48df7-47ab-4909-b864-9aae1a6ee6fb',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'logical_destination_port': (
- '1ebf82cf-70f9-43fd-8b90-6546f7d13040'),
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'destination_port_range_min': None,
- 'ethertype': 'IPv4',
- 'destination_port_range_max': None,
- 'logical_source_port': (
- '6331a00d-779b-462b-b0e4-6a65aa3164ef')
- }],
- 'id': 'bbb1e50c-ecbb-400c-a7e9-8aed8f36993f'
- }
- )
- self.agent.update_flow_rules(
- self.context, flowrule_entries={
- 'nsi': 251,
- 'ingress': '1ebf82cf-70f9-43fd-8b90-6546f7d13040',
- 'next_hops': None,
- 'del_fcs': [],
- 'segment_id': 37,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:b7:0d',
- 'network_type': 'gre',
- 'local_endpoint': '10.0.0.2',
- 'node_type': 'dst_node',
- 'egress': None,
- 'next_group_id': None,
- 'host_id': 'test2',
- 'nsp': 256,
- 'portchain_id': 'd0b48df7-47ab-4909-b864-9aae1a6ee6fb',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'logical_destination_port': (
- '1ebf82cf-70f9-43fd-8b90-6546f7d13040'),
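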
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'destination_port_range_min': None,
- 'ethertype': u'IPv4',
- 'destination_port_range_max': None,
- 'logical_source_port': (
- '6331a00d-779b-462b-b0e4-6a65aa3164ef')
- }],
- 'id': '7ed75c14-2283-484a-97b8-30e23fbf7457'
- }
- )
- self.agent.update_flow_rules(
- self.context, flowrule_entries={
- 'nsi': 254,
- 'ingress': '34032c43-5207-43bb-95cb-cf426266fa11',
- 'next_hops': [{
- 'local_endpoint': u'10.0.0.2',
- 'ingress': u'f56df7aa-e521-41ce-9001-ed7bedb65c9e',
- 'weight': 1,
- 'mac_address': u'12:34:56:78:b1:0d'
- }],
- 'del_fcs': [],
- 'segment_id': 37,
- 'group_refcnt': 1,
- 'mac_address': u'12:34:56:78:b0:88',
- 'network_type': u'gre',
- 'local_endpoint': u'10.0.0.2',
- 'node_type': 'sf_node',
- 'egress': u'eaeec782-4ee8-4c7f-8ecb-f759dab4c723',
- 'next_group_id': 2,
- 'host_id': u'test3',
- 'nsp': 256,
- 'portchain_id': u'd0b48df7-47ab-4909-b864-9aae1a6ee6fb',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'logical_destination_port': (
- '1ebf82cf-70f9-43fd-8b90-6546f7d13040'),
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'destination_port_range_min': None,
- 'ethertype': u'IPv4',
- 'destination_port_range_max': None,
- 'logical_source_port': (
- '6331a00d-779b-462b-b0e4-6a65aa3164ef')
- }],
- 'id': 'f9fd9c7a-0100-43fb-aea9-30c67f2a731a'
- }
- )
- self.agent.update_flow_rules(
- self.context, flowrule_entries={
- 'nsi': 253,
- 'ingress': 'f56df7aa-e521-41ce-9001-ed7bedb65c9e',
- 'next_hops': [{
- 'local_endpoint': '10.0.0.2',
- 'ingress': 'd98a48fe-4ef7-4aa6-89fa-02312e54c1bd',
- 'weight': 1,
- 'mac_address': '12:34:56:78:4e:dd'
- }],
- 'del_fcs': [],
- 'segment_id': 37,
- 'group_refcnt': 1,
- 'mac_address': u'12:34:56:78:b1:0d',
- 'network_type': u'gre',
- 'local_endpoint': u'10.0.0.2',
- 'node_type': 'sf_node',
- 'egress': u'15dc026d-0520-4f92-9841-1056e62fdcaa',
- 'next_group_id': 3,
- 'host_id': u'test5',
- 'nsp': 256,
- 'portchain_id': u'd0b48df7-47ab-4909-b864-9aae1a6ee6fb',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'logical_destination_port': (
- '1ebf82cf-70f9-43fd-8b90-6546f7d13040'),
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'destination_port_range_min': None,
- 'ethertype': u'IPv4',
- 'destination_port_range_max': None,
- 'logical_source_port': (
- '6331a00d-779b-462b-b0e4-6a65aa3164ef')
- }],
- 'id': '62f4bb35-1b4a-4cc4-bf07-f40ed5c2d6a7'
- }
- )
- self.agent.update_flow_rules(
- self.context, flowrule_entries={
- 'nsi': 252,
- 'ingress': u'd98a48fe-4ef7-4aa6-89fa-02312e54c1bd',
- 'next_hops': [{
- 'local_endpoint': u'10.0.0.2',
- 'ingress': u'1ebf82cf-70f9-43fd-8b90-6546f7d13040',
- 'weight': 1,
- 'mac_address': u'12:34:56:78:b7:0d'
- }],
- 'del_fcs': [],
- 'segment_id': 37,
- 'group_refcnt': 1,
- 'mac_address': u'12:34:56:78:4e:dd',
- 'network_type': u'gre',
- 'local_endpoint': u'10.0.0.2',
- 'node_type': 'sf_node',
- 'egress': u'd412d042-d8bc-4dd9-b2be-b29c7e8b2c1b',
- 'next_group_id': None,
- 'host_id': u'test7',
- 'nsp': 256,
- 'portchain_id': u'd0b48df7-47ab-4909-b864-9aae1a6ee6fb',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'logical_destination_port': (
- '1ebf82cf-70f9-43fd-8b90-6546f7d13040'),
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'destination_port_range_min': None,
- 'ethertype': u'IPv4',
- 'destination_port_range_max': None,
- 'logical_source_port': (
- '6331a00d-779b-462b-b0e4-6a65aa3164ef')
- }],
- 'id': 'a535e740-02cc-47ef-aab1-7bcb1594db9b'
- }
- )
- self.assertEqual(
- self.executed_cmds, [
- ]
- )
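- # Each hop down the chain decrements both the pushed MPLS label
- # (65791 -> 65788) and the TTL (255 -> 252).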
- self.assertEqual(
- self.added_flows, [{
- 'actions': 'resubmit(,5)',
- 'dl_type': 34887,
- 'priority': 10,
- 'table': 0
- }, {
- 'actions': 'resubmit(,30)',
- 'dl_type': 34887,
- 'priority': 10
- }, {
- 'actions': 'output:%d' % self.int_patch,
- 'priority': 0,
- 'table': 30
- }, {
- 'actions': 'resubmit(,31)',
- 'in_port': self.int_patch,
- 'priority': 10,
- 'table': 30
- }, {
- 'actions': (
- 'push_mpls:0x8847,'
- 'set_mpls_label:65791,'
- 'set_mpls_ttl:255,output:%d' % self.tun_patch
- ),
- 'dl_type': 2048,
- 'in_port': 6,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 10,
- 'table': 0,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }, {
- 'actions': 'group:1',
- 'dl_type': 34887,
- 'mpls_label': 65791,
- 'priority': 0,
- 'table': 31
- }, {
- 'actions': (
- 'pop_mpls:0x0800,'
- 'output:42'
- ),
- 'dl_dst': '00:01:02:03:06:08',
- 'dl_type': 34887,
- 'mpls_label': 65788,
- 'priority': 1,
- 'table': 5
- }, {
- 'actions': (
- 'push_mpls:0x8847,'
- 'set_mpls_label:65790,'
- 'set_mpls_ttl:254,output:%d' % self.tun_patch
- ),
- 'dl_type': 2048,
- 'in_port': 25,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 10,
- 'table': 0,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }, {
- 'actions': 'group:2',
- 'dl_type': 34887,
- 'mpls_label': 65790,
- 'priority': 0,
- 'table': 31
- }, {
- 'actions': (
- 'pop_mpls:0x0800,'
- 'output:60'
- ),
- 'dl_dst': '00:01:02:03:06:09',
- 'dl_type': 34887,
- 'mpls_label': 65791,
- 'priority': 1,
- 'table': 5
- }, {
- 'actions': (
- 'push_mpls:0x8847,'
- 'set_mpls_label:65789,'
- 'set_mpls_ttl:253,output:%d' % self.tun_patch
- ),
- 'dl_type': 2048,
- 'in_port': 50,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 10,
- 'table': 0,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }, {
- 'actions': 'group:3',
- 'dl_type': 34887,
- 'mpls_label': 65789,
- 'priority': 0,
- 'table': 31
- }, {
- 'actions': (
- 'pop_mpls:0x0800,'
- 'output:5'
- ),
- 'dl_dst': '00:01:02:03:06:11',
- 'dl_type': 34887,
- 'mpls_label': 65790,
- 'priority': 1,
- 'table': 5
- }, {
- 'actions': (
- 'mod_dl_dst:12:34:56:78:b7:0d,'
- 'set_field:37->tun_id,output:[]'
- ),
- 'dl_type': 34887,
- 'mpls_label': 65788,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 0,
- 'table': 31,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }, {
- 'actions': (
- 'push_mpls:0x8847,'
- 'set_mpls_label:65788,'
- 'set_mpls_ttl:252,output:%d' % self.tun_patch
- ),
- 'dl_type': 2048,
- 'in_port': 8,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 10,
- 'table': 0,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }, {
- 'actions': (
- 'pop_mpls:0x0800,'
- 'output:4'
- ),
- 'dl_dst': '00:01:02:03:06:13',
- 'dl_type': 34887,
- 'mpls_label': 65789,
- 'priority': 1,
- 'table': 5
- }]
- )
- self.assertEqual(
- self.group_mapping, {
- 1: {
- 'buckets': (
- 'bucket=weight=1,'
- 'mod_dl_dst:12:34:56:78:b0:88,'
- 'set_field:37->tun_id,output:[]'
- ),
- 'group_id': 1,
- 'type': 'select'
- },
- 2: {
- 'buckets': (
- 'bucket=weight=1,'
- 'mod_dl_dst:12:34:56:78:b1:0d,'
- 'set_field:37->tun_id,output:[]'
- ),
- 'group_id': 2,
- 'type': 'select'
- },
- 3: {
- 'buckets': (
- 'bucket=weight=1,'
- 'mod_dl_dst:12:34:56:78:4e:dd,'
- 'set_field:37->tun_id,output:[]'
- ),
- 'group_id': 3,
- 'type': 'select'
- }
- }
- )
-
- def test_update_flow_rules_flow_classifiers_multi_port_pairs(self):
- self.port_mapping = {
- '9864d8e8-0aff-486e-8b84-7a8d20c017d4': {
- 'port_name': 'port1',
- 'ofport': 6,
- 'vif_mac': '00:01:02:03:05:07'
- },
- '21047d09-eaa7-4296-af56-b509e4a10853': {
- 'port_name': 'port2',
- 'ofport': 42,
- 'vif_mac': '00:01:02:03:06:08'
- },
- '38266cfe-cd42-413e-80ff-d0d0c74ad260': {
- 'port_name': 'port3',
- 'ofport': 60,
- 'vif_mac': '00:01:02:03:06:09'
- },
- '272be90c-b140-4e9d-8dd3-1993fbb3656c': {
- 'port_name': 'port4',
- 'ofport': 25,
- 'vif_mac': '00:01:02:03:06:10'
- },
- 'd1791c8d-a07a-4f35-bd52-b99395da0d76': {
- 'port_name': 'port5',
- 'ofport': 5,
- 'vif_mac': '00:01:02:03:06:11'
- },
- 'ed2804bd-d61a-49e7-9007-76d2540ae78a': {
- 'port_name': 'port6',
- 'ofport': 50,
- 'vif_mac': '00:01:02:03:06:12'
- },
- 'bdf4f759-ca35-4cf5-89ac-53da0d6b3fbf': {
- 'port_name': 'port7',
- 'ofport': 4,
- 'vif_mac': '00:01:02:03:06:13'
- },
- 'a55b9062-d3fa-4dc2-a4df-bb8b2a908c19': {
- 'port_name': 'port8',
- 'ofport': 8,
- 'vif_mac': '00:01:02:03:06:14'
- }
- }
- self.agent.update_flow_rules(
- self.context, flowrule_entries={
- 'nsi': 255,
- 'ingress': None,
- 'next_hops': [{
- 'local_endpoint': u'10.0.0.2',
- 'ingress': u'38266cfe-cd42-413e-80ff-d0d0c74ad260',
- 'weight': 1,
- 'mac_address': u'12:34:56:78:74:c1'
- }, {
- 'local_endpoint': u'10.0.0.2',
- 'ingress': u'd1791c8d-a07a-4f35-bd52-b99395da0d76',
- 'weight': 1,
- 'mac_address': u'12:34:56:78:4f:6e'
- }, {
- 'local_endpoint': u'10.0.0.2',
- 'ingress': u'bdf4f759-ca35-4cf5-89ac-53da0d6b3fbf',
- 'weight': 1,
- 'mac_address': u'12:34:56:78:d5:66'
- }],
- 'del_fcs': [],
- 'segment_id': 51,
- 'group_refcnt': 1,
- 'mac_address': u'12:34:56:78:9c:70',
- 'network_type': u'gre',
- 'local_endpoint': u'10.0.0.2',
- 'node_type': 'src_node',
- 'egress': u'9864d8e8-0aff-486e-8b84-7a8d20c017d4',
- 'next_group_id': 1,
- 'host_id': u'test1',
- 'nsp': 256,
- 'portchain_id': u'3dddbb0c-5ac4-437c-9b62-ed7ddf8df37f',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'logical_destination_port': (
- '21047d09-eaa7-4296-af56-b509e4a10853'),
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'destination_port_range_min': None,
- 'ethertype': u'IPv4',
- 'destination_port_range_max': None,
- 'logical_source_port': (
- '9864d8e8-0aff-486e-8b84-7a8d20c017d4')
- }],
- 'id': '677dfe31-8566-4bd8-8a1e-5f8efd7a45eb'
- }
- )
- self.agent.update_flow_rules(
- self.context, flowrule_entries={
- 'nsi': 253,
- 'ingress': u'21047d09-eaa7-4296-af56-b509e4a10853',
- 'next_hops': None,
- 'del_fcs': [],
- 'segment_id': 51,
- 'group_refcnt': 1,
- 'mac_address': u'12:34:56:78:67:cb',
- 'network_type': u'gre',
- 'local_endpoint': u'10.0.0.2',
- 'node_type': 'dst_node',
- 'egress': None,
- 'next_group_id': None,
- 'host_id': u'test2',
- 'nsp': 256,
- 'portchain_id': u'3dddbb0c-5ac4-437c-9b62-ed7ddf8df37f',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'logical_destination_port': (
- '21047d09-eaa7-4296-af56-b509e4a10853'),
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'destination_port_range_min': None,
- 'ethertype': u'IPv4',
- 'destination_port_range_max': None,
- 'logical_source_port': (
- '9864d8e8-0aff-486e-8b84-7a8d20c017d4')
- }],
- 'id': '4f275568-38cb-45a1-a162-e0d1d4ef335d'
- }
- )
- self.agent.update_flow_rules(
- self.context, flowrule_entries={
- 'nsi': 254,
- 'ingress': u'38266cfe-cd42-413e-80ff-d0d0c74ad260',
- 'next_hops': [{
- 'local_endpoint': u'10.0.0.2',
- 'ingress': u'21047d09-eaa7-4296-af56-b509e4a10853',
- 'weight': 1,
- 'mac_address': u'12:34:56:78:67:cb'
- }],
- 'del_fcs': [],
- 'segment_id': 51,
- 'group_refcnt': 1,
- 'mac_address': u'12:34:56:78:74:c1',
- 'network_type': u'gre',
- 'local_endpoint': u'10.0.0.2',
- 'node_type': 'sf_node',
- 'egress': u'272be90c-b140-4e9d-8dd3-1993fbb3656c',
- 'next_group_id': None,
- 'host_id': u'test3',
- 'nsp': 256,
- 'portchain_id': u'3dddbb0c-5ac4-437c-9b62-ed7ddf8df37f',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'logical_destination_port': (
- '21047d09-eaa7-4296-af56-b509e4a10853'),
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'destination_port_range_min': None,
- 'ethertype': u'IPv4',
- 'destination_port_range_max': None,
- 'logical_source_port': (
- '9864d8e8-0aff-486e-8b84-7a8d20c017d4')
- }],
- 'id': '48fd97b1-e166-4aff-906f-8096a48a7cb1'
- }
- )
- self.agent.update_flow_rules(
- self.context, flowrule_entries={
- 'nsi': 254,
- 'ingress': u'd1791c8d-a07a-4f35-bd52-b99395da0d76',
- 'next_hops': [{
- 'local_endpoint': u'10.0.0.2',
- 'ingress': u'21047d09-eaa7-4296-af56-b509e4a10853',
- 'weight': 1,
- 'mac_address': u'12:34:56:78:67:cb'
- }],
- 'del_fcs': [],
- 'segment_id': 51,
- 'group_refcnt': 1,
- 'mac_address': u'12:34:56:78:4f:6e',
- 'network_type': u'gre',
- 'local_endpoint': u'10.0.0.2',
- 'node_type': 'sf_node',
- 'egress': u'ed2804bd-d61a-49e7-9007-76d2540ae78a',
- 'next_group_id': None,
- 'host_id': u'test5',
- 'nsp': 256,
- 'portchain_id': u'3dddbb0c-5ac4-437c-9b62-ed7ddf8df37f',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'logical_destination_port': (
- '21047d09-eaa7-4296-af56-b509e4a10853'),
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'destination_port_range_min': None,
- 'ethertype': u'IPv4',
- 'destination_port_range_max': None,
- 'logical_source_port': (
- '9864d8e8-0aff-486e-8b84-7a8d20c017d4')
- }],
- 'id': '48fd97b1-e166-4aff-906f-8096a48a7cb1'
- }
- )
- self.agent.update_flow_rules(
- self.context, flowrule_entries={
- 'nsi': 254,
- 'ingress': u'bdf4f759-ca35-4cf5-89ac-53da0d6b3fbf',
- 'next_hops': [{
- 'local_endpoint': u'10.0.0.2',
- 'ingress': u'21047d09-eaa7-4296-af56-b509e4a10853',
- 'weight': 1,
- 'mac_address': u'12:34:56:78:67:cb'
- }],
- 'del_fcs': [],
- 'segment_id': 51,
- 'group_refcnt': 1,
- 'mac_address': u'12:34:56:78:d5:66',
- 'network_type': u'gre',
- 'local_endpoint': u'10.0.0.2',
- 'node_type': 'sf_node',
- 'egress': u'a55b9062-d3fa-4dc2-a4df-bb8b2a908c19',
- 'next_group_id': None,
- 'host_id': u'test7',
- 'nsp': 256,
- 'portchain_id': u'3dddbb0c-5ac4-437c-9b62-ed7ddf8df37f',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'logical_destination_port': (
- '21047d09-eaa7-4296-af56-b509e4a10853'),
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'destination_port_range_min': None,
- 'ethertype': u'IPv4',
- 'destination_port_range_max': None,
- 'logical_source_port': (
- '9864d8e8-0aff-486e-8b84-7a8d20c017d4')
- }],
- 'id': '48fd97b1-e166-4aff-906f-8096a48a7cb1'
- }
- )
- self.assertEqual(
- self.executed_cmds, [
- ]
- )
- self.assertEqual(
- self.added_flows, [{
- 'actions': 'resubmit(,5)',
- 'dl_type': 34887,
- 'priority': 10,
- 'table': 0
- }, {
- 'actions': 'resubmit(,30)',
- 'dl_type': 34887,
- 'priority': 10
- }, {
- 'actions': 'output:%d' % self.int_patch,
- 'priority': 0,
- 'table': 30
- }, {
- 'actions': 'resubmit(,31)',
- 'in_port': self.int_patch,
- 'priority': 10,
- 'table': 30
- }, {
- 'actions': (
- 'push_mpls:0x8847,'
- 'set_mpls_label:65791,'
- 'set_mpls_ttl:255,output:%d' % self.tun_patch
- ),
- 'dl_type': 2048,
- 'in_port': 6,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 10,
- 'table': 0,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }, {
- 'actions': 'group:1',
- 'dl_type': 34887,
- 'mpls_label': 65791,
- 'priority': 0,
- 'table': 31
- }, {
- 'actions': (
- 'pop_mpls:0x0800,'
- 'output:42'
- ),
- 'dl_dst': '00:01:02:03:06:08',
- 'dl_type': 34887,
- 'mpls_label': 65790,
- 'priority': 1,
- 'table': 5
- }, {
- 'actions': (
- 'mod_dl_dst:12:34:56:78:67:cb,'
- 'set_field:51->tun_id,output:[]'
- ),
- 'dl_type': 34887,
- 'mpls_label': 65790,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 0,
- 'table': 31,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }, {
- 'actions': (
- 'push_mpls:0x8847,'
- 'set_mpls_label:65790,'
- 'set_mpls_ttl:254,output:%d' % self.tun_patch
- ),
- 'dl_type': 2048,
- 'in_port': 25,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 10,
- 'table': 0,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }, {
- 'actions': (
- 'pop_mpls:0x0800,'
- 'output:60'
- ),
- 'dl_dst': '00:01:02:03:06:09',
- 'dl_type': 34887,
- 'mpls_label': 65791,
- 'priority': 1,
- 'table': 5
- }, {
- 'actions': (
- 'push_mpls:0x8847,'
- 'set_mpls_label:65790,'
- 'set_mpls_ttl:254,output:%d' % self.tun_patch
- ),
- 'dl_type': 2048,
- 'in_port': 50,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 10,
- 'table': 0,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }, {
- 'actions': (
- 'pop_mpls:0x0800,'
- 'output:5'
- ),
- 'dl_dst': '00:01:02:03:06:11',
- 'dl_type': 34887,
- 'mpls_label': 65791,
- 'priority': 1,
- 'table': 5
- }, {
- 'actions': (
- 'push_mpls:0x8847,'
- 'set_mpls_label:65790,'
- 'set_mpls_ttl:254,output:%d' % self.tun_patch
- ),
- 'dl_type': 2048,
- 'in_port': 8,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 10,
- 'table': 0,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }, {
- 'actions': (
- 'pop_mpls:0x0800,'
- 'output:4'
- ),
- 'dl_dst': '00:01:02:03:06:13',
- 'dl_type': 34887,
- 'mpls_label': 65791,
- 'priority': 1,
- 'table': 5
- }]
- )
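- # The three port pairs of the single group become three equal-weight
- # buckets in one 'select' group.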
- self.assertEqual(
- self.group_mapping, {
- 1: {
- 'buckets': (
- 'bucket=weight=1,'
- 'mod_dl_dst:12:34:56:78:74:c1,'
- 'set_field:51->tun_id,output:[],'
- 'bucket=weight=1,'
- 'mod_dl_dst:12:34:56:78:4f:6e,'
- 'set_field:51->tun_id,output:[],'
- 'bucket=weight=1,'
- 'mod_dl_dst:12:34:56:78:d5:66,'
- 'set_field:51->tun_id,output:[]'
- ),
- 'group_id': 1,
- 'type': 'select'
- }
- }
- )
-
- def test_update_flow_rules_multi_flow_classifiers(self):
- self.port_mapping = {
- '54abe601-6685-4c38-9b9d-0d8381a43d56': {
- 'port_name': 'port1',
- 'ofport': 6,
- 'vif_mac': '00:01:02:03:05:07'
- },
- 'c2de00c2-bd91-4f60-8a7d-5a3ea8f65e77': {
- 'port_name': 'port2',
- 'ofport': 42,
- 'vif_mac': '00:01:02:03:06:08'
- },
- '460a5875-b0c6-408e-ada4-0ef01d39bcff': {
- 'port_name': 'port3',
- 'ofport': 60,
- 'vif_mac': '00:01:02:03:06:09'
- },
- 'b2b8a556-593b-4695-8812-cdd33a314867': {
- 'port_name': 'port4',
- 'ofport': 25,
- 'vif_mac': '00:01:02:03:06:10'
- },
- '2656a373-a985-4940-90d1-cfe172951e0c': {
- 'port_name': 'port5',
- 'ofport': 5,
- 'vif_mac': '00:01:02:03:06:11'
- },
- 'a979a847-3014-43ea-b37d-5a3775a173c7': {
- 'port_name': 'port6',
- 'ofport': 50,
- 'vif_mac': '00:01:02:03:06:12'
- }
- }
- self.agent.update_flow_rules(
- self.context, flowrule_entries={
- 'nsi': 255,
- 'ingress': None,
- 'next_hops': [{
- 'local_endpoint': u'10.0.0.2',
- 'ingress': u'2656a373-a985-4940-90d1-cfe172951e0c',
- 'weight': 1,
- 'mac_address': u'12:34:56:78:5f:ea'
- }],
- 'del_fcs': [],
- 'segment_id': 58,
- 'group_refcnt': 1,
- 'mac_address': u'12:34:56:78:b9:09',
- 'network_type': u'gre',
- 'local_endpoint': u'10.0.0.2',
- 'node_type': 'src_node',
- 'egress': u'54abe601-6685-4c38-9b9d-0d8381a43d56',
- 'next_group_id': 1,
- 'host_id': u'test1',
- 'nsp': 256,
- 'portchain_id': u'3eefdf29-ea8f-4794-a36f-5e60ec7fe208',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'logical_destination_port': (
- '460a5875-b0c6-408e-ada4-0ef01d39bcff'),
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'destination_port_range_min': None,
- 'ethertype': u'IPv4',
- 'destination_port_range_max': None,
- 'logical_source_port': (
- '54abe601-6685-4c38-9b9d-0d8381a43d56')
- }],
- 'id': 'd2e675d3-739e-4451-95d5-a15e23c6eaac'
- }
- )
- self.agent.update_flow_rules(
- self.context, flowrule_entries={
- 'nsi': 255,
- 'ingress': None,
- 'next_hops': [{
- 'local_endpoint': u'10.0.0.2',
- 'ingress': u'2656a373-a985-4940-90d1-cfe172951e0c',
- 'weight': 1,
- 'mac_address': u'12:34:56:78:5f:ea'
- }],
- 'del_fcs': [],
- 'segment_id': 58,
- 'group_refcnt': 1,
- 'mac_address': u'12:34:56:78:4d:d1',
- 'network_type': u'gre',
- 'local_endpoint': u'10.0.0.2',
- 'node_type': 'src_node',
- 'egress': u'c2de00c2-bd91-4f60-8a7d-5a3ea8f65e77',
- 'next_group_id': 1,
- 'host_id': u'test3',
- 'nsp': 256,
- 'portchain_id': u'3eefdf29-ea8f-4794-a36f-5e60ec7fe208',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'logical_destination_port': (
- 'b2b8a556-593b-4695-8812-cdd33a314867'),
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'destination_port_range_min': None,
- 'ethertype': u'IPv4',
- 'destination_port_range_max': None,
- 'logical_source_port': (
- 'c2de00c2-bd91-4f60-8a7d-5a3ea8f65e77')
- }],
- 'id': 'd2e675d3-739e-4451-95d5-a15e23c6eaac'
- }
- )
- self.agent.update_flow_rules(
- self.context, flowrule_entries={
- 'nsi': 253,
- 'ingress': u'460a5875-b0c6-408e-ada4-0ef01d39bcff',
- 'next_hops': None,
- 'del_fcs': [],
- 'segment_id': 58,
- 'group_refcnt': 1,
- 'mac_address': u'12:34:56:78:fc:b8',
- 'network_type': u'gre',
- 'local_endpoint': u'10.0.0.2',
- 'node_type': 'dst_node',
- 'egress': None,
- 'next_group_id': None,
- 'host_id': u'test2',
- 'nsp': 256,
- 'portchain_id': u'3eefdf29-ea8f-4794-a36f-5e60ec7fe208',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'logical_destination_port': (
- '460a5875-b0c6-408e-ada4-0ef01d39bcff'),
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'destination_port_range_min': None,
- 'ethertype': u'IPv4',
- 'destination_port_range_max': None,
- 'logical_source_port': (
- '54abe601-6685-4c38-9b9d-0d8381a43d56')
- }],
- 'id': '029823ae-8524-4e1c-8f5b-4ee7ec55f1bd'
- }
- )
- self.agent.update_flow_rules(
- self.context, flowrule_entries={
- 'nsi': 253,
- 'ingress': u'b2b8a556-593b-4695-8812-cdd33a314867',
- 'next_hops': None,
- 'del_fcs': [],
- 'segment_id': 58,
- 'group_refcnt': 1,
- 'mac_address': u'12:34:56:78:7b:15',
- 'network_type': u'gre',
- 'local_endpoint': u'10.0.0.2',
- 'node_type': 'dst_node',
- 'egress': None,
- 'next_group_id': None,
- 'host_id': u'test4',
- 'nsp': 256,
- 'portchain_id': u'3eefdf29-ea8f-4794-a36f-5e60ec7fe208',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'logical_destination_port': (
- 'b2b8a556-593b-4695-8812-cdd33a314867'),
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'destination_port_range_min': None,
- 'ethertype': u'IPv4',
- 'destination_port_range_max': None,
- 'logical_source_port': (
- 'c2de00c2-bd91-4f60-8a7d-5a3ea8f65e77')
- }],
- 'id': '029823ae-8524-4e1c-8f5b-4ee7ec55f1bd'
- }
- )
- self.agent.update_flow_rules(
- self.context, flowrule_entries={
- 'nsi': 254,
- 'ingress': u'2656a373-a985-4940-90d1-cfe172951e0c',
- 'next_hops': [{
- 'local_endpoint': u'10.0.0.2',
- 'ingress': u'460a5875-b0c6-408e-ada4-0ef01d39bcff',
- 'weight': 1,
- 'mac_address': u'12:34:56:78:fc:b8'
- }, {
- 'local_endpoint': u'10.0.0.2',
- 'ingress': u'b2b8a556-593b-4695-8812-cdd33a314867',
- 'weight': 1,
- 'mac_address': u'12:34:56:78:7b:15'
- }],
- 'del_fcs': [],
- 'segment_id': 58,
- 'group_refcnt': 1,
- 'mac_address': u'12:34:56:78:5f:ea',
- 'network_type': u'gre',
- 'local_endpoint': u'10.0.0.2',
- 'node_type': 'sf_node',
- 'egress': u'a979a847-3014-43ea-b37d-5a3775a173c7',
- 'next_group_id': None,
- 'host_id': u'test5',
- 'nsp': 256,
- 'portchain_id': u'3eefdf29-ea8f-4794-a36f-5e60ec7fe208',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'logical_destination_port': (
- '460a5875-b0c6-408e-ada4-0ef01d39bcff'),
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'destination_port_range_min': None,
- 'ethertype': u'IPv4',
- 'destination_port_range_max': None,
- 'logical_source_port': (
- '54abe601-6685-4c38-9b9d-0d8381a43d56')
- }, {
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'logical_destination_port': (
- 'b2b8a556-593b-4695-8812-cdd33a314867'),
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'destination_port_range_min': None,
- 'ethertype': u'IPv4',
- 'destination_port_range_max': None,
- 'logical_source_port': (
- 'c2de00c2-bd91-4f60-8a7d-5a3ea8f65e77')
- }],
- 'id': '983cfa51-f9e6-4e36-8f6c-0c84df915cd1'
- }
- )
- self.assertEqual(
- self.executed_cmds, [
- ]
- )
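- # Both source-node classifiers feed the same group 1; the sf_node then
- # fans out to the two destination nodes via separate table 31 entries.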
- self.assertEqual(
- self.added_flows, [{
- 'actions': 'resubmit(,5)',
- 'dl_type': 34887,
- 'priority': 10,
- 'table': 0
- }, {
- 'actions': 'resubmit(,30)',
- 'dl_type': 34887,
- 'priority': 10
- }, {
- 'actions': 'output:%d' % self.int_patch,
- 'priority': 0,
- 'table': 30
- }, {
- 'actions': 'resubmit(,31)',
- 'in_port': self.int_patch,
- 'priority': 10,
- 'table': 30
- }, {
- 'actions': (
- 'push_mpls:0x8847,'
- 'set_mpls_label:65791,'
- 'set_mpls_ttl:255,output:%d' % self.tun_patch
- ),
- 'dl_type': 2048,
- 'in_port': 6,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 10,
- 'table': 0,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }, {
- 'actions': 'group:1',
- 'dl_type': 34887,
- 'mpls_label': 65791,
- 'priority': 0,
- 'table': 31
- }, {
- 'actions': (
- 'push_mpls:0x8847,'
- 'set_mpls_label:65791,'
- 'set_mpls_ttl:255,output:%d' % self.tun_patch
- ),
- 'dl_type': 2048,
- 'in_port': 42,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 10,
- 'table': 0,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }, {
- 'actions': (
- 'pop_mpls:0x0800,'
- 'output:60'
- ),
- 'dl_dst': '00:01:02:03:06:09',
- 'dl_type': 34887,
- 'mpls_label': 65790,
- 'priority': 1,
- 'table': 5
- }, {
- 'actions': (
- 'pop_mpls:0x0800,'
- 'output:25'
- ),
- 'dl_dst': '00:01:02:03:06:10',
- 'dl_type': 34887,
- 'mpls_label': 65790,
- 'priority': 1,
- 'table': 5
- }, {
- 'actions': (
- 'mod_dl_dst:12:34:56:78:fc:b8,'
- 'set_field:58->tun_id,output:[]'
- ),
- 'dl_type': 34887,
- 'mpls_label': 65790,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 0,
- 'table': 31,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }, {
- 'actions': (
- 'push_mpls:0x8847,'
- 'set_mpls_label:65790,'
- 'set_mpls_ttl:254,output:%d' % self.tun_patch
- ),
- 'dl_type': 2048,
- 'in_port': 50,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 10,
- 'table': 0,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }, {
- 'actions': (
- 'mod_dl_dst:12:34:56:78:7b:15,'
- 'set_field:58->tun_id,output:[]'
- ),
- 'dl_type': 34887,
- 'mpls_label': 65790,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 0,
- 'table': 31,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }, {
- 'actions': (
- 'pop_mpls:0x0800,'
- 'output:5'
- ),
- 'dl_dst': '00:01:02:03:06:11',
- 'dl_type': 34887,
- 'mpls_label': 65791,
- 'priority': 1,
- 'table': 5
- }]
- )
- self.assertEqual(
- self.group_mapping, {
- 1: {
- 'buckets': (
- 'bucket=weight=1,'
- 'mod_dl_dst:12:34:56:78:5f:ea,'
- 'set_field:58->tun_id,output:[]'
- ),
- 'group_id': 1,
- 'type': 'select'
- }
- }
- )
-
- def test_delete_flow_rules_port_pair(self):
- self.port_mapping = {
- 'dd7374b9-a6ac-4a66-a4a6-7d3dee2a1579': {
- 'port_name': 'src_port',
- 'ofport': 6,
- 'vif_mac': '00:01:02:03:05:07'
- },
- '2f1d2140-42ce-4979-9542-7ef25796e536': {
- 'port_name': 'dst_port',
- 'ofport': 42,
- 'vif_mac': '00:01:02:03:06:08'
- }
- }
- self.agent.delete_flow_rules(
- self.context, flowrule_entries={
- 'nsi': 254,
- 'ingress': u'dd7374b9-a6ac-4a66-a4a6-7d3dee2a1579',
- 'next_hops': None,
- 'del_fcs': [],
- 'segment_id': 75,
- 'group_refcnt': 1,
- 'mac_address': u'12:34:56:78:fd:b2',
- 'network_type': u'gre',
- 'local_endpoint': u'10.0.0.2',
- 'node_type': 'sf_node',
- 'egress': u'2f1d2140-42ce-4979-9542-7ef25796e536',
- 'next_group_id': None,
- 'host_id': u'test1',
- 'nsp': 256,
- 'portchain_id': u'84c1411f-7a94-4b4f-9a8b-ad9607c67c76',
- 'add_fcs': [],
- 'id': '611bdc42-12b3-4639-8faf-83da4e6403f7'
- }
- )
- self.assertEqual(
- self.executed_cmds, [
- ]
- )
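- # Flow deletion supplies only the match fields (table, label, dl_dst);
- # actions are not part of a delete.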
- self.assertEqual(
- self.deleted_flows, [{
- 'dl_dst': '00:01:02:03:05:07',
- 'dl_type': 34887,
- 'mpls_label': '65791',
- 'table': 5
- }, {
- 'dl_type': 34887,
- 'mpls_label': '65790',
- 'table': 31
- }]
- )
- self.assertEqual(
- self.deleted_groups, [
- ]
- )
-
- def test_delete_flow_rules_flow_classifiers(self):
- self.port_mapping = {
- 'e1229670-2a07-450d-bdc9-34e71c301206': {
- 'port_name': 'src_port',
- 'ofport': 6,
- 'vif_mac': '00:01:02:03:05:07'
- },
- '9bedd01e-c216-4dfd-b48e-fbd5c8212ba4': {
- 'port_name': 'dst_port',
- 'ofport': 42,
- 'vif_mac': '00:01:02:03:06:08'
- }
- }
-
- self.agent.delete_flow_rules(
- self.context, flowrule_entries={
- 'nsi': 255,
- 'ingress': None,
- 'next_hops': None,
- 'add_fcs': [],
- 'segment_id': 43,
- 'group_refcnt': 1,
- 'mac_address': u'12:34:56:78:72:05',
- 'network_type': u'gre',
- 'local_endpoint': u'10.0.0.2',
- 'node_type': 'src_node',
- 'egress': u'9bedd01e-c216-4dfd-b48e-fbd5c8212ba4',
- 'next_group_id': 1,
- 'host_id': u'test1',
- 'nsp': 256,
- 'portchain_id': u'8cba323e-5e67-4df0-a4b0-7e1ef486a656',
- 'del_fcs': [{
- 'source_port_range_min': 100,
- 'destination_ip_prefix': u'10.200.0.0/16',
- 'protocol': u'tcp',
- 'logical_destination_port': (
- 'e1229670-2a07-450d-bdc9-34e71c301206'),
- 'l7_parameters': {},
- 'source_port_range_max': 100,
- 'source_ip_prefix': u'10.100.0.0/16',
- 'destination_port_range_min': 300,
- 'ethertype': u'IPv4',
- 'destination_port_range_max': 300,
- 'logical_source_port': (
- '9bedd01e-c216-4dfd-b48e-fbd5c8212ba4')
- }],
- 'id': '611bdc42-12b3-4639-8faf-83da4e6403f7'
- }
- )
- self.agent.delete_flow_rules(
- self.context, flowrule_entries={
- 'nsi': 253,
- 'ingress': 'e1229670-2a07-450d-bdc9-34e71c301206',
- 'next_hops': None,
- 'add_fcs': [],
- 'segment_id': 43,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:c5:f3',
- 'network_type': 'gre',
- 'local_endpoint': u'10.0.0.2',
- 'node_type': 'dst_node',
- 'egress': None,
- 'next_group_id': None,
- 'host_id': u'test2',
- 'nsp': 256,
- 'portchain_id': '8cba323e-5e67-4df0-a4b0-7e1ef486a656',
- 'del_fcs': [{
- 'source_port_range_min': 100,
- 'destination_ip_prefix': u'10.200.0.0/16',
- 'protocol': 'tcp',
- 'logical_destination_port': (
- 'e1229670-2a07-450d-bdc9-34e71c301206'),
- 'l7_parameters': {},
- 'source_port_range_max': 100,
- 'source_ip_prefix': u'10.100.0.0/16',
- 'destination_port_range_min': 300,
- 'ethertype': 'IPv4',
- 'destination_port_range_max': 300,
- 'logical_source_port': (
- '9bedd01e-c216-4dfd-b48e-fbd5c8212ba4')
- }],
- 'id': '611bdc42-12b3-4639-8faf-83da4e6403f8'
- }
- )
- self.assertEqual(
- self.executed_cmds, [
- ]
- )
- self.assertEqual(
- self.deleted_flows, [{
- 'dl_type': 2048,
- 'in_port': 42,
- 'nw_dst': u'10.200.0.0/16',
- 'nw_proto': 6,
- 'nw_src': u'10.100.0.0/16',
- 'table': 0,
- 'tp_dst': '0x12c/0xffff',
- 'tp_src': '0x64/0xffff'
- }, {
- 'dl_type': 34887,
- 'mpls_label': '65791',
- 'table': 31
- }, {
- 'dl_dst': '00:01:02:03:05:07',
- 'dl_type': 34887,
- 'mpls_label': '65790',
- 'table': 5
- }]
- )
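- # group_refcnt is 1 in the rules above, so removing the chain is
- # expected to delete group 1 as well.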
- self.assertEqual(
- self.deleted_groups, [1]
- )
-
- def test_delete_flow_rules_flow_classifiers_port_pairs(self):
- self.port_mapping = {
- '8768d2b3-746d-4868-ae0e-e81861c2b4e6': {
- 'port_name': 'port1',
- 'ofport': 6,
- 'vif_mac': '00:01:02:03:05:07'
- },
- '29e38fb2-a643-43b1-baa8-a86596461cd5': {
- 'port_name': 'port2',
- 'ofport': 42,
- 'vif_mac': '00:01:02:03:06:08'
- },
- '82a575e0-6a6e-46ba-a5fc-692407839a85': {
- 'port_name': 'port3',
- 'ofport': 60,
- 'vif_mac': '00:01:02:03:06:09'
- },
- '93466f5d-252e-4552-afc6-5fb3f6019f76': {
- 'port_name': 'port4',
- 'ofport': 25,
- 'vif_mac': '00:01:02:03:06:10'
- }
- }
- self.agent.delete_flow_rules(
- self.context, flowrule_entries={
- 'nsi': 255,
- 'ingress': None,
- 'next_hops': [{
- 'local_endpoint': '10.0.0.2',
- 'ingress': '8768d2b3-746d-4868-ae0e-e81861c2b4e6',
- 'weight': 1,
- 'mac_address': '12:34:56:78:cf:23'
- }],
- 'add_fcs': [],
- 'segment_id': 33,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:ed:01',
- 'network_type': 'gre',
- 'local_endpoint': u'10.0.0.2',
- 'node_type': 'src_node',
- 'egress': '29e38fb2-a643-43b1-baa8-a86596461cd5',
- 'next_group_id': 1,
- 'host_id': 'test1',
- 'nsp': 256,
- 'portchain_id': 'b9570dc9-822b-41fc-a27c-d915a21a3fe8',
- 'del_fcs': [{
- 'source_port_range_min': 100,
- 'destination_ip_prefix': u'10.200.0.0/16',
- 'protocol': u'tcp',
- 'logical_destination_port': (
- '82a575e0-6a6e-46ba-a5fc-692407839a85'),
- 'l7_parameters': {},
- 'source_port_range_max': 100,
- 'source_ip_prefix': '10.100.0.0/16',
- 'destination_port_range_min': 300,
- 'ethertype': 'IPv4',
- 'destination_port_range_max': 300,
- 'logical_source_port': (
- '29e38fb2-a643-43b1-baa8-a86596461cd5')
- }],
- 'id': '73e97aad-8c0f-44e3-bee0-c0a641b00b66'
- }
- )
- self.agent.delete_flow_rules(
- self.context, flowrule_entries={
- 'nsi': 253,
- 'ingress': '82a575e0-6a6e-46ba-a5fc-692407839a85',
- 'next_hops': None,
- 'add_fcs': [],
- 'segment_id': 33,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:a6:84',
- 'network_type': 'gre',
- 'local_endpoint': '10.0.0.2',
- 'node_type': 'dst_node',
- 'egress': None,
- 'next_group_id': None,
- 'host_id': 'test2',
- 'nsp': 256,
- 'portchain_id': 'b9570dc9-822b-41fc-a27c-d915a21a3fe8',
- 'del_fcs': [{
- 'source_port_range_min': 100,
- 'destination_ip_prefix': '10.200.0.0/16',
- 'protocol': u'tcp',
- 'logical_destination_port': (
- '82a575e0-6a6e-46ba-a5fc-692407839a85'),
- 'l7_parameters': {},
- 'source_port_range_max': 100,
- 'source_ip_prefix': u'10.100.0.0/16',
- 'destination_port_range_min': 300,
- 'ethertype': u'IPv4',
- 'destination_port_range_max': 300,
- 'logical_source_port': (
- '29e38fb2-a643-43b1-baa8-a86596461cd5')
- }],
- 'id': 'fa385d84-7d78-44e7-aa8d-7b4a279a14d7'
- }
- )
- self.agent.delete_flow_rules(
- self.context, flowrule_entries={
- 'nsi': 254,
- 'ingress': '8768d2b3-746d-4868-ae0e-e81861c2b4e6',
- 'next_hops': [{
- 'local_endpoint': '10.0.0.2',
- 'ingress': '82a575e0-6a6e-46ba-a5fc-692407839a85',
- 'weight': 1,
- 'mac_address': '12:34:56:78:a6:84'
- }],
- 'add_fcs': [],
- 'segment_id': 33,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:cf:23',
- 'network_type': 'gre',
- 'local_endpoint': '10.0.0.2',
- 'node_type': 'sf_node',
- 'egress': '93466f5d-252e-4552-afc6-5fb3f6019f76',
- 'next_group_id': None,
- 'host_id': 'test3',
- 'nsp': 256,
- 'portchain_id': 'b9570dc9-822b-41fc-a27c-d915a21a3fe8',
- 'del_fcs': [{
- 'source_port_range_min': 100,
- 'destination_ip_prefix': '10.200.0.0/16',
- 'protocol': u'tcp',
- 'logical_destination_port': (
- '82a575e0-6a6e-46ba-a5fc-692407839a85'),
- 'l7_parameters': {},
- 'source_port_range_max': 100,
- 'source_ip_prefix': u'10.100.0.0/16',
- 'destination_port_range_min': 300,
- 'ethertype': u'IPv4',
- 'destination_port_range_max': 300,
- 'logical_source_port': (
- '29e38fb2-a643-43b1-baa8-a86596461cd5')
- }],
- 'id': '07cc65a8-e99b-4175-a2f1-69b87eb8090a'
- }
- )
- self.assertEqual(
- self.executed_cmds, [
- ]
- )
- self.assertEqual(
- self.deleted_flows, [{
- 'dl_type': 2048,
- 'in_port': 42,
- 'nw_dst': u'10.200.0.0/16',
- 'nw_proto': 6,
- 'nw_src': '10.100.0.0/16',
- 'table': 0,
- 'tp_dst': '0x12c/0xffff',
- 'tp_src': '0x64/0xffff'
- }, {
- 'dl_type': 34887,
- 'mpls_label': '65791',
- 'table': 31
- }, {
- 'dl_dst': '00:01:02:03:06:09',
- 'dl_type': 34887,
- 'mpls_label': '65790',
- 'table': 5
- }, {
- 'dl_dst': '00:01:02:03:05:07',
- 'dl_type': 34887,
- 'mpls_label': '65791',
- 'table': 5
- }, {
- 'dl_type': 2048,
- 'in_port': 25,
- 'nw_dst': '10.200.0.0/16',
- 'nw_proto': 6,
- 'nw_src': u'10.100.0.0/16',
- 'table': 0,
- 'tp_dst': '0x12c/0xffff',
- 'tp_src': '0x64/0xffff'
- }, {
- 'dl_type': 34887,
- 'mpls_label': '65790',
- 'table': 31
- }]
- )
- self.assertEqual(
- self.deleted_groups, [1]
- )
-
- def test_init_agent_empty_flowrules(self):
- self.node_flowrules = []
- self.init_agent()
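- # With no flow rules, a restart should install only the four default
- # pipeline flows and no groups.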
- self.assertItemsEqual(
- self.added_flows,
- [{
- 'actions': 'resubmit(,5)',
- 'dl_type': 34887,
- 'priority': 10,
- 'table': 0
- }, {
- 'actions': 'resubmit(,30)',
- 'dl_type': 34887,
- 'priority': 10
- }, {
- 'actions': 'output:1',
- 'priority': 0,
- 'table': 30
- }, {
- 'actions': 'resubmit(,31)',
- 'in_port': 1,
- 'priority': 10,
- 'table': 30
- }]
- )
- self.assertEqual(self.group_mapping, {})
-
- def test_init_agent_portchain_portpairs(self):
- self.port_mapping = {
- '4f72c5fc-37e9-4e6f-8cd8-e8166c4b45c4': {
- 'port_name': 'ingress',
- 'ofport': 6,
- 'vif_mac': '00:01:02:03:05:07'
- },
- '57f35c35-dceb-4934-9a78-b40a0a3e16b3': {
- 'port_name': 'egress',
- 'ofport': 42,
- 'vif_mac': '00:01:02:03:06:08'
- }
- }
- self.node_flowrules = [{
- 'nsi': 254,
- 'ingress': '4f72c5fc-37e9-4e6f-8cd8-e8166c4b45c4',
- 'next_hops': None,
- 'del_fcs': [],
- 'segment_id': 34,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:2d:f4',
- 'network_type': 'gre',
- 'local_endpoint': '10.0.0.2',
- 'node_type': 'sf_node',
- 'egress': '57f35c35-dceb-4934-9a78-b40a0a3e16b3',
- 'next_group_id': None,
- 'host_id': u'test2',
- 'nsp': 256,
- 'portchain_id': '0f604e43-c941-4f42-a96c-8bd027e5507d',
- 'add_fcs': [],
- 'id': 'b6ebb2c3-4e9c-4146-8a74-f3985173dc44'
- }]
- self.init_agent()
- for port_id in self.port_mapping:
- self.agent.sfc_treat_devices_added_updated(port_id)
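- # Re-adding the known ports should replay the sf_node ingress flow on
- # top of the default pipeline.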
- self.assertItemsEqual(
- self.added_flows,
- [{
- 'actions': 'resubmit(,5)',
- 'dl_type': 34887,
- 'priority': 10,
- 'table': 0
- }, {
- 'actions': 'resubmit(,30)',
- 'dl_type': 34887,
- 'priority': 10
- }, {
- 'actions': 'output:1',
- 'priority': 0,
- 'table': 30
- }, {
- 'actions': 'resubmit(,31)',
- 'in_port': 1,
- 'priority': 10,
- 'table': 30
- }, {
- 'actions': 'pop_mpls:0x0800,output:6',
- 'dl_dst': '00:01:02:03:05:07',
- 'dl_type': 34887,
- 'mpls_label': 65791,
- 'priority': 1,
- 'table': agent.SF_SELECTOR
- }]
- )
- self.assertEqual(self.group_mapping, {})
-
- def test_init_agent_portchain_flowclassifiers(self):
- self.port_mapping = {
- '5aa33c52-535a-48eb-a77c-e02329bb9eb7': {
- 'port_name': 'src_port',
- 'ofport': 6,
- 'vif_mac': '00:01:02:03:05:07'
- },
- '079d214c-1aea-439d-bf3c-dad03db47dcb': {
- 'port_name': 'dst_port',
- 'ofport': 42,
- 'vif_mac': '00:01:02:03:06:08'
- }
- }
- self.node_flowrules = [{
- 'nsi': 253,
- 'ingress': '5aa33c52-535a-48eb-a77c-e02329bb9eb7',
- 'next_hops': None,
- 'del_fcs': [],
- 'segment_id': 43,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:ac:22',
- 'network_type': 'gre',
- 'local_endpoint': '10.0.0.3',
- 'node_type': 'dst_node',
- 'egress': None,
- 'next_group_id': None,
- 'host_id': 'test2',
- 'nsp': 256,
- 'portchain_id': 'd66efb47-f080-41da-8499-c6e89327ecc0',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'logical_destination_port': (
- '5aa33c52-535a-48eb-a77c-e02329bb9eb7'),
- 'destination_port_range_min': None,
- 'destination_port_range_max': None,
- 'logical_source_port': '079d214c-1aea-439d-bf3c-dad03db47dcb'
- }],
- 'id': '9d8ec269-874a-42b2-825f-d25858341ec2'
- }, {
- 'nsi': 255,
- 'ingress': None,
- 'next_hops': None,
- 'del_fcs': [],
- 'segment_id': 43,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:e3:b3',
- 'network_type': 'gre',
- 'local_endpoint': '10.0.0.2',
- 'node_type': 'src_node',
- 'egress': '079d214c-1aea-439d-bf3c-dad03db47dcb',
- 'next_group_id': 1,
- 'host_id': 'test1',
- 'nsp': 256,
- 'portchain_id': 'd66efb47-f080-41da-8499-c6e89327ecc0',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'logical_destination_port': (
- '5aa33c52-535a-48eb-a77c-e02329bb9eb7'),
- 'destination_port_range_min': None,
- 'destination_port_range_max': None,
- 'logical_source_port': '079d214c-1aea-439d-bf3c-dad03db47dcb'
- }],
- 'id': '361811ed-2902-4d35-9fe4-a3a2b062ef37'
- }]
- self.init_agent()
- for port_id in self.port_mapping:
- self.agent.sfc_treat_devices_added_updated(port_id)
- self.assertItemsEqual(
- self.added_flows,
- [{
- 'actions': 'resubmit(,5)',
- 'dl_type': 34887,
- 'priority': 10,
- 'table': 0
- }, {
- 'actions': 'resubmit(,30)',
- 'dl_type': 34887,
- 'priority': 10
- }, {
- 'actions': 'output:1',
- 'priority': 0,
- 'table': 30
- }, {
- 'actions': 'resubmit(,31)',
- 'in_port': 1,
- 'priority': 10,
- 'table': 30
- }, {
- 'actions': 'pop_mpls:0x0800,output:6',
- 'dl_dst': '00:01:02:03:05:07',
- 'dl_type': 34887,
- 'mpls_label': 65790,
- 'priority': 1,
- 'table': 5
- }]
- )
- self.assertEqual(self.group_mapping, {})
-
- def test_init_agent_portchain_flow_classifiers_port_pairs(self):
- self.port_mapping = {
- '2881f577-3828-40f2-855d-2f86d63a4733': {
- 'port_name': 'dst_port',
- 'ofport': 6,
- 'vif_mac': '00:01:02:03:05:07'
- },
- '5546e281-319b-4bdd-95c9-37fe4244aeb3': {
- 'port_name': 'ingress',
- 'ofport': 42,
- 'vif_mac': '00:01:02:03:06:08'
- },
- 'c45ccd73-46ad-4d91-b44d-68c15a822521': {
- 'port_name': 'egress',
- 'ofport': 43,
- 'vif_mac': '00:01:02:03:06:09'
- },
- 'd2ebbafb-500e-4926-9751-de73906a1e00': {
- 'port_name': 'src_port',
- 'ofport': 44,
- 'vif_mac': '00:01:02:03:06:10'
- }
- }
- self.node_flowrules = [{
- 'nsi': 253,
- 'ingress': '2881f577-3828-40f2-855d-2f86d63a4733',
- 'next_hops': None,
- 'del_fcs': [],
- 'segment_id': 67,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:17:0c',
- 'network_type': 'gre',
- 'local_endpoint': '10.0.0.3',
- 'node_type': 'dst_node',
- 'egress': None,
- 'next_group_id': None,
- 'host_id': 'test2',
- 'nsp': 256,
- 'portchain_id': 'cddb174c-9e50-4411-b844-41ecb9caf4c4',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'logical_destination_port': (
- '2881f577-3828-40f2-855d-2f86d63a4733'),
- 'destination_port_range_min': None,
- 'destination_port_range_max': None,
- 'logical_source_port': 'd2ebbafb-500e-4926-9751-de73906a1e00'
- }],
- 'id': '752ca419-6729-461f-993f-fbd44bbd0edb'
- }, {
- 'nsi': 254,
- 'ingress': '5546e281-319b-4bdd-95c9-37fe4244aeb3',
- 'next_hops': [{
- 'local_endpoint': '10.0.0.3',
- 'ingress': '2881f577-3828-40f2-855d-2f86d63a4733',
- 'weight': 1,
- 'mac_address': '12:34:56:78:17:0c'
- }],
- 'del_fcs': [],
- 'segment_id': 67,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:ca:de',
- 'network_type': 'gre',
- 'local_endpoint': '10.0.0.4',
- 'node_type': 'sf_node',
- 'egress': 'c45ccd73-46ad-4d91-b44d-68c15a822521',
- 'next_group_id': None,
- 'host_id': 'test4',
- 'nsp': 256,
- 'portchain_id': 'cddb174c-9e50-4411-b844-41ecb9caf4c4',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'logical_destination_port': (
- '2881f577-3828-40f2-855d-2f86d63a4733'),
- 'destination_port_range_min': None,
- 'destination_port_range_max': None,
- 'logical_source_port': 'd2ebbafb-500e-4926-9751-de73906a1e00'
- }],
- 'id': 'f70d81ec-1b7c-4ab4-9cf3-da5375ad47e9'
- }, {
- 'nsi': 255,
- 'ingress': None,
- 'next_hops': [{
- 'local_endpoint': '10.0.0.4',
- 'ingress': '5546e281-319b-4bdd-95c9-37fe4244aeb3',
- 'weight': 1,
- 'mac_address': '12:34:56:78:ca:de'
- }],
- 'del_fcs': [],
- 'segment_id': 67,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:8c:68',
- 'network_type': 'gre',
- 'local_endpoint': '10.0.0.2',
- 'node_type': 'src_node',
- 'egress': 'd2ebbafb-500e-4926-9751-de73906a1e00',
- 'next_group_id': 1,
- 'host_id': 'test1',
- 'nsp': 256,
- 'portchain_id': 'cddb174c-9e50-4411-b844-41ecb9caf4c4',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'logical_destination_port': (
- '2881f577-3828-40f2-855d-2f86d63a4733'),
- 'destination_port_range_min': None,
- 'destination_port_range_max': None,
- 'logical_source_port': 'd2ebbafb-500e-4926-9751-de73906a1e00'
- }],
- 'id': 'f52624f0-81d9-4041-81cf-dfe151d3a949'
- }]
- self.init_agent()
- for port_id in self.port_mapping:
- self.agent.sfc_treat_devices_added_updated(port_id)
- self.assertItemsEqual(
- self.added_flows, [{
- 'actions': 'resubmit(,5)',
- 'dl_type': 34887,
- 'priority': 10,
- 'table': 0
- }, {
- 'actions': 'resubmit(,30)',
- 'dl_type': 34887,
- 'priority': 10
- }, {
- 'actions': 'output:1',
- 'priority': 0,
- 'table': 30
- }, {
- 'actions': 'resubmit(,31)',
- 'in_port': 1,
- 'priority': 10,
- 'table': 30
- }, {
- 'actions': (
- 'push_mpls:0x8847,'
- 'set_mpls_label:65791,'
- 'set_mpls_ttl:255,'
- 'output:2'
- ),
- 'dl_type': 2048,
- 'in_port': 44,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 10,
- 'table': 0,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }, {
- 'actions': 'group:1',
- 'dl_type': 34887,
- 'mpls_label': 65791,
- 'priority': 0,
- 'table': 31
- }, {
- 'actions': (
- 'mod_dl_dst:12:34:56:78:17:0c,'
- 'set_field:67->tun_id,output:[]'
- ),
- 'dl_type': 34887,
- 'mpls_label': 65790,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 0,
- 'table': 31,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }, {
- 'actions': (
- 'push_mpls:0x8847,'
- 'set_mpls_label:65790,'
- 'set_mpls_ttl:254,output:2'
- ),
- 'dl_type': 2048,
- 'in_port': 43,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 10,
- 'table': 0,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }, {
- 'actions': 'pop_mpls:0x0800,output:42',
- 'dl_dst': '00:01:02:03:06:08',
- 'dl_type': 34887,
- 'mpls_label': 65791,
- 'priority': 1,
- 'table': 5
- }, {
- 'actions': 'pop_mpls:0x0800,output:6',
- 'dl_dst': '00:01:02:03:05:07',
- 'dl_type': 34887,
- 'mpls_label': 65790,
- 'priority': 1,
- 'table': 5
- }]
- )
- self.assertEqual(
- self.group_mapping, {
- 1: {
- 'buckets': (
- 'bucket=weight=1,'
- 'mod_dl_dst:12:34:56:78:ca:de,'
- 'set_field:67->tun_id,output:[]'
- ),
- 'group_id': 1,
- 'type': 'select'
- }
- }
- )
-
- def test_init_agent_portchain_multi_port_groups_port_pairs(self):
- self.port_mapping = {
- '495d5bcf-f8ef-47d7-995a-5a8ef2e6d1ea': {
- 'port_name': 'ingress1',
- 'ofport': 6,
- 'vif_mac': '00:01:02:03:05:07'
- },
- '0dd212fb-1e0f-4b1a-abc2-a3a39bbab3ef': {
- 'port_name': 'egress1',
- 'ofport': 42,
- 'vif_mac': '00:01:02:03:06:08'
- },
- '6d7aa494-7796-46ea-9cfe-52d2b0f84217': {
- 'port_name': 'src_port',
- 'ofport': 43,
- 'vif_mac': '00:01:02:03:06:09'
- },
- '028c5816-7d4b-453e-8ec2-f3a084ae992f': {
- 'port_name': 'ingress2',
- 'ofport': 44,
- 'vif_mac': '00:01:02:03:06:10'
- },
- '3e4e8d33-334b-4c67-8e04-143eeb6f8351': {
- 'port_name': 'egress2',
- 'ofport': 45,
- 'vif_mac': '00:01:02:03:06:11'
- },
- '73d1dbc7-ba46-4b16-85a0-73b106a96fa1': {
- 'port_name': 'dst_port',
- 'ofport': 46,
- 'vif_mac': '00:01:02:03:06:12'
- },
- '1778085d-9f81-4e1e-9748-0bafece63344': {
- 'port_name': 'ingress3',
- 'ofport': 47,
- 'vif_mac': '00:01:02:03:06:13'
- },
- 'a47cbe65-ea3f-4faa-af27-8212a121c91f': {
- 'port_name': 'egress3',
- 'ofport': 48,
- 'vif_mac': '00:01:02:03:06:14'
- }
- }
- self.node_flowrules = [{
- 'nsi': 254,
- 'ingress': '495d5bcf-f8ef-47d7-995a-5a8ef2e6d1ea',
- 'next_hops': [{
- 'local_endpoint': '10.0.0.6',
- 'ingress': '73d1dbc7-ba46-4b16-85a0-73b106a96fa1',
- 'weight': 1,
- 'mac_address': '12:34:56:78:51:cc'
- }],
- 'del_fcs': [],
- 'segment_id': 7,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:1d:84',
- 'network_type': 'gre',
- 'local_endpoint': '10.0.0.4',
- 'node_type': 'sf_node',
- 'egress': '0dd212fb-1e0f-4b1a-abc2-a3a39bbab3ef',
- 'next_group_id': 2,
- 'host_id': 'test3',
- 'nsp': 256,
- 'portchain_id': '0aa6b9fe-6b5e-4b72-91aa-45bce6587ca7',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'logical_destination_port': (
- 'a47cbe65-ea3f-4faa-af27-8212a121c91f'),
- 'destination_port_range_min': None,
- 'destination_port_range_max': None,
- 'logical_source_port': '6d7aa494-7796-46ea-9cfe-52d2b0f84217'
- }],
- 'id': '1fe85cf2-41fb-4b30-80de-4ae35d3c2b1c'
- }, {
- 'nsi': 255,
- 'ingress': None,
- 'next_hops': [{
- 'local_endpoint': '10.0.0.4',
- 'ingress': '495d5bcf-f8ef-47d7-995a-5a8ef2e6d1ea',
- 'weight': 1,
- 'mac_address': '12:34:56:78:1d:84'
- }],
- 'del_fcs': [],
- 'segment_id': 7,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:45:d7',
- 'network_type': 'gre',
- 'local_endpoint': '10.0.0.2',
- 'node_type': 'src_node',
- 'egress': '6d7aa494-7796-46ea-9cfe-52d2b0f84217',
- 'next_group_id': 1,
- 'host_id': 'test1',
- 'nsp': 256,
- 'portchain_id': '0aa6b9fe-6b5e-4b72-91aa-45bce6587ca7',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'logical_destination_port': (
- 'a47cbe65-ea3f-4faa-af27-8212a121c91f'),
- 'destination_port_range_min': None,
- 'destination_port_range_max': None,
- 'logical_source_port': '6d7aa494-7796-46ea-9cfe-52d2b0f84217'
- }],
- 'id': '3c4b700b-e993-4378-b41a-95f609b3c799'
- }, {
- 'nsi': 252,
- 'ingress': '028c5816-7d4b-453e-8ec2-f3a084ae992f',
- 'next_hops': [{
- 'local_endpoint': '10.0.0.3',
- 'ingress': 'a47cbe65-ea3f-4faa-af27-8212a121c91f',
- 'weight': 1,
- 'mac_address': '12:34:56:78:54:76'
- }],
- 'del_fcs': [],
- 'segment_id': 7,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:47:34',
- 'network_type': 'gre',
- 'local_endpoint': '10.0.0.8',
- 'node_type': 'sf_node',
- 'egress': '3e4e8d33-334b-4c67-8e04-143eeb6f8351',
- 'next_group_id': None,
- 'host_id': 'test8',
- 'nsp': 256,
- 'portchain_id': '0aa6b9fe-6b5e-4b72-91aa-45bce6587ca7',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'logical_destination_port': (
- 'a47cbe65-ea3f-4faa-af27-8212a121c91f'),
- 'destination_port_range_min': None,
- 'destination_port_range_max': None,
- 'logical_source_port': '6d7aa494-7796-46ea-9cfe-52d2b0f84217'
- }],
- 'id': '05574d93-104e-425f-8a30-640721f2c749'
- }, {
- 'nsi': 253,
- 'ingress': '73d1dbc7-ba46-4b16-85a0-73b106a96fa1',
- 'next_hops': [{
- 'local_endpoint': '10.0.0.8',
- 'ingress': '028c5816-7d4b-453e-8ec2-f3a084ae992f',
- 'weight': 1,
- 'mac_address': '12:34:56:78:47:34'
- }],
- 'del_fcs': [],
- 'segment_id': 7,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:51:cc',
- 'network_type': 'gre',
- 'local_endpoint': '10.0.0.6',
- 'node_type': 'sf_node',
- 'egress': '1778085d-9f81-4e1e-9748-0bafece63344',
- 'next_group_id': 3,
- 'host_id': 'test5',
- 'nsp': 256,
- 'portchain_id': '0aa6b9fe-6b5e-4b72-91aa-45bce6587ca7',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'logical_destination_port': (
- 'a47cbe65-ea3f-4faa-af27-8212a121c91f'),
- 'destination_port_range_min': None,
- 'destination_port_range_max': None,
- 'logical_source_port': '6d7aa494-7796-46ea-9cfe-52d2b0f84217'
- }],
- 'id': '5038a916-93de-4734-a830-d88c9d65566c'
- }, {
- 'nsi': 251,
- 'ingress': 'a47cbe65-ea3f-4faa-af27-8212a121c91f',
- 'next_hops': None,
- 'del_fcs': [],
- 'segment_id': 7,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:54:76',
- 'network_type': 'gre',
- 'local_endpoint': '10.0.0.3',
- 'node_type': 'dst_node',
- 'egress': None,
- 'next_group_id': None,
- 'host_id': 'test2',
- 'nsp': 256,
- 'portchain_id': '0aa6b9fe-6b5e-4b72-91aa-45bce6587ca7',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'logical_destination_port': (
- 'a47cbe65-ea3f-4faa-af27-8212a121c91f'),
- 'destination_port_range_min': None,
- 'destination_port_range_max': None,
- 'logical_source_port': '6d7aa494-7796-46ea-9cfe-52d2b0f84217'
- }],
- 'id': '42b8abe6-5bfa-47c5-a992-771e333dae52'
- }]
- self.init_agent()
- for port_id in self.port_mapping:
- self.agent.sfc_treat_devices_added_updated(port_id)
- self.assertItemsEqual(
- self.added_flows, [{
- 'actions': 'resubmit(,5)',
- 'dl_type': 34887,
- 'priority': 10,
- 'table': 0
- }, {
- 'actions': 'resubmit(,30)',
- 'dl_type': 34887,
- 'priority': 10
- }, {
- 'actions': 'output:1',
- 'priority': 0,
- 'table': 30
- }, {
- 'actions': 'resubmit(,31)',
- 'in_port': 1,
- 'priority': 10,
- 'table': 30
- }, {
- 'actions': (
- 'push_mpls:0x8847,'
- 'set_mpls_label:65789,'
- 'set_mpls_ttl:253,output:2'
- ),
- 'dl_type': 2048,
- 'in_port': 47,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 10,
- 'table': 0,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }, {
- 'actions': 'group:3',
- 'dl_type': 34887,
- 'mpls_label': 65789,
- 'priority': 0,
- 'table': 31
- }, {
- 'actions': (
- 'pop_mpls:0x0800,'
- 'output:46'
- ),
- 'dl_dst': '00:01:02:03:06:12',
- 'dl_type': 34887,
- 'mpls_label': 65790,
- 'priority': 1,
- 'table': 5
- }, {
- 'actions': (
- 'mod_dl_dst:12:34:56:78:54:76,'
- 'set_field:7->tun_id,output:[]'
- ),
- 'dl_type': 34887,
- 'mpls_label': 65788,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 0,
- 'table': 31,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }, {
- 'actions': (
- 'push_mpls:0x8847,'
- 'set_mpls_label:65788,'
- 'set_mpls_ttl:252,output:2'
- ),
- 'dl_type': 2048,
- 'in_port': 45,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 10,
- 'table': 0,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }, {
- 'actions': 'pop_mpls:0x0800,output:44',
- 'dl_dst': '00:01:02:03:06:10',
- 'dl_type': 34887,
- 'mpls_label': 65789,
- 'priority': 1,
- 'table': 5
- }, {
- 'actions': (
- 'push_mpls:0x8847,'
- 'set_mpls_label:65791,'
- 'set_mpls_ttl:255,output:2'
- ),
- 'dl_type': 2048,
- 'in_port': 43,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 10,
- 'table': 0,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }, {
- 'actions': 'group:1',
- 'dl_type': 34887,
- 'mpls_label': 65791,
- 'priority': 0,
- 'table': 31
- }, {
- 'actions': (
- 'push_mpls:0x8847,'
- 'set_mpls_label:65790,'
- 'set_mpls_ttl:254,output:2'
- ),
- 'dl_type': 2048,
- 'in_port': 42,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 10,
- 'table': 0,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }, {
- 'actions': 'group:2',
- 'dl_type': 34887,
- 'mpls_label': 65790,
- 'priority': 0,
- 'table': 31
- }, {
- 'actions': 'pop_mpls:0x0800,output:6',
- 'dl_dst': '00:01:02:03:05:07',
- 'dl_type': 34887,
- 'mpls_label': 65791,
- 'priority': 1,
- 'table': 5
- }, {
- 'actions': 'pop_mpls:0x0800,output:48',
- 'dl_dst': '00:01:02:03:06:14',
- 'dl_type': 34887,
- 'mpls_label': 65788,
- 'priority': 1,
- 'table': 5
- }]
- )
- self.assertEqual(
- self.group_mapping, {
- 1: {
- 'buckets': (
- 'bucket=weight=1,'
- 'mod_dl_dst:12:34:56:78:1d:84,'
- 'set_field:7->tun_id,output:[]'
- ),
- 'group_id': 1,
- 'type': 'select'
- },
- 2: {
- 'buckets': (
- 'bucket=weight=1,'
- 'mod_dl_dst:12:34:56:78:51:cc,'
- 'set_field:7->tun_id,output:[]'
- ),
- 'group_id': 2,
- 'type': 'select'
- },
- 3: {
- 'buckets': (
- 'bucket=weight=1,'
- 'mod_dl_dst:12:34:56:78:47:34,'
- 'set_field:7->tun_id,output:[]'
- ),
- 'group_id': 3,
- 'type': 'select'
- }
- }
- )
-
- def test_init_agent_portchain_port_group_multi_port_pairs(self):
- self.port_mapping = {
- '8849af69-117d-4db9-83fa-85329b0efbd6': {
- 'port_name': 'ingress1',
- 'ofport': 6,
- 'vif_mac': '00:01:02:03:05:07'
- },
- '51f58f0f-6870-4e75-9fd1-13cf3ce29b3e': {
- 'port_name': 'egress1',
- 'ofport': 42,
- 'vif_mac': '00:01:02:03:06:08'
- },
- 'a57a8160-a202-477b-aca1-e7c006bc93a2': {
- 'port_name': 'src_port',
- 'ofport': 43,
- 'vif_mac': '00:01:02:03:06:09'
- },
- '23d02749-7f2b-456d-b9f1-7869300375d4': {
- 'port_name': 'ingress2',
- 'ofport': 44,
- 'vif_mac': '00:01:02:03:06:10'
- },
- 'c5dacf1c-f84a-43e0-8873-b2cba77970af': {
- 'port_name': 'egress2',
- 'ofport': 45,
- 'vif_mac': '00:01:02:03:06:11'
- },
- '2b17abfa-7afb-4e83-8e15-ad21a6044bb7': {
- 'port_name': 'dst_port',
- 'ofport': 46,
- 'vif_mac': '00:01:02:03:06:12'
- },
- 'b299c792-28c8-4f6a-84a0-589163a9b1d4': {
- 'port_name': 'ingress3',
- 'ofport': 47,
- 'vif_mac': '00:01:02:03:06:13'
- },
- '60d47d04-42c0-4478-9136-6247fd5d058d': {
- 'port_name': 'egress3',
- 'ofport': 48,
- 'vif_mac': '00:01:02:03:06:14'
- }
- }
- self.node_flowrules = [{
- 'nsi': 254,
- 'ingress': '8849af69-117d-4db9-83fa-85329b0efbd6',
- 'next_hops': [{
- 'local_endpoint': '10.0.0.3',
- 'ingress': '2b17abfa-7afb-4e83-8e15-ad21a6044bb7',
- 'weight': 1,
- 'mac_address': '12:34:56:78:68:3a'
- }],
- 'del_fcs': [],
- 'segment_id': 68,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:fe:38',
- 'network_type': 'gre',
- 'local_endpoint': '10.0.0.6',
- 'node_type': 'sf_node',
- 'egress': '51f58f0f-6870-4e75-9fd1-13cf3ce29b3e',
- 'next_group_id': None,
- 'host_id': 'test6',
- 'nsp': 256,
- 'portchain_id': '10f6a764-6963-4b8e-9ae4-a1e5e805915e',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'logical_destination_port': (
- '2b17abfa-7afb-4e83-8e15-ad21a6044bb7'),
- 'destination_port_range_min': None,
- 'destination_port_range_max': None,
- 'logical_source_port': 'a57a8160-a202-477b-aca1-e7c006bc93a2'
- }],
- 'id': '1409e7b8-ed6f-41ae-ba6b-8ef96bbb8da9'
- }, {
- 'nsi': 255,
- 'ingress': None,
- 'next_hops': [{
- 'local_endpoint': '10.0.0.4',
- 'ingress': 'b299c792-28c8-4f6a-84a0-589163a9b1d4',
- 'weight': 1,
- 'mac_address': '12:34:56:78:58:ee'
- }, {
- 'local_endpoint': '10.0.0.6',
- 'ingress': '8849af69-117d-4db9-83fa-85329b0efbd6',
- 'weight': 1,
- 'mac_address': '12:34:56:78:fe:38'
- }, {
- 'local_endpoint': '10.0.0.8',
- 'ingress': '23d02749-7f2b-456d-b9f1-7869300375d4',
- 'weight': 1,
- 'mac_address': '12:34:56:78:32:30'
- }],
- 'del_fcs': [],
- 'segment_id': 68,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:e0:a9',
- 'network_type': 'gre',
- 'local_endpoint': '10.0.0.2',
- 'node_type': 'src_node',
- 'egress': 'a57a8160-a202-477b-aca1-e7c006bc93a2',
- 'next_group_id': 1,
- 'host_id': 'test1',
- 'nsp': 256,
- 'portchain_id': '10f6a764-6963-4b8e-9ae4-a1e5e805915e',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'logical_destination_port': (
- '2b17abfa-7afb-4e83-8e15-ad21a6044bb7'),
- 'destination_port_range_min': None,
- 'destination_port_range_max': None,
- 'logical_source_port': (
- 'a57a8160-a202-477b-aca1-e7c006bc93a2')
- }],
- 'id': '6c686bd6-a064-4650-ace7-0bd34fa4238a'
- }, {
- 'nsi': 254,
- 'ingress': '23d02749-7f2b-456d-b9f1-7869300375d4',
- 'next_hops': [{
- 'local_endpoint': '10.0.0.3',
- 'ingress': '2b17abfa-7afb-4e83-8e15-ad21a6044bb7',
- 'weight': 1,
- 'mac_address': '12:34:56:78:68:3a'
- }],
- 'del_fcs': [],
- 'segment_id': 68,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:32:30',
- 'network_type': 'gre',
- 'local_endpoint': '10.0.0.8',
- 'node_type': 'sf_node',
- 'egress': 'c5dacf1c-f84a-43e0-8873-b2cba77970af',
- 'next_group_id': None,
- 'host_id': 'test8',
- 'nsp': 256,
- 'portchain_id': '10f6a764-6963-4b8e-9ae4-a1e5e805915e',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'logical_destination_port': (
- '2b17abfa-7afb-4e83-8e15-ad21a6044bb7'),
- 'destination_port_range_min': None,
- 'destination_port_range_max': None,
- 'logical_source_port': (
- 'a57a8160-a202-477b-aca1-e7c006bc93a2')
- }],
- 'id': '1409e7b8-ed6f-41ae-ba6b-8ef96bbb8da9'
- }, {
- 'nsi': 253,
- 'ingress': '2b17abfa-7afb-4e83-8e15-ad21a6044bb7',
- 'next_hops': None,
- 'del_fcs': [],
- 'segment_id': 68,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:68:3a',
- 'network_type': 'gre',
- 'local_endpoint': '10.0.0.3',
- 'node_type': 'dst_node',
- 'egress': None,
- 'next_group_id': None,
- 'host_id': 'test2',
- 'nsp': 256,
- 'portchain_id': '10f6a764-6963-4b8e-9ae4-a1e5e805915e',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'logical_destination_port': (
- '2b17abfa-7afb-4e83-8e15-ad21a6044bb7'),
- 'destination_port_range_min': None,
- 'destination_port_range_max': None,
- 'logical_source_port': (
- 'a57a8160-a202-477b-aca1-e7c006bc93a2')
- }],
- 'id': '12a279c1-cf81-4c1b-bac3-e9690465aeaf'
- }, {
- 'nsi': 254,
- 'ingress': 'b299c792-28c8-4f6a-84a0-589163a9b1d4',
- 'next_hops': [{
- 'local_endpoint': '10.0.0.3',
- 'ingress': '2b17abfa-7afb-4e83-8e15-ad21a6044bb7',
- 'weight': 1,
- 'mac_address': '12:34:56:78:68:3a'
- }],
- 'del_fcs': [],
- 'segment_id': 68,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:58:ee',
- 'network_type': 'gre',
- 'local_endpoint': '10.0.0.4',
- 'node_type': 'sf_node',
- 'egress': '60d47d04-42c0-4478-9136-6247fd5d058d',
- 'next_group_id': None,
- 'host_id': 'test4',
- 'nsp': 256,
- 'portchain_id': '10f6a764-6963-4b8e-9ae4-a1e5e805915e',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'logical_destination_port': (
- '2b17abfa-7afb-4e83-8e15-ad21a6044bb7'),
- 'destination_port_range_min': None,
- 'destination_port_range_max': None,
- 'logical_source_port': 'a57a8160-a202-477b-aca1-e7c006bc93a2'
- }],
- 'id': '1409e7b8-ed6f-41ae-ba6b-8ef96bbb8da9'
- }]
- self.init_agent()
- for port_id in self.port_mapping:
- self.agent.sfc_treat_devices_added_updated(port_id)
- self.assertItemsEqual(
- self.added_flows, [{
- 'priority': 10,
- 'table': 0,
- 'dl_type': 34887,
- 'actions': 'resubmit(,5)'
- }, {
- 'dl_type': 34887,
- 'priority': 10,
- 'actions': 'resubmit(,30)'
- }, {
- 'priority': 0,
- 'table': 30,
- 'actions': 'output:1'
- }, {
- 'priority': 10,
- 'table': 30,
- 'actions': 'resubmit(,31)',
- 'in_port': 1
- }, {
- 'dl_type': 34887,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'actions': (
- 'mod_dl_dst:12:34:56:78:68:3a,'
- 'set_field:68->tun_id,output:[]'
- ),
- 'priority': 0,
- 'mpls_label': 65790,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0',
- 'table': 31,
- 'nw_src': '0.0.0.0/0.0.0.0'
- }, {
- 'dl_type': 2048,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'actions': (
- 'push_mpls:0x8847,'
- 'set_mpls_label:65790,'
- 'set_mpls_ttl:254,output:2'
- ),
- 'priority': 10,
- 'tp_dst': '0/0x0',
- 'table': 0,
- 'tp_src': '0/0x0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'in_port': 42
- }, {
- 'dl_type': 34887,
- 'actions': 'pop_mpls:0x0800,output:6',
- 'priority': 1,
- 'mpls_label': 65791,
- 'table': 5,
- 'dl_dst': '00:01:02:03:05:07'
- }, {
- 'dl_type': 2048,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'actions': (
- 'push_mpls:0x8847,'
- 'set_mpls_label:65790,'
- 'set_mpls_ttl:254,output:2'
- ),
- 'priority': 10,
- 'tp_dst': '0/0x0',
- 'table': 0,
- 'tp_src': '0/0x0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'in_port': 45
- }, {
- 'dl_type': 34887,
- 'actions': 'pop_mpls:0x0800,output:44',
- 'priority': 1,
- 'mpls_label': 65791,
- 'table': 5,
- 'dl_dst': '00:01:02:03:06:10'
- }, {
- 'dl_type': 2048,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'actions': (
- 'push_mpls:0x8847,'
- 'set_mpls_label:65790,'
- 'set_mpls_ttl:254,output:2'
- ),
- 'priority': 10,
- 'tp_dst': '0/0x0',
- 'table': 0,
- 'tp_src': '0/0x0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'in_port': 48
- }, {
- 'dl_type': 34887,
- 'actions': 'pop_mpls:0x0800,output:47',
- 'priority': 1,
- 'mpls_label': 65791,
- 'table': 5,
- 'dl_dst': '00:01:02:03:06:13'
- }, {
- 'dl_type': 2048,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'actions': (
- 'push_mpls:0x8847,'
- 'set_mpls_label:65791,'
- 'set_mpls_ttl:255,output:2'
- ),
- 'priority': 10,
- 'tp_dst': '0/0x0',
- 'table': 0,
- 'tp_src': '0/0x0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'in_port': 43
- }, {
- 'priority': 0,
- 'table': 31,
- 'dl_type': 34887,
- 'mpls_label': 65791,
- 'actions': 'group:1'
- }, {
- 'dl_type': 34887,
- 'actions': 'pop_mpls:0x0800,output:46',
- 'priority': 1,
- 'mpls_label': 65790,
- 'table': 5,
- 'dl_dst': '00:01:02:03:06:12'
- }]
- )
- self.assertEqual(
- self.group_mapping, {
- 1: {
- 'buckets': (
- 'bucket=weight=1,'
- 'mod_dl_dst:12:34:56:78:58:ee,'
- 'set_field:68->tun_id,output:[],'
- 'bucket=weight=1,'
- 'mod_dl_dst:12:34:56:78:fe:38,'
- 'set_field:68->tun_id,output:[],'
- 'bucket=weight=1,'
- 'mod_dl_dst:12:34:56:78:32:30,'
- 'set_field:68->tun_id,output:[]'
- ),
- 'group_id': 1,
- 'type': 'select'
- }
- }
- )
-
- def test_init_agent_portchain_multi_flow_classifiers_port_pairs(self):
- self.port_mapping = {
- '7b718ad7-c2cc-4de0-9ac0-d5f4b6e975aa': {
- 'port_name': 'src_port1',
- 'ofport': 6,
- 'vif_mac': '00:01:02:03:05:07'
- },
- '9ac01d29-797a-4904-97a0-eecc7661b2ad': {
- 'port_name': 'ingress',
- 'ofport': 42,
- 'vif_mac': '00:01:02:03:06:08'
- },
- '02ebda8f-44e5-41ee-8d80-ec47b3c2732e': {
- 'port_name': 'egress',
- 'ofport': 43,
- 'vif_mac': '00:01:02:03:06:09'
- },
- '32971131-e44c-4aad-85f9-7d9f10d07393': {
- 'port_name': 'src_port2',
- 'ofport': 44,
- 'vif_mac': '00:01:02:03:06:10'
- },
- 'b7c69625-9cde-48dd-8858-5d773b002e73': {
- 'port_name': 'dst_port1',
- 'ofport': 45,
- 'vif_mac': '00:01:02:03:06:11'
- },
- '2b7e8e42-b35d-4d49-8397-62088efe144f': {
- 'port_name': 'dst_port2',
- 'ofport': 46,
- 'vif_mac': '00:01:02:03:06:12'
- }
- }
- self.node_flowrules = [{
- 'nsi': 255,
- 'ingress': None,
- 'next_hops': [{
- 'local_endpoint': '10.0.0.6',
- 'ingress': '9ac01d29-797a-4904-97a0-eecc7661b2ad',
- 'weight': 1,
- 'mac_address': '12:34:56:78:52:39'
- }],
- 'del_fcs': [],
- 'segment_id': 82,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:65:d7',
- 'network_type': 'gre',
- 'local_endpoint': '10.0.0.4',
- 'node_type': 'src_node',
- 'egress': '7b718ad7-c2cc-4de0-9ac0-d5f4b6e975aa',
- 'next_group_id': 1,
- 'host_id': 'test3',
- 'nsp': 256,
- 'portchain_id': 'd92114e8-56df-4bd7-9cf2-fce5ac01c94f',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'logical_destination_port': (
- '2b7e8e42-b35d-4d49-8397-62088efe144f'),
- 'destination_port_range_min': None,
- 'destination_port_range_max': None,
- 'logical_source_port': '7b718ad7-c2cc-4de0-9ac0-d5f4b6e975aa'
- }],
- 'id': '44c469bf-6c48-4f8f-bb4f-de87b44b02b6'
- }, {
- 'nsi': 254,
- 'ingress': '9ac01d29-797a-4904-97a0-eecc7661b2ad',
- 'next_hops': [{
- 'local_endpoint': '10.0.0.3',
- 'ingress': 'b7c69625-9cde-48dd-8858-5d773b002e73',
- 'weight': 1,
- 'mac_address': '12:34:56:78:36:e9'
- }, {
- 'local_endpoint': '10.0.0.5',
- 'ingress': '2b7e8e42-b35d-4d49-8397-62088efe144f',
- 'weight': 1,
- 'mac_address': '12:34:56:78:51:9a'
- }],
- 'del_fcs': [],
- 'segment_id': 82,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:52:39',
- 'network_type': 'gre',
- 'local_endpoint': '10.0.0.6',
- 'node_type': 'sf_node',
- 'egress': '02ebda8f-44e5-41ee-8d80-ec47b3c2732e',
- 'next_group_id': None,
- 'host_id': 'test6',
- 'nsp': 256,
- 'portchain_id': 'd92114e8-56df-4bd7-9cf2-fce5ac01c94f',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'logical_destination_port': (
- 'b7c69625-9cde-48dd-8858-5d773b002e73'),
- 'destination_port_range_min': None,
- 'destination_port_range_max': None,
- 'logical_source_port': '32971131-e44c-4aad-85f9-7d9f10d07393'
- }, {
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'logical_destination_port': (
- '2b7e8e42-b35d-4d49-8397-62088efe144f'),
- 'destination_port_range_min': None,
- 'destination_port_range_max': None,
- 'logical_source_port': '7b718ad7-c2cc-4de0-9ac0-d5f4b6e975aa'
- }],
- 'id': 'c065e0c3-a904-4bac-adf2-f038b717c9c2'
- }, {
- 'nsi': 255,
- 'ingress': None,
- 'next_hops': [{
- 'local_endpoint': '10.0.0.6',
- 'ingress': '9ac01d29-797a-4904-97a0-eecc7661b2ad',
- 'weight': 1,
- 'mac_address': '12:34:56:78:52:39'
- }],
- 'del_fcs': [],
- 'segment_id': 82,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:41:cf',
- 'network_type': 'gre',
- 'local_endpoint': '10.0.0.2',
- 'node_type': 'src_node',
- 'egress': '32971131-e44c-4aad-85f9-7d9f10d07393',
- 'next_group_id': 1,
- 'host_id': 'test1',
- 'nsp': 256,
- 'portchain_id': 'd92114e8-56df-4bd7-9cf2-fce5ac01c94f',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'logical_destination_port': (
- 'b7c69625-9cde-48dd-8858-5d773b002e73'),
- 'destination_port_range_min': None,
- 'destination_port_range_max': None,
- 'logical_source_port': (
- '32971131-e44c-4aad-85f9-7d9f10d07393')
- }],
- 'id': '44c469bf-6c48-4f8f-bb4f-de87b44b02b6'
- }, {
- 'nsi': 253,
- 'ingress': 'b7c69625-9cde-48dd-8858-5d773b002e73',
- 'next_hops': None,
- 'del_fcs': [],
- 'segment_id': 82,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:36:e9',
- 'network_type': 'gre',
- 'local_endpoint': '10.0.0.3',
- 'node_type': 'dst_node',
- 'egress': None,
- 'next_group_id': None,
- 'host_id': 'test2',
- 'nsp': 256,
- 'portchain_id': 'd92114e8-56df-4bd7-9cf2-fce5ac01c94f',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'logical_destination_port': (
- 'b7c69625-9cde-48dd-8858-5d773b002e73'),
- 'destination_port_range_min': None,
- 'destination_port_range_max': None,
- 'logical_source_port': (
- '32971131-e44c-4aad-85f9-7d9f10d07393')
- }],
- 'id': '4a61e567-4210-41d9-af82-e01b9da47230'
- }, {
- 'nsi': 253,
- 'ingress': '2b7e8e42-b35d-4d49-8397-62088efe144f',
- 'next_hops': None,
- 'del_fcs': [],
- 'segment_id': 82,
- 'group_refcnt': 1,
- 'mac_address': '12:34:56:78:51:9a',
- 'network_type': 'gre',
- 'local_endpoint': '10.0.0.5',
- 'node_type': 'dst_node',
- 'egress': None,
- 'next_group_id': None,
- 'host_id': 'test4',
- 'nsp': 256,
- 'portchain_id': 'd92114e8-56df-4bd7-9cf2-fce5ac01c94f',
- 'add_fcs': [{
- 'source_port_range_min': None,
- 'destination_ip_prefix': None,
- 'protocol': None,
- 'ethertype': 'IPv4',
- 'l7_parameters': {},
- 'source_port_range_max': None,
- 'source_ip_prefix': None,
- 'logical_destination_port': (
- '2b7e8e42-b35d-4d49-8397-62088efe144f'),
- 'destination_port_range_min': None,
- 'destination_port_range_max': None,
- 'logical_source_port': (
- '7b718ad7-c2cc-4de0-9ac0-d5f4b6e975aa')
- }],
- 'id': '4a61e567-4210-41d9-af82-e01b9da47230'
- }]
- self.init_agent()
- for port_id in self.port_mapping:
- self.agent.sfc_treat_devices_added_updated(port_id)
- self.assertItemsEqual(
- self.added_flows, [{
- 'actions': 'resubmit(,5)',
- 'dl_type': 34887,
- 'priority': 10,
- 'table': 0
- }, {
- 'actions': 'resubmit(,30)',
- 'dl_type': 34887,
- 'priority': 10
- }, {
- 'actions': 'output:1',
- 'priority': 0,
- 'table': 30
- }, {
- 'actions': 'resubmit(,31)',
- 'in_port': 1,
- 'priority': 10,
- 'table': 30
- }, {
- 'actions': (
- 'push_mpls:0x8847,'
- 'set_mpls_label:65791,'
- 'set_mpls_ttl:255,output:2'
- ),
- 'dl_type': 2048,
- 'in_port': 44,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 10,
- 'table': 0,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }, {
- 'actions': 'group:1',
- 'dl_type': 34887,
- 'mpls_label': 65791,
- 'priority': 0,
- 'table': 31
- }, {
- 'actions': 'pop_mpls:0x0800,output:45',
- 'dl_dst': '00:01:02:03:06:11',
- 'dl_type': 34887,
- 'mpls_label': 65790,
- 'priority': 1,
- 'table': 5
- }, {
- 'actions': (
- 'mod_dl_dst:12:34:56:78:36:e9,'
- 'set_field:82->tun_id,output:[]'
- ),
- 'dl_type': 34887,
- 'mpls_label': 65790,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 0,
- 'table': 31,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }, {
- 'actions': (
- 'push_mpls:0x8847,'
- 'set_mpls_label:65790,'
- 'set_mpls_ttl:254,output:2'
- ),
- 'dl_type': 2048,
- 'in_port': 43,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 10,
- 'table': 0,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }, {
- 'actions': (
- 'mod_dl_dst:12:34:56:78:51:9a,'
- 'set_field:82->tun_id,output:[]'
- ),
- 'dl_type': 34887,
- 'mpls_label': 65790,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 0,
- 'table': 31,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }, {
- 'actions': 'pop_mpls:0x0800,output:42',
- 'dl_dst': '00:01:02:03:06:08',
- 'dl_type': 34887,
- 'mpls_label': 65791,
- 'priority': 1,
- 'table': 5
- }, {
- 'actions': 'pop_mpls:0x0800,output:46',
- 'dl_dst': '00:01:02:03:06:12',
- 'dl_type': 34887,
- 'mpls_label': 65790,
- 'priority': 1,
- 'table': 5
- }, {
- 'actions': (
- 'push_mpls:0x8847,'
- 'set_mpls_label:65791,'
- 'set_mpls_ttl:255,output:2'
- ),
- 'dl_type': 2048,
- 'in_port': 6,
- 'nw_dst': '0.0.0.0/0.0.0.0',
- 'nw_src': '0.0.0.0/0.0.0.0',
- 'priority': 10,
- 'table': 0,
- 'tp_dst': '0/0x0',
- 'tp_src': '0/0x0'
- }]
- )
- self.assertEqual(
- self.group_mapping, {
- 1: {
- 'buckets': (
- 'bucket=weight=1,'
- 'mod_dl_dst:12:34:56:78:52:39,'
- 'set_field:82->tun_id,output:[]'
- ),
- 'group_id': 1,
- 'type': 'select'
- }
- }
- )
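Note on the MPLS labels exercised by the deleted agent tests above: every fixture pairs nsp 256 with nsi values 255 down to 252, and the labels in the expected flows run 65791 down to 65788, which is consistent with packing the service path header as (nsp << 8) | nsi. A minimal sketch of that presumed encoding; make_mpls_label is a hypothetical name, not an API of the deleted agent:

    def make_mpls_label(nsp, nsi):
        # Presumed label layout implied by the fixtures: path id (nsp)
        # in the high bits, path index (nsi) in the low 8 bits.
        return (nsp << 8) | nsi

    # Matches the deleted fixtures, where nsp=256 and nsi runs 255..252.
    assert make_mpls_label(256, 255) == 65791
    assert make_mpls_label(256, 252) == 65788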
diff --git a/networking_sfc/tests/unit/services/sfc/common/__init__.py b/networking_sfc/tests/unit/services/sfc/common/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/tests/unit/services/sfc/common/__init__.py
+++ /dev/null
diff --git a/networking_sfc/tests/unit/services/sfc/common/test_ovs_ext_lib.py b/networking_sfc/tests/unit/services/sfc/common/test_ovs_ext_lib.py
deleted file mode 100644
index 19e20cc..0000000
--- a/networking_sfc/tests/unit/services/sfc/common/test_ovs_ext_lib.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from neutron.common import exceptions
-from neutron.tests import base
-
-from networking_sfc.services.sfc.common import ovs_ext_lib
-
-
-class GetPortMaskTestCase(base.BaseTestCase):
- def setUp(self):
- super(GetPortMaskTestCase, self).setUp()
-
- def tearDown(self):
- super(GetPortMaskTestCase, self).tearDown()
-
- def test_single_port(self):
- masks = ovs_ext_lib.get_port_mask(100, 100)
- self.assertEqual(masks, ['0x64/0xffff'])
-
- def test_invalid_min_port(self):
- self.assertRaises(
- exceptions.InvalidInput,
- ovs_ext_lib.get_port_mask,
- 0, 100
- )
-
- def test_invalid_max_port(self):
- self.assertRaises(
- exceptions.InvalidInput,
- ovs_ext_lib.get_port_mask,
- 100, 65536
- )
-
- def test_invalid_port_range(self):
- self.assertRaises(
- exceptions.InvalidInput,
- ovs_ext_lib.get_port_mask,
- 100, 99
- )
-
- def test_one_port_mask(self):
- masks = ovs_ext_lib.get_port_mask(100, 101)
- self.assertEqual(masks, ['0x64/0xfffe'])
- masks = ovs_ext_lib.get_port_mask(100, 103)
- self.assertEqual(masks, ['0x64/0xfffc'])
- masks = ovs_ext_lib.get_port_mask(32768, 65535)
- self.assertEqual(masks, ['0x8000/0x8000'])
-
- def test_multi_port_masks(self):
- masks = ovs_ext_lib.get_port_mask(101, 102)
- self.assertEqual(masks, ['0x65/0xffff', '0x66/0xffff'])
- masks = ovs_ext_lib.get_port_mask(101, 104)
- self.assertEqual(
- masks,
- ['0x65/0xffff', '0x66/0xfffe', '0x68/0xffff']
- )
- masks = ovs_ext_lib.get_port_mask(1, 65535)
- self.assertEqual(
- masks, [
- '0x1/0xffff',
- '0x2/0xfffe',
- '0x4/0xfffc',
- '0x8/0xfff8',
- '0x10/0xfff0',
- '0x20/0xffe0',
- '0x40/0xffc0',
- '0x80/0xff80',
- '0x100/0xff00',
- '0x200/0xfe00',
- '0x400/0xfc00',
- '0x800/0xf800',
- '0x1000/0xf000',
- '0x2000/0xe000',
- '0x4000/0xc000',
- '0x8000/0x8000'
- ]
- )
- masks = ovs_ext_lib.get_port_mask(32767, 65535)
- self.assertEqual(
- masks, ['0x7fff/0xffff', '0x8000/0x8000']
- )
diff --git a/networking_sfc/tests/unit/services/sfc/drivers/__init__.py b/networking_sfc/tests/unit/services/sfc/drivers/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/tests/unit/services/sfc/drivers/__init__.py
+++ /dev/null
diff --git a/networking_sfc/tests/unit/services/sfc/drivers/ovs/__init__.py b/networking_sfc/tests/unit/services/sfc/drivers/ovs/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/networking_sfc/tests/unit/services/sfc/drivers/ovs/__init__.py
+++ /dev/null
diff --git a/networking_sfc/tests/unit/services/sfc/test_driver_manager.py b/networking_sfc/tests/unit/services/sfc/test_driver_manager.py
deleted file mode 100644
index c247bf2..0000000
--- a/networking_sfc/tests/unit/services/sfc/test_driver_manager.py
+++ /dev/null
@@ -1,325 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-import mock
-import pkg_resources
-import six
-import stevedore
-
-from oslo_config import cfg
-
-from neutron.tests import base
-
-from networking_sfc.services.sfc.common import config as sfc_config
-from networking_sfc.services.sfc.common import exceptions as sfc_exc
-from networking_sfc.services.sfc import driver_manager as sfc_driver
-
-
-class DriverManagerTestCase(base.BaseTestCase):
- def setUp(self):
- super(DriverManagerTestCase, self).setUp()
-
- @contextlib.contextmanager
- def driver_manager_context(self, drivers):
- cfg.CONF.register_opts(sfc_config.SFC_DRIVER_OPTS, 'sfc')
- backup_driver_names = cfg.CONF.sfc.drivers
- driver_names = [
- driver_name for driver_name in six.iterkeys(drivers)
- ]
- cfg.CONF.set_override('drivers', driver_names, 'sfc')
- iter_entry_points = pkg_resources.iter_entry_points
- find_entry_points = stevedore.ExtensionManager._find_entry_points
- pkg_resources.iter_entry_points = mock.Mock()
- stevedore.ExtensionManager._find_entry_points = mock.Mock()
- driver_entry_points = []
- for driver_name in driver_names:
- driver_class = mock.Mock()
- ep = mock.Mock()
- ep.name = driver_name
- ep.resolve.return_value = driver_class
- driver_class.return_value = drivers[driver_name]
- drivers[driver_name].native_bulk_support = True
- driver_entry_points.append(ep)
- pkg_resources.iter_entry_points.return_value = driver_entry_points
- stevedore.ExtensionManager._find_entry_points.return_value = (
- driver_entry_points
- )
- yield sfc_driver.SfcDriverManager()
- cfg.CONF.set_override('drivers', backup_driver_names, 'sfc')
- pkg_resources.iter_entry_points = iter_entry_points
- stevedore.ExtensionManager._find_entry_points = find_entry_points
-
- def test_initialize_called(self):
- mock_driver1 = mock.Mock()
- mock_driver2 = mock.Mock()
- with self.driver_manager_context({
- 'dummy1': mock_driver1,
- 'dummy2': mock_driver2
- }) as manager:
- manager.initialize()
- mock_driver1.initialize.assert_called_once_with()
- mock_driver2.initialize.assert_called_once_with()
-
- def test_create_port_chain_called(self):
- mock_driver1 = mock.Mock()
- mock_driver2 = mock.Mock()
- with self.driver_manager_context({
- 'dummy1': mock_driver1,
- 'dummy2': mock_driver2
- }) as manager:
- mocked_context = mock.Mock()
- manager.create_port_chain(mocked_context)
- mock_driver1.create_port_chain.assert_called_once_with(
- mocked_context)
- mock_driver2.create_port_chain.assert_called_once_with(
- mocked_context)
-
- def test_create_port_chain_exception(self):
- mock_driver = mock.Mock()
- mock_driver.create_port_chain = mock.Mock(
- side_effect=sfc_exc.SfcException
- )
- with self.driver_manager_context({
- 'dummy': mock_driver,
- }) as manager:
- mocked_context = mock.Mock()
- self.assertRaises(
- sfc_exc.SfcDriverError,
- manager.create_port_chain, mocked_context
- )
-
- def test_update_port_chain_called(self):
- mock_driver1 = mock.Mock()
- mock_driver2 = mock.Mock()
- with self.driver_manager_context({
- 'dummy1': mock_driver1,
- 'dummy2': mock_driver2
- }) as manager:
- mocked_context = mock.Mock()
- manager.update_port_chain(mocked_context)
- mock_driver1.update_port_chain.assert_called_once_with(
- mocked_context)
- mock_driver2.update_port_chain.assert_called_once_with(
- mocked_context)
-
- def test_update_port_chain_exception(self):
- mock_driver = mock.Mock()
- mock_driver.update_port_chain = mock.Mock(
- side_effect=sfc_exc.SfcException
- )
- with self.driver_manager_context({
- 'dummy': mock_driver,
- }) as manager:
- mocked_context = mock.Mock()
- self.assertRaises(
- sfc_exc.SfcDriverError,
- manager.update_port_chain, mocked_context
- )
-
- def test_delete_port_chain_called(self):
- mock_driver1 = mock.Mock()
- mock_driver2 = mock.Mock()
- with self.driver_manager_context({
- 'dummy1': mock_driver1,
- 'dummy2': mock_driver2
- }) as manager:
- mocked_context = mock.Mock()
- manager.delete_port_chain(mocked_context)
- mock_driver1.delete_port_chain.assert_called_once_with(
- mocked_context)
- mock_driver2.delete_port_chain.assert_called_once_with(
- mocked_context)
-
- def test_delete_port_chain_exception(self):
- mock_driver = mock.Mock()
- mock_driver.delete_port_chain = mock.Mock(
- side_effect=sfc_exc.SfcException
- )
- with self.driver_manager_context({
- 'dummy': mock_driver,
- }) as manager:
- mocked_context = mock.Mock()
- self.assertRaises(
- sfc_exc.SfcDriverError,
- manager.delete_port_chain, mocked_context
- )
-
- def test_create_port_pair_group_called(self):
- mock_driver1 = mock.Mock()
- mock_driver2 = mock.Mock()
- with self.driver_manager_context({
- 'dummy1': mock_driver1,
- 'dummy2': mock_driver2
- }) as manager:
- mocked_context = mock.Mock()
- manager.create_port_pair_group(mocked_context)
- mock_driver1.create_port_pair_group.assert_called_once_with(
- mocked_context)
- mock_driver2.create_port_pair_group.assert_called_once_with(
- mocked_context)
-
- def test_create_port_pair_group_exception(self):
- mock_driver = mock.Mock()
- mock_driver.create_port_pair_group = mock.Mock(
- side_effect=sfc_exc.SfcException
- )
- with self.driver_manager_context({
- 'dummy': mock_driver,
- }) as manager:
- mocked_context = mock.Mock()
- self.assertRaises(
- sfc_exc.SfcDriverError,
- manager.create_port_pair_group, mocked_context
- )
-
- def test_update_port_pair_group_called(self):
- mock_driver1 = mock.Mock()
- mock_driver2 = mock.Mock()
- with self.driver_manager_context({
- 'dummy1': mock_driver1,
- 'dummy2': mock_driver2
- }) as manager:
- mocked_context = mock.Mock()
- manager.update_port_pair_group(mocked_context)
- mock_driver1.update_port_pair_group.assert_called_once_with(
- mocked_context)
- mock_driver2.update_port_pair_group.assert_called_once_with(
- mocked_context)
-
- def test_update_port_pair_group_exception(self):
- mock_driver = mock.Mock()
- mock_driver.update_port_pair_group = mock.Mock(
- side_effect=sfc_exc.SfcException
- )
- with self.driver_manager_context({
- 'dummy': mock_driver,
- }) as manager:
- mocked_context = mock.Mock()
- self.assertRaises(
- sfc_exc.SfcDriverError,
- manager.update_port_pair_group, mocked_context
- )
-
- def test_delete_port_pair_group_called(self):
- mock_driver1 = mock.Mock()
- mock_driver2 = mock.Mock()
- with self.driver_manager_context({
- 'dummy1': mock_driver1,
- 'dummy2': mock_driver2
- }) as manager:
- mocked_context = mock.Mock()
- manager.delete_port_pair_group(mocked_context)
- mock_driver1.delete_port_pair_group.assert_called_once_with(
- mocked_context)
- mock_driver2.delete_port_pair_group.assert_called_once_with(
- mocked_context)
-
- def test_delete_port_pair_group_exception(self):
- mock_driver = mock.Mock()
- mock_driver.delete_port_pair_group = mock.Mock(
- side_effect=sfc_exc.SfcException
- )
- with self.driver_manager_context({
- 'dummy': mock_driver,
- }) as manager:
- mocked_context = mock.Mock()
- self.assertRaises(
- sfc_exc.SfcDriverError,
- manager.delete_port_pair_group, mocked_context
- )
-
- def test_create_port_pair_called(self):
- mock_driver1 = mock.Mock()
- mock_driver2 = mock.Mock()
- with self.driver_manager_context({
- 'dummy1': mock_driver1,
- 'dummy2': mock_driver2
- }) as manager:
- mocked_context = mock.Mock()
- manager.create_port_pair(mocked_context)
- mock_driver1.create_port_pair.assert_called_once_with(
- mocked_context)
- mock_driver2.create_port_pair.assert_called_once_with(
- mocked_context)
-
- def test_create_port_pair_exception(self):
- mock_driver = mock.Mock()
- mock_driver.create_port_pair = mock.Mock(
- side_effect=sfc_exc.SfcException
- )
- with self.driver_manager_context({
- 'dummy': mock_driver,
- }) as manager:
- mocked_context = mock.Mock()
- self.assertRaises(
- sfc_exc.SfcDriverError,
- manager.create_port_pair, mocked_context
- )
-
- def test_update_port_pair_called(self):
- mock_driver1 = mock.Mock()
- mock_driver2 = mock.Mock()
- with self.driver_manager_context({
- 'dummy1': mock_driver1,
- 'dummy2': mock_driver2
- }) as manager:
- mocked_context = mock.Mock()
- manager.update_port_pair(mocked_context)
- mock_driver1.update_port_pair.assert_called_once_with(
- mocked_context)
- mock_driver2.update_port_pair.assert_called_once_with(
- mocked_context)
-
- def test_update_port_pair_exception(self):
- mock_driver = mock.Mock()
- mock_driver.update_port_pair = mock.Mock(
- side_effect=sfc_exc.SfcException
- )
- with self.driver_manager_context({
- 'dummy': mock_driver,
- }) as manager:
- mocked_context = mock.Mock()
- self.assertRaises(
- sfc_exc.SfcDriverError,
- manager.update_port_pair, mocked_context
- )
-
- def test_delete_port_pair_called(self):
- mock_driver1 = mock.Mock()
- mock_driver2 = mock.Mock()
- with self.driver_manager_context({
- 'dummy1': mock_driver1,
- 'dummy2': mock_driver2
- }) as manager:
- mocked_context = mock.Mock()
- manager.delete_port_pair(mocked_context)
- mock_driver1.delete_port_pair.assert_called_once_with(
- mocked_context)
- mock_driver2.delete_port_pair.assert_called_once_with(
- mocked_context)
-
- def test_delete_port_pair_exception(self):
- mock_driver = mock.Mock()
- mock_driver.delete_port_pair = mock.Mock(
- side_effect=sfc_exc.SfcException
- )
- with self.driver_manager_context({
- 'dummy': mock_driver,
- }) as manager:
- mocked_context = mock.Mock()
- self.assertRaises(
- sfc_exc.SfcDriverError,
- manager.delete_port_pair, mocked_context
- )
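Each *_called/*_exception pair in the deleted DriverManagerTestCase asserts the same contract: the manager fans an operation out to every configured driver in turn and re-raises any driver failure as SfcDriverError. A minimal sketch of that dispatch pattern, assuming a plain name-to-driver dict in place of the real stevedore-backed manager:

    class SfcDriverError(Exception):
        # Stand-in for the exception defined in
        # networking_sfc.services.sfc.common.exceptions.
        def __init__(self, method):
            super(SfcDriverError, self).__init__(
                'driver failed during %s' % method)
            self.method = method

    class MiniDriverManager(object):
        def __init__(self, drivers):
            self.drivers = drivers  # mapping: driver name -> driver object

        def _call_on_drivers(self, method_name, context):
            for driver in self.drivers.values():
                try:
                    getattr(driver, method_name)(context)
                except Exception:
                    # Uniform error type, as the *_exception tests expect.
                    raise SfcDriverError(method=method_name)

        def create_port_chain(self, context):
            self._call_on_drivers('create_port_chain', context)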
diff --git a/networking_sfc/tests/unit/services/sfc/test_plugin.py b/networking_sfc/tests/unit/services/sfc/test_plugin.py
deleted file mode 100644
index d265838..0000000
--- a/networking_sfc/tests/unit/services/sfc/test_plugin.py
+++ /dev/null
@@ -1,468 +0,0 @@
-# Copyright 2015 Futurewei. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-import mock
-
-from networking_sfc.services.sfc.common import context as sfc_ctx
-from networking_sfc.services.sfc.common import exceptions as sfc_exc
-from networking_sfc.tests.unit.db import test_sfc_db
-
-SFC_PLUGIN_KLASS = (
- "networking_sfc.services.sfc.plugin.SfcPlugin"
-)
-
-
-class SfcPluginTestCase(test_sfc_db.SfcDbPluginTestCase):
- def setUp(self, core_plugin=None, sfc_plugin=None, ext_mgr=None):
- if not sfc_plugin:
- sfc_plugin = SFC_PLUGIN_KLASS
- self.driver_manager_p = mock.patch(
- 'networking_sfc.services.sfc.driver_manager.SfcDriverManager'
- )
- self.fake_driver_manager_class = self.driver_manager_p.start()
- self.fake_driver_manager = mock.Mock()
- self.fake_driver_manager_class.return_value = self.fake_driver_manager
- self.plugin_context = None
- super(SfcPluginTestCase, self).setUp(
- core_plugin=core_plugin, sfc_plugin=sfc_plugin,
- ext_mgr=ext_mgr
- )
-
- def _record_context(self, plugin_context):
- self.plugin_context = plugin_context
-
- def test_create_port_chain_driver_manager_called(self):
- self.fake_driver_manager.create_port_chain = mock.Mock(
- side_effect=self._record_context)
- with self.port_pair_group(port_pair_group={}) as pg:
- with self.port_chain(port_chain={
- 'port_pair_groups': [pg['port_pair_group']['id']]
- }) as pc:
- driver_manager = self.fake_driver_manager
- driver_manager.create_port_chain.assert_called_once_with(
- mock.ANY
- )
- self.assertIsInstance(
- self.plugin_context, sfc_ctx.PortChainContext
- )
- self.assertIn('port_chain', pc)
- self.assertEqual(
- self.plugin_context.current, pc['port_chain'])
-
- def test_create_port_chain_driver_manager_exception(self):
- self.fake_driver_manager.create_port_chain = mock.Mock(
- side_effect=sfc_exc.SfcDriverError(
- method='create_port_chain'
- )
- )
- with self.port_pair_group(port_pair_group={}) as pg:
- self._create_port_chain(
- self.fmt,
- {'port_pair_groups': [pg['port_pair_group']['id']]},
- expected_res_status=500)
- self._test_list_resources('port_chain', [])
- self.fake_driver_manager.delete_port_chain.assert_called_once_with(
- mock.ANY
- )
-
- def test_update_port_chain_driver_manager_called(self):
- self.fake_driver_manager.update_port_chain = mock.Mock(
- side_effect=self._record_context)
- with self.port_pair_group(port_pair_group={}) as pg:
- with self.port_chain(port_chain={
- 'name': 'test1',
- 'port_pair_groups': [pg['port_pair_group']['id']]
- }) as pc:
- req = self.new_update_request(
- 'port_chains', {'port_chain': {'name': 'test2'}},
- pc['port_chain']['id']
- )
- res = self.deserialize(
- self.fmt,
- req.get_response(self.ext_api)
- )
- driver_manager = self.fake_driver_manager
- driver_manager.update_port_chain.assert_called_once_with(
- mock.ANY
- )
- self.assertIsInstance(
- self.plugin_context, sfc_ctx.PortChainContext
- )
- self.assertIn('port_chain', pc)
- self.assertIn('port_chain', res)
- self.assertEqual(
- self.plugin_context.current, res['port_chain'])
- self.assertEqual(
- self.plugin_context.original, pc['port_chain'])
-
- def test_update_port_chain_driver_manager_exception(self):
- self.fake_driver_manager.update_port_chain = mock.Mock(
- side_effect=sfc_exc.SfcDriverError(
- method='update_port_chain'
- )
- )
- with self.port_pair_group(port_pair_group={}) as pg:
- with self.port_chain(port_chain={
- 'name': 'test1',
- 'port_pair_groups': [pg['port_pair_group']['id']]
- }) as pc:
- self.assertIn('port_chain', pc)
- original_port_chain = pc['port_chain']
- req = self.new_update_request(
- 'port_chains', {'port_chain': {'name': 'test2'}},
- pc['port_chain']['id']
- )
- updated_port_chain = copy.copy(original_port_chain)
- updated_port_chain['name'] = 'test2'
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 500)
- res = self._list('port_chains')
- self.assertIn('port_chains', res)
- self.assertItemsEqual(
- res['port_chains'], [updated_port_chain])
-
- def test_delete_port_chain_manager_called(self):
- self.fake_driver_manager.delete_port_chain = mock.Mock(
- side_effect=self._record_context)
- with self.port_pair_group(port_pair_group={}) as pg:
- with self.port_chain(port_chain={
- 'name': 'test1',
- 'port_pair_groups': [pg['port_pair_group']['id']]
- }, do_delete=False) as pc:
- req = self.new_delete_request(
- 'port_chains', pc['port_chain']['id']
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 204)
- driver_manager = self.fake_driver_manager
- driver_manager.delete_port_chain.assert_called_once_with(
- mock.ANY
- )
- self.assertIsInstance(
- self.plugin_context, sfc_ctx.PortChainContext
- )
- self.assertIn('port_chain', pc)
- self.assertEqual(self.plugin_context.current, pc['port_chain'])
-
- def test_delete_port_chain_driver_manager_exception(self):
- self.fake_driver_manager.delete_port_chain = mock.Mock(
- side_effect=sfc_exc.SfcDriverError(
- method='delete_port_chain'
- )
- )
- with self.port_pair_group(port_pair_group={
- }, do_delete=False) as pg:
- with self.port_chain(port_chain={
- 'name': 'test1',
- 'port_pair_groups': [pg['port_pair_group']['id']]
- }, do_delete=False) as pc:
- req = self.new_delete_request(
- 'port_chains', pc['port_chain']['id']
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 500)
- self._test_list_resources('port_chain', [pc])
-
- def test_create_port_pair_group_driver_manager_called(self):
- self.fake_driver_manager.create_port_pair_group = mock.Mock(
- side_effect=self._record_context)
- with self.port_pair_group(port_pair_group={}) as pc:
- fake_driver_manager = self.fake_driver_manager
- fake_driver_manager.create_port_pair_group.assert_called_once_with(
- mock.ANY
- )
- self.assertIsInstance(
- self.plugin_context, sfc_ctx.PortPairGroupContext
- )
- self.assertIn('port_pair_group', pc)
- self.assertEqual(
- self.plugin_context.current, pc['port_pair_group'])
-
- def test_create_port_pair_group_driver_manager_exception(self):
- self.fake_driver_manager.create_port_pair_group = mock.Mock(
- side_effect=sfc_exc.SfcDriverError(
- method='create_port_pair_group'
- )
- )
- self._create_port_pair_group(self.fmt, {}, expected_res_status=500)
- self._test_list_resources('port_pair_group', [])
- driver_manager = self.fake_driver_manager
- driver_manager.delete_port_pair_group.assert_called_once_with(
- mock.ANY
- )
-
- def test_update_port_pair_group_driver_manager_called(self):
- self.fake_driver_manager.update_port_pair_group = mock.Mock(
- side_effect=self._record_context)
- with self.port_pair_group(port_pair_group={
- 'name': 'test1'
- }) as pc:
- req = self.new_update_request(
- 'port_pair_groups', {'port_pair_group': {'name': 'test2'}},
- pc['port_pair_group']['id']
- )
- res = self.deserialize(
- self.fmt,
- req.get_response(self.ext_api)
- )
- driver_manager = self.fake_driver_manager
- driver_manager.update_port_pair_group.assert_called_once_with(
- mock.ANY
- )
- self.assertIsInstance(
- self.plugin_context, sfc_ctx.PortPairGroupContext
- )
- self.assertIn('port_pair_group', pc)
- self.assertIn('port_pair_group', res)
- self.assertEqual(
- self.plugin_context.current, res['port_pair_group'])
- self.assertEqual(
- self.plugin_context.original, pc['port_pair_group'])
-
- def test_update_port_pair_group_driver_manager_exception(self):
- self.fake_driver_manager.update_port_pair_group = mock.Mock(
- side_effect=sfc_exc.SfcDriverError(
- method='update_port_pair_group'
- )
- )
- with self.port_pair_group(port_pair_group={
- 'name': 'test1'
- }) as pc:
- self.assertIn('port_pair_group', pc)
- original_port_pair_group = pc['port_pair_group']
- req = self.new_update_request(
- 'port_pair_groups', {'port_pair_group': {'name': 'test2'}},
- pc['port_pair_group']['id']
- )
- updated_port_pair_group = copy.copy(original_port_pair_group)
- updated_port_pair_group['name'] = 'test2'
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 500)
- res = self._list('port_pair_groups')
- self.assertIn('port_pair_groups', res)
- self.assertItemsEqual(
- res['port_pair_groups'], [updated_port_pair_group])
-
- def test_delete_port_pair_group_manager_called(self):
- self.fake_driver_manager.delete_port_pair_group = mock.Mock(
- side_effect=self._record_context)
- with self.port_pair_group(port_pair_group={
- 'name': 'test1'
- }, do_delete=False) as pc:
- req = self.new_delete_request(
- 'port_pair_groups', pc['port_pair_group']['id']
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 204)
- driver_manager = self.fake_driver_manager
- driver_manager.delete_port_pair_group.assert_called_once_with(
- mock.ANY
- )
- self.assertIsInstance(
- self.plugin_context, sfc_ctx.PortPairGroupContext
- )
- self.assertIn('port_pair_group', pc)
- self.assertEqual(
- self.plugin_context.current, pc['port_pair_group'])
-
- def test_delete_port_pair_group_driver_manager_exception(self):
- self.fake_driver_manager.delete_port_pair_group = mock.Mock(
- side_effect=sfc_exc.SfcDriverError(
- method='delete_port_pair_group'
- )
- )
- with self.port_pair_group(port_pair_group={
- 'name': 'test1'
- }, do_delete=False) as pc:
- req = self.new_delete_request(
- 'port_pair_groups', pc['port_pair_group']['id']
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 500)
- self._test_list_resources('port_pair_group', [pc])
-
- def test_create_port_pair_driver_manager_called(self):
- self.fake_driver_manager.create_port_pair = mock.Mock(
- side_effect=self._record_context)
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port, self.port(
- name='port2',
- device_id='default'
- ) as dst_port:
- with self.port_pair(port_pair={
- 'ingress': src_port['port']['id'],
- 'egress': dst_port['port']['id']
- }) as pc:
- driver_manager = self.fake_driver_manager
- driver_manager.create_port_pair.assert_called_once_with(
- mock.ANY
- )
- self.assertIsInstance(
- self.plugin_context, sfc_ctx.PortPairContext
- )
- self.assertIn('port_pair', pc)
- self.assertEqual(self.plugin_context.current, pc['port_pair'])
-
- def test_create_port_pair_driver_manager_exception(self):
- self.fake_driver_manager.create_port_pair = mock.Mock(
- side_effect=sfc_exc.SfcDriverError(
- method='create_port_pair'
- )
- )
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port, self.port(
- name='port2',
- device_id='default'
- ) as dst_port:
- self._create_port_pair(
- self.fmt,
- {
- 'ingress': src_port['port']['id'],
- 'egress': dst_port['port']['id']
- },
- expected_res_status=500)
- self._test_list_resources('port_pair', [])
- driver_manager = self.fake_driver_manager
- driver_manager.delete_port_pair.assert_called_once_with(
- mock.ANY
- )
-
- def test_update_port_pair_driver_manager_called(self):
- self.fake_driver_manager.update_port_pair = mock.Mock(
- side_effect=self._record_context)
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port, self.port(
- name='port2',
- device_id='default'
- ) as dst_port:
- with self.port_pair(port_pair={
- 'name': 'test1',
- 'ingress': src_port['port']['id'],
- 'egress': dst_port['port']['id']
- }) as pc:
- req = self.new_update_request(
- 'port_pairs', {'port_pair': {'name': 'test2'}},
- pc['port_pair']['id']
- )
- res = self.deserialize(
- self.fmt,
- req.get_response(self.ext_api)
- )
- driver_manager = self.fake_driver_manager
- driver_manager.update_port_pair.assert_called_once_with(
- mock.ANY
- )
- self.assertIsInstance(
- self.plugin_context, sfc_ctx.PortPairContext
- )
- self.assertIn('port_pair', pc)
- self.assertIn('port_pair', res)
- self.assertEqual(
- self.plugin_context.current, res['port_pair'])
- self.assertEqual(
- self.plugin_context.original, pc['port_pair'])
-
- def test_update_port_pair_driver_manager_exception(self):
- self.fake_driver_manager.update_port_pair = mock.Mock(
- side_effect=sfc_exc.SfcDriverError(
- method='update_port_pair'
- )
- )
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port, self.port(
- name='port2',
- device_id='default'
- ) as dst_port:
- with self.port_pair(port_pair={
- 'name': 'test1',
- 'ingress': src_port['port']['id'],
- 'egress': dst_port['port']['id']
- }) as pc:
- self.assertIn('port_pair', pc)
- original_port_pair = pc['port_pair']
- req = self.new_update_request(
- 'port_pairs', {'port_pair': {'name': 'test2'}},
- pc['port_pair']['id']
- )
- updated_port_pair = copy.copy(original_port_pair)
- updated_port_pair['name'] = 'test2'
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 500)
- res = self._list('port_pairs')
- self.assertIn('port_pairs', res)
- self.assertItemsEqual(res['port_pairs'], [updated_port_pair])
-
- def test_delete_port_pair_manager_called(self):
- self.fake_driver_manager.delete_port_pair = mock.Mock(
- side_effect=self._record_context)
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port, self.port(
- name='port2',
- device_id='default'
- ) as dst_port:
- with self.port_pair(port_pair={
- 'name': 'test1',
- 'ingress': src_port['port']['id'],
- 'egress': dst_port['port']['id']
- }, do_delete=False) as pc:
- req = self.new_delete_request(
- 'port_pairs', pc['port_pair']['id']
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 204)
- fake_driver_manager = self.fake_driver_manager
- fake_driver_manager.delete_port_pair.assert_called_once_with(
- mock.ANY
- )
- self.assertIsInstance(
- self.plugin_context, sfc_ctx.PortPairContext
- )
- self.assertIn('port_pair', pc)
- self.assertEqual(self.plugin_context.current, pc['port_pair'])
-
- def test_delete_port_pair_driver_manager_exception(self):
- self.fake_driver_manager.delete_port_pair = mock.Mock(
- side_effect=sfc_exc.SfcDriverError(
- method='delete_port_pair'
- )
- )
- with self.port(
- name='port1',
- device_id='default'
- ) as src_port, self.port(
- name='port2',
- device_id='default'
- ) as dst_port:
- with self.port_pair(port_pair={
- 'name': 'test1',
- 'ingress': src_port['port']['id'],
- 'egress': dst_port['port']['id']
- }, do_delete=False) as pc:
- req = self.new_delete_request(
- 'port_pairs', pc['port_pair']['id']
- )
- res = req.get_response(self.ext_api)
- self.assertEqual(res.status_int, 500)
- self._test_list_resources('port_pair', [pc])
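
The tests above repeatedly install self._record_context as the mocked
driver-manager method's side_effect and then assert against
self.plugin_context. The helper is defined earlier in this test module;
a minimal sketch of what it needs to do, inferred from how the tests use
it (the body here is an assumption, not the verbatim original):

    def _record_context(self, plugin_context):
        # Each driver-manager method is invoked with the plugin context
        # as its only argument; stash it so tests can inspect
        # .current and .original.
        self.plugin_context = plugin_context
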
diff --git a/onboarding.txt b/onboarding.txt
deleted file mode 100644
index a6a7fcc..0000000
--- a/onboarding.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-Get on board by filling this out and submitting it for review.
-This is all optional; it's just to give you a taste of the workflow.
-
-Full Name:
-IRC Nick:
-Linux Foundation ID:
-Favourite Open Source project:
-How would you like to help with this project:
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index 4c3f762..0000000
--- a/requirements.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-pbr>=1.6
-
-Paste
-PasteDeploy>=1.5.0
-Routes!=2.0,!=2.1,>=1.12.3;python_version=='2.7'
-Routes!=2.0,>=1.12.3;python_version!='2.7'
-debtcollector>=0.3.0 # Apache-2.0
-eventlet>=0.17.4
-pecan>=1.0.0
-greenlet>=0.3.2
-httplib2>=0.7.5
-requests!=2.9.0,>=2.8.1
-Jinja2>=2.8 # BSD License (3 clause)
-keystonemiddleware>=4.0.0
-netaddr!=0.7.16,>=0.7.12
-python-neutronclient>=2.6.0
-retrying!=1.3.0,>=1.2.3 # Apache-2.0
-ryu>=3.23.2 # Apache-2.0
-SQLAlchemy<1.1.0,>=0.9.9
-WebOb>=1.2.3
-python-keystoneclient!=1.8.0,>=1.6.0
-alembic>=0.8.0
-six>=1.9.0
-stevedore>=1.5.0 # Apache-2.0
-oslo.concurrency>=2.3.0 # Apache-2.0
-oslo.config>=2.7.0 # Apache-2.0
-oslo.context>=0.2.0 # Apache-2.0
-oslo.db>=4.1.0 # Apache-2.0
-oslo.i18n>=1.5.0 # Apache-2.0
-oslo.log>=1.12.0 # Apache-2.0
-oslo.messaging!=2.8.0,!=3.1.0,>2.6.1 # Apache-2.0
-oslo.middleware>=3.0.0 # Apache-2.0
-oslo.policy>=0.5.0 # Apache-2.0
-oslo.rootwrap>=2.0.0 # Apache-2.0
-oslo.serialization>=1.10.0 # Apache-2.0
-oslo.service>=1.0.0 # Apache-2.0
-oslo.utils>=3.2.0 # Apache-2.0
-oslo.versionedobjects>=0.13.0
-
-python-novaclient!=2.33.0,>=2.29.0
-
-# Windows-only requirements
-pywin32;sys_platform=='win32'
-wmi;sys_platform=='win32'
-
-# This project does depend on neutron as a library, but the
-# openstack tooling does not play nicely with projects that
-# are not publicly available in pypi.
-# -e git+https://git.openstack.org/openstack/neutron@stable/liberty#egg=neutron
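
Note that the neutron dependency itself stays commented out above. A
developer preparing a local environment would therefore install it from
git first, matching the commented-out line (a usage sketch, pinned to
the same stable/liberty branch):

    pip install -e git+https://git.openstack.org/openstack/neutron@stable/liberty#egg=neutron
    pip install -r requirements.txt
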
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index fdd5173..0000000
--- a/setup.cfg
+++ /dev/null
@@ -1,66 +0,0 @@
-[metadata]
-name = networking-sfc
-version = 1.0.0
-summary = APIs and implementations to support Service Function Chaining in Neutron.
-description-file =
- README.rst
-author = OpenStack
-author-email = openstack-dev@lists.openstack.org
-home-page = http://www.openstack.org/
-classifier =
- Environment :: OpenStack
- Intended Audience :: Information Technology
- Intended Audience :: System Administrators
- License :: OSI Approved :: Apache Software License
- Operating System :: POSIX :: Linux
- Programming Language :: Python
- Programming Language :: Python :: 2
- Programming Language :: Python :: 2.7
- Programming Language :: Python :: 3
- Programming Language :: Python :: 3.3
- Programming Language :: Python :: 3.4
-
-[files]
-packages =
- networking_sfc
-
-[entry_points]
-neutronclient.extension=
- flow_classifier = networking_sfc.cli.flow_classifier
- port_chain = networking_sfc.cli.port_chain
- port_pair = networking_sfc.cli.port_pair
- port_pair_group = networking_sfc.cli.port_pair_group
-console_scripts =
- neutron-openvswitch-agent = networking_sfc.services.sfc.agent.agent:main
-neutron.db.alembic_migrations =
- networking-sfc = networking_sfc.db.migration:alembic_migrations
-neutron.service_plugins =
- flow_classifier = networking_sfc.services.flowclassifier.plugin:FlowClassifierPlugin
- sfc = networking_sfc.services.sfc.plugin:SfcPlugin
-networking_sfc.sfc.drivers =
- dummy = networking_sfc.services.sfc.drivers.dummy.dummy:DummyDriver
- ovs = networking_sfc.services.sfc.drivers.ovs.driver:OVSSfcDriver
-networking_sfc.flowclassifier.drivers =
- dummy = networking_sfc.services.flowclassifier.drivers.dummy.dummy:DummyDriver
-
-[build_sphinx]
-source-dir = doc/source
-build-dir = doc/build
-all_files = 1
-
-[upload_sphinx]
-upload-dir = doc/build/html
-
-[compile_catalog]
-directory = networking_sfc/locale
-domain = networking-sfc
-
-[update_catalog]
-domain = networking-sfc
-output_dir = networking_sfc/locale
-input_file = networking_sfc/locale/networking-sfc.pot
-
-[extract_messages]
-keywords = _ gettext ngettext l_ lazy_gettext
-mapping_file = babel.cfg
-output_file = networking_sfc/locale/networking-sfc.pot
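
The [entry_points] section above is what lets Neutron discover the two
service plugins by their short aliases. Assuming a deployment where
Neutron resolves aliases through the neutron.service_plugins namespace,
enabling them is a one-line change in the server's configuration (a
sketch; the exact config file layout varies by deployment):

    [DEFAULT]
    service_plugins = flow_classifier,sfc
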
diff --git a/setup.py b/setup.py
deleted file mode 100644
index 782bb21..0000000
--- a/setup.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
-import setuptools
-
-# In python < 2.7.4, a lazy loading of package `pbr` will break
-# setuptools if some other modules registered functions in `atexit`.
-# solution from: http://bugs.python.org/issue15881#msg170215
-try:
- import multiprocessing # noqa
-except ImportError:
- pass
-
-setuptools.setup(
- setup_requires=['pbr>=1.8'],
- pbr=True)
diff --git a/test-requirements.txt b/test-requirements.txt
deleted file mode 100644
index 52a6177..0000000
--- a/test-requirements.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-hacking<0.11,>=0.10.0
-
-cliff>=1.15.0 # Apache-2.0
-coverage>=3.6
-fixtures>=1.3.1
-mock>=1.2
-python-subunit>=0.0.18
-requests-mock>=0.7.0 # Apache-2.0
-sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2
-oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0
-testrepository>=0.0.18
-testtools>=1.4.0
-testscenarios>=0.4
-WebTest>=2.0
-oslotest>=1.10.0 # Apache-2.0
-os-testr>=0.4.1
-tempest-lib>=0.12.0
-ddt>=1.0.1
-pylint==1.4.5 # GNU GPL v2
diff --git a/tools/check_i18n.py b/tools/check_i18n.py
deleted file mode 100644
index 697ad18..0000000
--- a/tools/check_i18n.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from __future__ import print_function
-
-import compiler
-import imp
-import os.path
-import sys
-
-
-def is_localized(node):
- """Check message wrapped by _()"""
- if isinstance(node.parent, compiler.ast.CallFunc):
- if isinstance(node.parent.node, compiler.ast.Name):
- if node.parent.node.name == '_':
- return True
- return False
-
-
-class ASTWalker(compiler.visitor.ASTVisitor):
-
- def default(self, node, *args):
- for child in node.getChildNodes():
- child.parent = node
- compiler.visitor.ASTVisitor.default(self, node, *args)
-
-
-class Visitor(object):
-
- def __init__(self, filename, i18n_msg_predicates,
- msg_format_checkers, debug):
- self.filename = filename
- self.debug = debug
- self.error = 0
- self.i18n_msg_predicates = i18n_msg_predicates
- self.msg_format_checkers = msg_format_checkers
- with open(filename) as f:
- self.lines = f.readlines()
-
- def visitConst(self, node):
- if not isinstance(node.value, str):
- return
-
- if is_localized(node):
- for (checker, msg) in self.msg_format_checkers:
- if checker(node):
- print('%s:%d %s: %s Error: %s' %
- (self.filename, node.lineno,
- self.lines[node.lineno - 1][:-1],
- checker.__name__, msg),
- file=sys.stderr)
- self.error = 1
- return
-            if self.debug:
- print('%s:%d %s: %s' %
- (self.filename, node.lineno,
- self.lines[node.lineno - 1][:-1],
- "Pass"))
- else:
- for (predicate, action, msg) in self.i18n_msg_predicates:
- if predicate(node):
- if action == 'skip':
-                        if self.debug:
- print('%s:%d %s: %s' %
- (self.filename, node.lineno,
- self.lines[node.lineno - 1][:-1],
- "Pass"))
- return
- elif action == 'error':
- print('%s:%d %s: %s Error: %s' %
- (self.filename, node.lineno,
- self.lines[node.lineno - 1][:-1],
- predicate.__name__, msg),
- file=sys.stderr)
- self.error = 1
- return
- elif action == 'warn':
- print('%s:%d %s: %s' %
- (self.filename, node.lineno,
- self.lines[node.lineno - 1][:-1],
- "Warn: %s" % msg))
- return
- print('Predicate with wrong action!', file=sys.stderr)
-
-
-def is_file_in_black_list(black_list, f):
-    for black_file in black_list:
-        if os.path.abspath(f).startswith(
-                os.path.abspath(black_file)):
-            return True
-    return False
-
-
-def check_i18n(input_file, i18n_msg_predicates, msg_format_checkers, debug):
- input_mod = compiler.parseFile(input_file)
- v = compiler.visitor.walk(input_mod,
- Visitor(input_file,
- i18n_msg_predicates,
- msg_format_checkers,
- debug),
- ASTWalker())
- return v.error
-
-
-if __name__ == '__main__':
- input_path = sys.argv[1]
- cfg_path = sys.argv[2]
- try:
- cfg_mod = imp.load_source('', cfg_path)
- except Exception:
- print("Load cfg module failed", file=sys.stderr)
- sys.exit(1)
-
- i18n_msg_predicates = cfg_mod.i18n_msg_predicates
- msg_format_checkers = cfg_mod.msg_format_checkers
- black_list = cfg_mod.file_black_list
-
- debug = False
- if len(sys.argv) > 3:
- if sys.argv[3] == '-d':
- debug = True
-
- if os.path.isfile(input_path):
- sys.exit(check_i18n(input_path,
- i18n_msg_predicates,
- msg_format_checkers,
- debug))
-
- error = 0
- for dirpath, dirs, files in os.walk(input_path):
- for f in files:
- if not f.endswith('.py'):
- continue
- input_file = os.path.join(dirpath, f)
- if is_file_in_black_list(black_list, input_file):
- continue
- if check_i18n(input_file,
- i18n_msg_predicates,
- msg_format_checkers,
- debug):
- error = 1
- sys.exit(error)
diff --git a/tools/check_i18n_test_case.txt b/tools/check_i18n_test_case.txt
deleted file mode 100644
index 3d1391d..0000000
--- a/tools/check_i18n_test_case.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-# test-case for check_i18n.py
-# python check_i18n.py check_i18n.txt -d
-
-# message format checking
-# capital checking
-msg = _("hello world, error")
-msg = _("hello world_var, error")
-msg = _('file_list xyz, pass')
-msg = _("Hello world, pass")
-
-# format specifier checking
-msg = _("Hello %s world %d, error")
-msg = _("Hello %s world, pass")
-msg = _("Hello %(var1)s world %(var2)s, pass")
-
-# message has been localized
-# is_localized
-msg = _("Hello world, pass")
-msg = _("Hello world, pass") % var
-LOG.debug(_('Hello world, pass'))
-LOG.info(_('Hello world, pass'))
-raise x.y.Exception(_('Hello world, pass'))
-raise Exception(_('Hello world, pass'))
-
-# message need be localized
-# is_log_callfunc
-LOG.debug('hello world, error')
-LOG.debug('hello world, error' % xyz)
-sys.append('hello world, warn')
-
-# is_log_i18n_msg_with_mod
-LOG.debug(_('Hello world, error') % xyz)
-
-# default warn
-msg = 'hello world, warn'
-msg = 'hello world, warn' % var
-
-# message needn't be localized
-# skip only one word
-msg = ''
-msg = "hello,pass"
-
-# skip dict
-msg = {'hello world, pass': 1}
-
-# skip list
-msg = ["hello world, pass"]
-
-# skip subscript
-msg['hello world, pass']
-
-# skip xml marker
-msg = "<test><t></t></test>, pass"
-
-# skip sql statement
-msg = "SELECT * FROM xyz WHERE hello=1, pass"
-msg = "select * from xyz, pass"
-
-# skip add statement
-msg = 'hello world' + e + 'world hello, pass'
-
-# skip doc string
-"""
-Hello world, pass
-"""
-class Msg:
- pass
diff --git a/tools/check_unit_test_structure.sh b/tools/check_unit_test_structure.sh
deleted file mode 100755
index 1aa3841..0000000
--- a/tools/check_unit_test_structure.sh
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/env bash
-
-# This script identifies the unit test modules that do not correspond
-# directly with a module in the code tree. See TESTING.rst for the
-# intended structure.
-
-neutron_path=$(cd "$(dirname "$0")/.." && pwd)
-base_test_path=networking_sfc/tests/unit
-test_path=$neutron_path/$base_test_path
-
-test_files=$(find ${test_path} -iname 'test_*.py')
-
-ignore_regexes=(
- "^plugins.*$"
-)
-
-error_count=0
-ignore_count=0
-total_count=0
-for test_file in ${test_files[@]}; do
- relative_path=${test_file#$test_path/}
- expected_path=$(dirname $neutron_path/networking_sfc/$relative_path)
- test_filename=$(basename "$test_file")
- expected_filename=${test_filename#test_}
- # Module filename (e.g. foo/bar.py -> foo/test_bar.py)
- filename=$expected_path/$expected_filename
- # Package dir (e.g. foo/ -> test_foo.py)
- package_dir=${filename%.py}
- if [ ! -f "$filename" ] && [ ! -d "$package_dir" ]; then
- for ignore_regex in ${ignore_regexes[@]}; do
- if [[ "$relative_path" =~ $ignore_regex ]]; then
- ((ignore_count++))
- continue 2
- fi
- done
- echo "Unexpected test file: $base_test_path/$relative_path"
- ((error_count++))
- fi
- ((total_count++))
-done
-
-if [ "$ignore_count" -ne 0 ]; then
- echo "$ignore_count unmatched test modules were ignored"
-fi
-
-if [ "$error_count" -eq 0 ]; then
- echo 'Success! All test modules match targets in the code tree.'
- exit 0
-else
- echo "Failure! $error_count of $total_count test modules do not match targets in the code tree."
- exit 1
-fi
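
Concretely, the mapping the loop above enforces is that every test
module mirrors a module or package in the code tree, for example (an
illustrative pairing; the test path is hypothetical):

    networking_sfc/db/sfc_db.py                  # code module
    networking_sfc/tests/unit/db/test_sfc_db.py  # matching test module
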
diff --git a/tools/clean.sh b/tools/clean.sh
deleted file mode 100755
index b79f035..0000000
--- a/tools/clean.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env bash
-rm -rf ./*.deb ./*.tar.gz ./*.dsc ./*.changes
-rm -rf */*.deb
-rm -rf ./plugins/**/build/ ./plugins/**/dist
-rm -rf ./plugins/**/lib/neutron_*_plugin.egg-info ./plugins/neutron-*
diff --git a/tools/i18n_cfg.py b/tools/i18n_cfg.py
deleted file mode 100644
index 5ad1a51..0000000
--- a/tools/i18n_cfg.py
+++ /dev/null
@@ -1,97 +0,0 @@
-import compiler
-import re
-
-
-def is_log_callfunc(n):
- """LOG.xxx('hello %s' % xyz) and LOG('hello')"""
- if isinstance(n.parent, compiler.ast.Mod):
- n = n.parent
- if isinstance(n.parent, compiler.ast.CallFunc):
- if isinstance(n.parent.node, compiler.ast.Getattr):
- if isinstance(n.parent.node.getChildNodes()[0],
- compiler.ast.Name):
- if n.parent.node.getChildNodes()[0].name == 'LOG':
- return True
- return False
-
-
-def is_log_i18n_msg_with_mod(n):
- """LOG.xxx("Hello %s" % xyz) should be LOG.xxx("Hello %s", xyz)"""
- if not isinstance(n.parent.parent, compiler.ast.Mod):
- return False
- n = n.parent.parent
- if isinstance(n.parent, compiler.ast.CallFunc):
- if isinstance(n.parent.node, compiler.ast.Getattr):
- if isinstance(n.parent.node.getChildNodes()[0],
- compiler.ast.Name):
- if n.parent.node.getChildNodes()[0].name == 'LOG':
- return True
- return False
-
-
-def is_wrong_i18n_format(n):
- """Check _('hello %s' % xyz)"""
- if isinstance(n.parent, compiler.ast.Mod):
- n = n.parent
- if isinstance(n.parent, compiler.ast.CallFunc):
- if isinstance(n.parent.node, compiler.ast.Name):
- if n.parent.node.name == '_':
- return True
- return False
-
-
-"""
-Used to check whether a message needs to be localized.
-(predicate_func, action, message)
-"""
-i18n_msg_predicates = [
- # Skip ['hello world', 1]
- (lambda n: isinstance(n.parent, compiler.ast.List), 'skip', ''),
-    # Skip {'hello world': 1}
- (lambda n: isinstance(n.parent, compiler.ast.Dict), 'skip', ''),
- # Skip msg['hello world']
- (lambda n: isinstance(n.parent, compiler.ast.Subscript), 'skip', ''),
- # Skip doc string
- (lambda n: isinstance(n.parent, compiler.ast.Discard), 'skip', ''),
- # Skip msg = "hello", in normal, message should more than one word
- (lambda n: len(n.value.strip().split(' ')) <= 1, 'skip', ''),
- # Skip msg = 'hello world' + vars + 'world hello'
- (lambda n: isinstance(n.parent, compiler.ast.Add), 'skip', ''),
- # Skip xml markers msg = "<test></test>"
- (lambda n: len(re.compile("</.*>").findall(n.value)) > 0, 'skip', ''),
- # Skip sql statement
- (lambda n: len(
- re.compile("^SELECT.*FROM", flags=re.I).findall(n.value)) > 0,
- 'skip', ''),
- # LOG.xxx()
- (is_log_callfunc, 'error', 'Message must be localized'),
- # _('hello %s' % xyz) should be _('hello %s') % xyz
- (is_wrong_i18n_format, 'error',
- ("Message format was wrong, _('hello %s' % xyz) "
- "should be _('hello %s') % xyz")),
- # default
-    (lambda n: True, 'warn', 'Message might need to be localized')
-]
-
-
-"""
-Used for checking message format. (checker_func, message)
-"""
-msg_format_checkers = [
-    # If a message contains more than one format specifier, it should
-    # use mapping keys
- (lambda n: len(re.compile("%[bcdeEfFgGnosxX]").findall(n.value)) > 1,
- "The message shouldn't contain more than one format specifier"),
-    # Check capitalization
- (lambda n: n.value.split(' ')[0].count('_') == 0 and
- n.value[0].isalpha() and
- n.value[0].islower(),
- "First letter must be capital"),
- (is_log_i18n_msg_with_mod,
- 'LOG.xxx("Hello %s" % xyz) should be LOG.xxx("Hello %s", xyz)')
-]
-
-
-file_black_list = ["./neutron/tests/unit",
- "./neutron/openstack",
- "./neutron/plugins/bigswitch/tests"]
diff --git a/tools/install_venv.py b/tools/install_venv.py
deleted file mode 100644
index f8fb8fa..0000000
--- a/tools/install_venv.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Copyright 2010 OpenStack Foundation.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Installation script for Neutron's development virtualenv
-"""
-from __future__ import print_function
-
-import os
-import sys
-
-import install_venv_common as install_venv
-
-
-def print_help():
- help = """
- Neutron development environment setup is complete.
-
- Neutron development uses virtualenv to track and manage Python dependencies
- while in development and testing.
-
-    To activate the Neutron virtualenv for the duration of your current
-    shell session you can run:
-
- $ source .venv/bin/activate
-
- Or, if you prefer, you can run commands in the virtualenv on a case by case
- basis by running:
-
- $ tools/with_venv.sh <your command>
-
-    Also, 'make test' will automatically use the virtualenv.
- """
- print(help)
-
-
-def main(argv):
- root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
- venv = os.path.join(root, '.venv')
- pip_requires = os.path.join(root, 'requirements.txt')
- test_requires = os.path.join(root, 'test-requirements.txt')
- py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
- project = 'Neutron'
- install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
- py_version, project)
- options = install.parse_args(argv)
- install.check_python_version()
- install.check_dependencies()
- install.create_virtualenv(no_site_packages=options.no_site_packages)
- install.install_dependencies()
- print_help()
-
-
-if __name__ == '__main__':
- main(sys.argv)
diff --git a/tools/install_venv_common.py b/tools/install_venv_common.py
deleted file mode 100644
index e279159..0000000
--- a/tools/install_venv_common.py
+++ /dev/null
@@ -1,172 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Provides methods needed by installation script for OpenStack development
-virtual environments.
-
-Since this script is used to bootstrap a virtualenv from the system's Python
-environment, it should be kept strictly compatible with Python 2.6.
-
-Synced in from openstack-common
-"""
-
-from __future__ import print_function
-
-import optparse
-import os
-import subprocess
-import sys
-
-
-class InstallVenv(object):
-
- def __init__(self, root, venv, requirements,
- test_requirements, py_version,
- project):
- self.root = root
- self.venv = venv
- self.requirements = requirements
- self.test_requirements = test_requirements
- self.py_version = py_version
- self.project = project
-
- def die(self, message, *args):
- print(message % args, file=sys.stderr)
- sys.exit(1)
-
- def check_python_version(self):
- if sys.version_info < (2, 6):
- self.die("Need Python Version >= 2.6")
-
- def run_command_with_code(self, cmd, redirect_output=True,
- check_exit_code=True):
- """Runs a command in an out-of-process shell.
-
- Returns the output of that command. Working directory is self.root.
- """
- if redirect_output:
- stdout = subprocess.PIPE
- else:
- stdout = None
-
- proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout)
- output = proc.communicate()[0]
- if check_exit_code and proc.returncode != 0:
- self.die('Command "%s" failed.\n%s', ' '.join(cmd), output)
- return (output, proc.returncode)
-
- def run_command(self, cmd, redirect_output=True, check_exit_code=True):
- return self.run_command_with_code(cmd, redirect_output,
- check_exit_code)[0]
-
- def get_distro(self):
- if (os.path.exists('/etc/fedora-release') or
- os.path.exists('/etc/redhat-release')):
- return Fedora(
- self.root, self.venv, self.requirements,
- self.test_requirements, self.py_version, self.project)
- else:
- return Distro(
- self.root, self.venv, self.requirements,
- self.test_requirements, self.py_version, self.project)
-
- def check_dependencies(self):
- self.get_distro().install_virtualenv()
-
- def create_virtualenv(self, no_site_packages=True):
- """Creates the virtual environment and installs PIP.
-
- Creates the virtual environment and installs PIP only into the
- virtual environment.
- """
- if not os.path.isdir(self.venv):
- print('Creating venv...', end=' ')
- if no_site_packages:
- self.run_command(['virtualenv', '-q', '--no-site-packages',
- self.venv])
- else:
- self.run_command(['virtualenv', '-q', self.venv])
- print('done.')
- else:
- print("venv already exists...")
- pass
-
- def pip_install(self, *args):
- self.run_command(['tools/with_venv.sh',
- 'pip', 'install', '--upgrade'] + list(args),
- redirect_output=False)
-
- def install_dependencies(self):
- print('Installing dependencies with pip (this can take a while)...')
-
- # First things first, make sure our venv has the latest pip and
- # setuptools and pbr
- self.pip_install('pip>=1.4')
- self.pip_install('setuptools')
- self.pip_install('pbr')
-
- self.pip_install('-r', self.requirements, '-r', self.test_requirements)
-
- def parse_args(self, argv):
- """Parses command-line arguments."""
- parser = optparse.OptionParser()
- parser.add_option('-n', '--no-site-packages',
- action='store_true',
- help="Do not inherit packages from global Python "
- "install.")
- return parser.parse_args(argv[1:])[0]
-
-
-class Distro(InstallVenv):
-
- def check_cmd(self, cmd):
- return bool(self.run_command(['which', cmd],
- check_exit_code=False).strip())
-
- def install_virtualenv(self):
- if self.check_cmd('virtualenv'):
- return
-
- if self.check_cmd('easy_install'):
- print('Installing virtualenv via easy_install...', end=' ')
- if self.run_command(['easy_install', 'virtualenv']):
- print('Succeeded')
- return
- else:
- print('Failed')
-
- self.die('ERROR: virtualenv not found.\n\n%s development'
- ' requires virtualenv, please install it using your'
- ' favorite package management tool' % self.project)
-
-
-class Fedora(Distro):
- """This covers all Fedora-based distributions.
-
- Includes: Fedora, RHEL, CentOS, Scientific Linux
- """
-
- def check_pkg(self, pkg):
- return self.run_command_with_code(['rpm', '-q', pkg],
- check_exit_code=False)[1] == 0
-
- def install_virtualenv(self):
- if self.check_cmd('virtualenv'):
- return
-
- if not self.check_pkg('python-virtualenv'):
- self.die("Please install 'python-virtualenv'.")
-
- super(Fedora, self).install_virtualenv()
diff --git a/tools/pretty_tox.sh b/tools/pretty_tox.sh
deleted file mode 100755
index 3ed73c1..0000000
--- a/tools/pretty_tox.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#! /bin/sh
-
-TESTRARGS=$1
-
-exec 3>&1
-status=$(exec 4>&1 >&3; (python setup.py testr --slowest --testr-args="--subunit $TESTRARGS"; echo $? >&4 ) | $(dirname $0)/subunit-trace.py -f) && exit $status
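
The one-liner above is dense because it has to pipe testr's subunit
stream into subunit-trace.py while still propagating testr's own exit
code, which a plain POSIX-sh pipeline would discard. An expanded,
commented sketch of the same file-descriptor trick:

    exec 3>&1          # fd 3 duplicates the script's real stdout
    status=$(
      exec 4>&1 >&3    # fd 4 feeds the $(...) capture; stdout returns to the terminal
      ( python setup.py testr --slowest --testr-args="--subunit $TESTRARGS"
        echo $? >&4 ) | $(dirname $0)/subunit-trace.py -f
    ) && exit $status  # $status carries testr's exit code, not subunit-trace's
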
diff --git a/tools/subunit-trace.py b/tools/subunit-trace.py
deleted file mode 100755
index 73f2f10..0000000
--- a/tools/subunit-trace.py
+++ /dev/null
@@ -1,307 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2014 Hewlett-Packard Development Company, L.P.
-# Copyright 2014 Samsung Electronics
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Trace a subunit stream in reasonable detail and high accuracy."""
-
-import argparse
-import functools
-import os
-import re
-import sys
-
-import mimeparse
-import subunit
-import testtools
-
-DAY_SECONDS = 60 * 60 * 24
-FAILS = []
-RESULTS = {}
-
-
-class Starts(testtools.StreamResult):
-
- def __init__(self, output):
- super(Starts, self).__init__()
- self._output = output
-
- def startTestRun(self):
- self._neednewline = False
- self._emitted = set()
-
- def status(self, test_id=None, test_status=None, test_tags=None,
- runnable=True, file_name=None, file_bytes=None, eof=False,
- mime_type=None, route_code=None, timestamp=None):
- super(Starts, self).status(
- test_id, test_status,
- test_tags=test_tags, runnable=runnable, file_name=file_name,
- file_bytes=file_bytes, eof=eof, mime_type=mime_type,
- route_code=route_code, timestamp=timestamp)
- if not test_id:
- if not file_bytes:
- return
- if not mime_type or mime_type == 'test/plain;charset=utf8':
- mime_type = 'text/plain; charset=utf-8'
- primary, sub, parameters = mimeparse.parse_mime_type(mime_type)
- content_type = testtools.content_type.ContentType(
- primary, sub, parameters)
- content = testtools.content.Content(
- content_type, lambda: [file_bytes])
- text = content.as_text()
- if text and text[-1] not in '\r\n':
- self._neednewline = True
- self._output.write(text)
- elif test_status == 'inprogress' and test_id not in self._emitted:
- if self._neednewline:
- self._neednewline = False
- self._output.write('\n')
- worker = ''
- for tag in test_tags or ():
- if tag.startswith('worker-'):
- worker = '(' + tag[7:] + ') '
- if timestamp:
- timestr = timestamp.isoformat()
- else:
- timestr = ''
- self._output.write('%s: %s%s [start]\n' %
- (timestr, worker, test_id))
- self._emitted.add(test_id)
-
-
-def cleanup_test_name(name, strip_tags=True, strip_scenarios=False):
- """Clean up the test name for display.
-
-    By default we strip out the tags in the test because they don't help us
-    in matching the test that was run to its result.
-
-    Make it possible to strip out the testscenarios information (not to
-    be confused with tempest scenarios); however, that's often needed to
-    identify generated negative tests.
- """
- if strip_tags:
- tags_start = name.find('[')
- tags_end = name.find(']')
- if tags_start > 0 and tags_end > tags_start:
- newname = name[:tags_start]
- newname += name[tags_end + 1:]
- name = newname
-
- if strip_scenarios:
- tags_start = name.find('(')
- tags_end = name.find(')')
- if tags_start > 0 and tags_end > tags_start:
- newname = name[:tags_start]
- newname += name[tags_end + 1:]
- name = newname
-
- return name
-
-
-def get_duration(timestamps):
- start, end = timestamps
- if not start or not end:
- duration = ''
- else:
- delta = end - start
- duration = '%d.%06ds' % (
- delta.days * DAY_SECONDS + delta.seconds, delta.microseconds)
- return duration
-
-
-def find_worker(test):
- for tag in test['tags']:
- if tag.startswith('worker-'):
- return int(tag[7:])
- return 'NaN'
-
-
-# Print out stdout/stderr if it exists, always
-def print_attachments(stream, test, all_channels=False):
- """Print out subunit attachments.
-
-    Print out subunit attachments that contain content. This runs in
-    two modes: one for successes, where we print out just stdout and
-    stderr, and an override that dumps all the attachments.
- """
- channels = ('stdout', 'stderr')
- for name, detail in test['details'].items():
- # NOTE(sdague): the subunit names are a little crazy, and actually
- # are in the form pythonlogging:'' (with the colon and quotes)
- name = name.split(':')[0]
- if detail.content_type.type == 'test':
- detail.content_type.type = 'text'
- if (all_channels or name in channels) and detail.as_text():
- title = "Captured %s:" % name
- stream.write("\n%s\n%s\n" % (title, ('~' * len(title))))
- # indent attachment lines 4 spaces to make them visually
- # offset
- for line in detail.as_text().split('\n'):
- stream.write(" %s\n" % line)
-
-
-def show_outcome(stream, test, print_failures=False, failonly=False):
- global RESULTS
- status = test['status']
- # TODO(sdague): ask lifeless why on this?
- if status == 'exists':
- return
-
- worker = find_worker(test)
- name = cleanup_test_name(test['id'])
- duration = get_duration(test['timestamps'])
-
- if worker not in RESULTS:
- RESULTS[worker] = []
- RESULTS[worker].append(test)
-
- # don't count the end of the return code as a fail
- if name == 'process-returncode':
- return
-
- if status == 'fail':
- FAILS.append(test)
- stream.write('{%s} %s [%s] ... FAILED\n' % (
- worker, name, duration))
- if not print_failures:
- print_attachments(stream, test, all_channels=True)
- elif not failonly:
- if status == 'success':
- stream.write('{%s} %s [%s] ... ok\n' % (
- worker, name, duration))
- print_attachments(stream, test)
- elif status == 'skip':
- stream.write('{%s} %s ... SKIPPED: %s\n' % (
- worker, name, test['details']['reason'].as_text()))
- else:
- stream.write('{%s} %s [%s] ... %s\n' % (
- worker, name, duration, test['status']))
- if not print_failures:
- print_attachments(stream, test, all_channels=True)
-
- stream.flush()
-
-
-def print_fails(stream):
- """Print summary failure report.
-
-    Currently unused; however, there remains debate on inline vs. at-end
-    reporting, so we leave the utility function for later use.
- """
- if not FAILS:
- return
- stream.write("\n==============================\n")
- stream.write("Failed %s tests - output below:" % len(FAILS))
- stream.write("\n==============================\n")
- for f in FAILS:
- stream.write("\n%s\n" % f['id'])
- stream.write("%s\n" % ('-' * len(f['id'])))
- print_attachments(stream, f, all_channels=True)
- stream.write('\n')
-
-
-def count_tests(key, value):
- count = 0
- for k, v in RESULTS.items():
- for item in v:
- if key in item:
- if re.search(value, item[key]):
- count += 1
- return count
-
-
-def run_time():
- runtime = 0.0
- for k, v in RESULTS.items():
- for test in v:
- runtime += float(get_duration(test['timestamps']).strip('s'))
- return runtime
-
-
-def worker_stats(worker):
- tests = RESULTS[worker]
- num_tests = len(tests)
- delta = tests[-1]['timestamps'][1] - tests[0]['timestamps'][0]
- return num_tests, delta
-
-
-def print_summary(stream):
- stream.write("\n======\nTotals\n======\n")
- stream.write("Run: %s in %s sec.\n" % (count_tests('status', '.*'),
- run_time()))
- stream.write(" - Passed: %s\n" % count_tests('status', 'success'))
- stream.write(" - Skipped: %s\n" % count_tests('status', 'skip'))
- stream.write(" - Failed: %s\n" % count_tests('status', 'fail'))
-
- # we could have no results, especially as we filter out the process-codes
- if RESULTS:
- stream.write("\n==============\nWorker Balance\n==============\n")
-
- for w in range(max(RESULTS.keys()) + 1):
- if w not in RESULTS:
- stream.write(
- " - WARNING: missing Worker %s! "
- "Race in testr accounting.\n" % w)
- else:
- num, time = worker_stats(w)
- stream.write(" - Worker %s (%s tests) => %ss\n" %
- (w, num, time))
-
-
-def parse_args():
- parser = argparse.ArgumentParser()
- parser.add_argument('--no-failure-debug', '-n', action='store_true',
- dest='print_failures', help='Disable printing failure '
- 'debug information in realtime')
- parser.add_argument('--fails', '-f', action='store_true',
- dest='post_fails', help='Print failure debug '
-                        'information after the stream is processed')
- parser.add_argument('--failonly', action='store_true',
- dest='failonly', help="Don't print success items",
- default=(
- os.environ.get('TRACE_FAILONLY', False)
- is not False))
- return parser.parse_args()
-
-
-def main():
- args = parse_args()
- stream = subunit.ByteStreamToStreamResult(
- sys.stdin, non_subunit_name='stdout')
- starts = Starts(sys.stdout)
- outcomes = testtools.StreamToDict(
- functools.partial(show_outcome, sys.stdout,
- print_failures=args.print_failures,
- failonly=args.failonly
- ))
- summary = testtools.StreamSummary()
- result = testtools.CopyStreamResult([starts, outcomes, summary])
- result.startTestRun()
- try:
- stream.run(result)
- finally:
- result.stopTestRun()
- if count_tests('status', '.*') == 0:
- print("The test run didn't actually run any tests")
- return 1
- if args.post_fails:
- print_fails(sys.stdout)
- print_summary(sys.stdout)
- return (0 if summary.wasSuccessful() else 1)
-
-
-if __name__ == '__main__':
- sys.exit(main())
diff --git a/tools/tox_install.sh b/tools/tox_install.sh
deleted file mode 100755
index 75b91a3..0000000
--- a/tools/tox_install.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/sh
-
-# Many of neutron's repos suffer from the same problem: they depend on
-# neutron, which is not published on PyPI.
-
-# This wrapper for tox's package installer will use the existing package
-# if it exists, else use zuul-cloner if that program exists, else grab it
-# from neutron master via a hard-coded URL. That last case should only
-# happen with devs running unit tests locally.
-
-# From the tox.ini config page:
-# install_command=ARGV
-# default:
-# pip install {opts} {packages}
-
-set -e
-
-echo "PIP HARDCODE" > /tmp/tox_install.txt
-pip install -U -egit+https://git.openstack.org/openstack/neutron@stable/liberty#egg=neutron
-
-pip install -U $*
-exit $?
diff --git a/tools/with_venv.sh b/tools/with_venv.sh
deleted file mode 100755
index 5fb07ea..0000000
--- a/tools/with_venv.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-TOOLS=`dirname $0`
-VENV=$TOOLS/../.venv
-source $VENV/bin/activate && "$@"
diff --git a/tox.ini b/tox.ini
deleted file mode 100644
index 4b66f8f..0000000
--- a/tox.ini
+++ /dev/null
@@ -1,67 +0,0 @@
-[tox]
-envlist = py34,py27,pep8,pylint
-minversion = 1.6
-skipsdist = True
-
-[testenv]
-setenv = VIRTUAL_ENV={envdir}
-usedevelop = True
-install_command = {toxinidir}/tools/tox_install.sh {opts} {packages}
-deps = -r{toxinidir}/requirements.txt
- -r{toxinidir}/test-requirements.txt
-whitelist_externals = sh
-commands =
- sh tools/pretty_tox.sh '{posargs}'
-# There is also secret magic in pretty_tox.sh which lets you run in a
-# fail-only mode. To do this, define the TRACE_FAILONLY environment variable.
-
-[testenv:functional]
-setenv = OS_TEST_PATH=./networking_sfc/tests/functional
-commands =
- python setup.py testr --slowest --testr-args='{posargs}'
-
-[testenv:dsvm-functional]
-setenv = OS_TEST_PATH=./networking_sfc/tests/functional
- OS_SUDO_TESTING=1
- OS_ROOTWRAP_CMD=sudo /usr/local/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
- OS_FAIL_ON_MISSING_DEPS=1
-sitepackages=True
-commands =
- python setup.py testr --slowest --testr-args='{posargs}'
-
-[testenv:pep8]
-commands =
- flake8
- {toxinidir}/tools/check_unit_test_structure.sh
- neutron-db-manage --subproject networking-sfc --database-connection sqlite:// check_migration
-whitelist_externals = sh
-
-[testenv:i18n]
-commands = python ./tools/check_i18n.py ./networking_sfc ./tools/i18n_cfg.py
-
-[testenv:cover]
-commands =
- python setup.py testr --coverage --coverage-package-name=networking_sfc --testr-args='{posargs}'
-
-[testenv:venv]
-commands = {posargs}
-
-[testenv:docs]
-commands = python setup.py build_sphinx
-
-[testenv:py34]
-commands = python -m testtools.run
-
-[flake8]
-ignore =
-show-source = true
-builtins = _
-exclude=.venv,.git,.tox,dist,doc,src,*openstack/common*,*lib/python*,*egg,build,tools,networking_sfc/db/migration/alembic_migrations/versions
-
-[testenv:pylint]
-commands =
- pylint --rcfile=.pylintrc --output-format=colorized {posargs:networking_sfc}
-
-[hacking]
-import_exceptions = neutron.i18n
-local-check-factory = neutron.hacking.checks.factory
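
Taken together, the environments above map to the day-to-day commands
for this tree (a usage sketch; target names come from the envlist and
sections above):

    tox -e pep8                    # flake8, test-structure check, migration check
    tox -e py27                    # unit tests via tools/pretty_tox.sh
    TRACE_FAILONLY=1 tox -e py27   # the fail-only mode noted in the comment above
    tox -e cover                   # coverage for the networking_sfc package
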