-rw-r--r--  INFO.yaml | 67
-rw-r--r--  docs/development/requirements/requirements.rst | 4
-rw-r--r--  docs/release/configguide/feature.configuration.rst | 4
-rw-r--r--  docs/release/release-notes/releasenotes.rst | 145
-rw-r--r--  docs/release/scenarios/os-odl-sfc-ha/scenario.description.rst | 12
-rw-r--r--  docs/release/scenarios/os-odl-sfc-noha/scenario.description.rst | 25
-rw-r--r--  docs/release/scenarios/os-odl-sfc_fdio-ha/scenario.description.rst | 55
-rw-r--r--  docs/release/scenarios/os-odl-sfc_fdio-noha/scenario.description.rst | 63
-rw-r--r--  requirements.txt | 7
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/README | 59
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/files/ansible-role-requirements-master.yml | 227
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/files/ansible-role-requirements-pike.yml | 70
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/defaults/repo_packages/opendaylight-master.yml | 22
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/openstack_services_master.yml | 222
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/openstack_services_pike.yml | 68
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/tasks/add-sfc-repos-and-inventory-master.yml | 6
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/tasks/add-sfc-repos-and-inventory-pike.yml | 6
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-files-master.yml | 26
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-files-pike.yml | 26
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/tasks/main.yml | 10
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/templates/ha/user_sfc_scenarios_variables_suse.yml.j2 (renamed from scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/user_sfc_scenarios_variables_suse.yml) | 6
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/templates/ha/user_sfc_scenarios_variables_ubuntu.yml.j2 (renamed from scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/user_sfc_scenarios_variables_ubuntu.yml) | 6
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/templates/mini/user_sfc_scenarios_variables_suse.yml.j2 (renamed from scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/user_sfc_scenarios_variables_suse.yml) | 6
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/templates/mini/user_sfc_scenarios_variables_ubuntu.yml.j2 (renamed from scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/user_sfc_scenarios_variables_ubuntu.yml) | 6
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/templates/noha/user_sfc_scenarios_variables_suse.yml.j2 (renamed from scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/user_sfc_scenarios_variables_suse.yml) | 6
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/templates/noha/user_sfc_scenarios_variables_ubuntu.yml.j2 (renamed from scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/user_sfc_scenarios_variables_ubuntu.yml) | 6
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/vars/main.yml | 2
-rw-r--r--  scenarios/os-odl-sfc/xci_overrides | 5
-rw-r--r--  setup.cfg | 7
-rw-r--r--  sfc/lib/cleanup.py | 10
-rw-r--r--  sfc/lib/config.py | 67
-rw-r--r--  sfc/lib/odl_utils.py | 303
-rw-r--r--  sfc/lib/openstack_utils.py | 243
-rw-r--r--  sfc/lib/results.py | 1
-rw-r--r--  sfc/lib/test_utils.py | 34
-rw-r--r--  sfc/tests/functest/config-pike.yaml | 84
-rw-r--r--  sfc/tests/functest/config.yaml | 67
-rw-r--r--  sfc/tests/functest/register-vim.json | 3
-rw-r--r--  sfc/tests/functest/register-vim.json-queens | 19
-rw-r--r--  sfc/tests/functest/run_sfc_tests.py | 28
-rw-r--r--  sfc/tests/functest/sfc_chain_deletion.py | 105
-rw-r--r--  sfc/tests/functest/sfc_one_chain_two_service_functions.py | 300
-rw-r--r--  sfc/tests/functest/sfc_parent_function.py | 530
-rw-r--r--  sfc/tests/functest/sfc_symmetric_chain.py | 356
-rw-r--r--  sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py | 338
-rw-r--r--  sfc/tests/functest/vnfd-templates/test-one-chain-vnfd1.yaml | 6
-rw-r--r--  sfc/tests/functest/vnfd-templates/test-one-chain-vnfd2.yaml | 6
-rw-r--r--  sfc/tests/functest/vnfd-templates/test-symmetric-vnfd.yaml | 21
-rw-r--r--  sfc/tests/functest/vnfd-templates/test-two-chains-vnfd1.yaml | 6
-rw-r--r--  sfc/tests/functest/vnfd-templates/test-two-chains-vnfd2.yaml | 6
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd-pike.yaml | 38
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd.yaml | 38
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd-pike.yaml | 40
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd.yaml | 4
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml | 46
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml-queens | 46
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1-pike.yaml | 38
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1.yaml | 19
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2-pike.yaml | 39
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2.yaml | 20
60 files changed, 2199 insertions, 1836 deletions
diff --git a/INFO.yaml b/INFO.yaml
new file mode 100644
index 00000000..81a1f9e9
--- /dev/null
+++ b/INFO.yaml
@@ -0,0 +1,67 @@
+---
+project: 'Service Function Chaining (sfc)'
+project_creation_date: 'May 5, 2015'
+project_category: 'Collaborative Development'
+lifecycle_state: 'Incubation'
+project_lead: &opnfv_sfc_ptl
+ name: 'Manuel Buil'
+ email: 'manuelbuil87@gmail.com'
+ company: 'gmail.com'
+ timezone: 'Unknown'
+primary_contact: *opnfv_sfc_ptl
+issue_tracking:
+ type: 'jira'
+ url: 'https://jira.opnfv.org/projects/sfc'
+ key: 'sfc'
+mailing_list:
+ type: 'mailman2'
+ url: 'opnfv-tech-discuss@lists.opnfv.org'
+ tag: '[sfc]'
+realtime_discussion:
+ type: irc
+ server: 'freenode.net'
+ channel: '#opnfv-sfc'
+meetings:
+ - type: 'gotomeeting+irc'
+ agenda: # eg: 'https://wiki.opnfv.org/display/'
+ url: # eg: 'https://global.gotomeeting.com/join/819733085'
+ server: 'freenode.net'
+ channel: '#opnfv-meeting'
+ repeats: 'weekly'
+ time: # eg: '16:00 UTC'
+repositories:
+ - 'sfc'
+committers:
+ - <<: *opnfv_sfc_ptl
+ - name: 'Brady Johnson'
+ email: 'brady.allen.johnson@ericsson.com'
+ company: 'ericsson.com'
+ id: 'ebrjohn'
+ - name: 'Reinaldo Penno'
+ email: 'rapenno@gmail.com'
+ company: 'gmail.com'
+ id: 'repenno'
+ - name: 'Sam Hague'
+ email: 'shague@redhat.com'
+ company: 'redhat.com'
+ id: 'shague'
+ - name: 'Vishal Murgai'
+ email: 'vmurgai@cavium.com'
+ company: 'cavium.com'
+ id: 'vmurgai'
+ - name: 'Tim Rozet'
+ email: 'trozet@redhat.com'
+ company: 'redhat.com'
+ id: 'trozet'
+ - name: 'Manuel Buil'
+ email: 'manuelbuil87@gmail.com'
+ company: 'gmail.com'
+ id: 'mbuil'
+ - name: 'Dimitrios Markou'
+ email: 'mardim@intracom-telecom.com'
+ company: 'intracom-telecom.com'
+ id: 'mardim'
+tsc:
+ # yamllint disable rule:line-length
+ approval: ''
+ # yamllint enable rule:line-length
diff --git a/docs/development/requirements/requirements.rst b/docs/development/requirements/requirements.rst
index e83a3e7e..00b77354 100644
--- a/docs/development/requirements/requirements.rst
+++ b/docs/development/requirements/requirements.rst
@@ -16,7 +16,7 @@ in an OPNFV environment.
Detailed Requirements
+++++++++++++++++++++
-These are the Euphrates specific requirements:
+These are the Fraser specific requirements:
1 The supported Service Chaining encapsulation will be NSH VXLAN-GPE.
@@ -36,7 +36,7 @@ These are the Euphrates specific requirements:
Long Term Requirements
++++++++++++++++++++++
-These requirements are out of the scope of the Euphrates release.
+These requirements are out of the scope of the Fraser release.
1 Dynamic movement of SFs across multiple Compute nodes.
diff --git a/docs/release/configguide/feature.configuration.rst b/docs/release/configguide/feature.configuration.rst
index 37f381d0..ad9725ed 100644
--- a/docs/release/configguide/feature.configuration.rst
+++ b/docs/release/configguide/feature.configuration.rst
@@ -12,9 +12,9 @@ SFC feature desciription
For details of the scenarios and their provided capabilities refer to
the scenario description documents:
-- http://docs.opnfv.org/en/stable-euphrates/submodules/sfc/docs/release/scenarios/os-odl-sfc-ha/index.html
+- http://docs.opnfv.org/en/stable-fraser/submodules/sfc/docs/release/scenarios/os-odl-sfc-ha/index.html
-- http://docs.opnfv.org/en/stable-euphrates/submodules/sfc/docs/release/scenarios/os-odl-sfc-noha/index.html
+- http://docs.opnfv.org/en/stable-fraser/submodules/sfc/docs/release/scenarios/os-odl-sfc-noha/index.html
The SFC feature enables creation of Service Fuction Chains - an ordered list
diff --git a/docs/release/release-notes/releasenotes.rst b/docs/release/release-notes/releasenotes.rst
index 32953313..d57b743f 100644
--- a/docs/release/release-notes/releasenotes.rst
+++ b/docs/release/release-notes/releasenotes.rst
@@ -5,26 +5,25 @@
Abstract
========
-This document compiles the release notes for the Euphrates release of
+This document compiles the release notes for the Fraser release of
OPNFV SFC
Important notes
===============
These notes provide release information for the use of SFC with the
-Apex installer and xci tools for the Euphrates release of OPNFV.
+Apex installer, xci tool and Compass4NFV for the Fraser release of OPNFV.
Summary
=======
-The goal of the SFC Euphrates release is to integrate the OpenDaylight
-SFC project into an OPNFV environment, with either the Apex installer or
-xci tools. In subsequent releases, we expect Compass4NFV to integrate
-the SFC scenarios too.
+The goal of the SFC Fraser release is to integrate the OpenDaylight
+SFC project into an OPNFV environment, with either the Apex installer,
+xci tools or Compass4NFV.
More information about OpenDaylight and SFC can be found here.
-- `OpenDaylight <http://www.opendaylight.org/software>`_ version "Nitrogen SR1"
+- `OpenDaylight <http://www.opendaylight.org/software>`_ version "Oxygen SR1"
- `Service function chaining <https://wiki.opnfv.org/display/sfc/Service+Function+Chaining+Home>`_
@@ -33,11 +32,11 @@ More information about OpenDaylight and SFC can be found here.
- Overall OPNFV documentation
- - `Design document <http://docs.opnfv.org/en/stable-euphrates/submodules/sfc/docs/development/design/index.html>`_
+ - `Design document <http://docs.opnfv.org/en/stable-fraser/submodules/sfc/docs/development/design/index.html>`_
- - `User Guide <http://docs.opnfv.org/en/stable-euphrates/submodules/sfc/docs/release/userguide/index.html>`_
+ - `User Guide <http://docs.opnfv.org/en/stable-fraser/submodules/sfc/docs/release/userguide/index.html>`_
- - `Installation Instructions <http://docs.opnfv.org/en/stable-euphrates/submodules/sfc/docs/release/configguide/index.html>`_
+ - `Installation Instructions <http://docs.opnfv.org/en/stable-fraser/submodules/sfc/docs/release/configguide/index.html>`_
- Release Notes (this document)
@@ -49,18 +48,18 @@ Release Data
| **Project** | sfc |
| | |
+--------------------------------------+--------------------------------------+
-| **Repo/tag** | euphrates 1.0 |
+| **Repo/tag** | opnfv-6.1.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release designation** | Euphrates base release |
+| **Release designation** | Fraser 6.1 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | 6th October 2017 |
+| **Release date** | 25th May 2018 |
| | |
+--------------------------------------+--------------------------------------+
-| **Purpose of the delivery** | Integrate neutron networking-sfc |
-| | and use the latest tacker code. Move |
-| | to OpenStack ocata and ODL Nitrogen |
+| **Purpose of the delivery** | Move to OpenStack Pike and ODL Oxygen|
+| | Support symmetric testcases |
+| | Support master branch of OpenStack |
+--------------------------------------+--------------------------------------+
Version change
@@ -70,22 +69,22 @@ Module version changes
~~~~~~~~~~~~~~~~~~~~~~
This release of OPNFV sfc is based on following upstream versions:
-- OpenStack Ocata release
+- OpenStack Pike release
-- OpenDaylight Nitrogen SR1 release
+- OpenDaylight Oxygen SR1 release
- Open vSwitch 2.6.1 with Yi Yang NSH patch
Document changes
~~~~~~~~~~~~~~~~
-This is the first tracked version of OPNFV SFC Euphrates. It comes with
+This is the first tracked version of OPNFV SFC Fraser. It comes with
the following documentation:
-- `Design document <http://docs.opnfv.org/en/stable-euphrates/submodules/sfc/docs/development/design/index.html>`_
+- `Design document <http://docs.opnfv.org/en/stable-fraser/submodules/sfc/docs/development/design/index.html>`_
-- `User Guide <http://docs.opnfv.org/en/stable-euphrates/submodules/sfc/docs/release/userguide/index.html>`_
+- `User Guide <http://docs.opnfv.org/en/stable-fraser/submodules/sfc/docs/release/userguide/index.html>`_
-- `Installation Instructions <http://docs.opnfv.org/en/stable-euphrates/submodules/sfc/docs/release/configguide/index.html>`_
+- `Installation Instructions <http://docs.opnfv.org/en/stable-fraser/submodules/sfc/docs/release/configguide/index.html>`_
- Release notes (This document)
@@ -95,50 +94,29 @@ Reason for version
Feature additions
~~~~~~~~~~~~~~~~~
-- `Integration with neutron networking-sfc`
-- `Moved to latest tacker code`
-- `Started using forwarding graphs as a way to configure SFC`
-- `Created compatibility with latest functest (based on Alpine containers)`
+- `Using SNAPS as base for our tests`
+- `Increase test coverage with two extra test cases: symmetric and deletion`
+- `Reduced the footprint of the image we use for testing to reduce testing time`
Bug corrections
~~~~~~~~~~~~~~~
-**JIRA TICKETS:**
-
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-103>`_
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-104>`_
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-105>`_
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-106>`_
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-107>`_
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-108>`_
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-109>`_
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-110>`_
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-111>`_
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-112>`_
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-113>`_
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-114>`_
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-116>`_
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-117>`_
-
-Apart from the OPNFV bug fixes, there were some bugs in ODL and Tacker which
-were fixed as well.
-
Deliverables
------------
Software deliverables
~~~~~~~~~~~~~~~~~~~~~
-No specific deliverables are created, as SFC is included with Apex.
+No specific deliverables are created, as SFC is included with Apex and Compass4NFV.
Documentation deliverables
~~~~~~~~~~~~~~~~~~~~~~~~~~
-- `Design document <http://docs.opnfv.org/en/stable-euphrates/submodules/sfc/docs/development/design/index.html>`_
+- `Design document <http://docs.opnfv.org/en/stable-fraser/submodules/sfc/docs/development/design/index.html>`_
-- `User Guide <http://docs.opnfv.org/en/stable-euphrates/submodules/sfc/docs/release/userguide/index.html>`_
+- `User Guide <http://docs.opnfv.org/en/stable-fraser/submodules/sfc/docs/release/userguide/index.html>`_
-- `Installation Instructions <http://docs.opnfv.org/en/stable-euphrates/submodules/sfc/docs/release/configguide/index.html>`_
+- `Installation Instructions <http://docs.opnfv.org/en/stable-fraser/submodules/sfc/docs/release/configguide/index.html>`_
- Release notes (This document)
@@ -148,75 +126,32 @@ Known Limitations, Issues and Workarounds
System Limitations
------------------
-The Euphrates 3.0 release has a few limitations:
+The Fraser 1.0 release has a few limitations:
1 - The testcase sfc_two_chains_SSH_and_HTTP is disabled in this release due to
-bugs in ODL, Tacker and networking-sfc:
-
-https://bugs.opendaylight.org/show_bug.cgi?id=9221
-https://bugs.launchpad.net/tacker/+bug/1719839
-https://bugs.launchpad.net/tacker/+bug/1719876
-https://bugs.launchpad.net/networking-sfc/+bug/1719835
-https://bugs.launchpad.net/networking-sfc/+bug/1719883
-
-2 - The topology CLIENT_SERVER_SAME_HOST does not work due to a bug in the
-vxlan_tool. This tool is part of the ODL-SFC repo and provides support for
-non-NSH-aware SFs:
-
-https://bugs.opendaylight.org/show_bug.cgi?id=9219
-
-3 - The topologies CLIENT_SERVER_DIFFERENT_HOST_SPLIT_VNF and
-CLIENT_SERVER_SAME_HOST_SPLIT_VNF do not work because of a ODL bug:
-
-https://bugs.opendaylight.org/show_bug.cgi?id=9220
-
+a missing feature in ODL. We are currently unable to update a chain configuration
Known issues
------------
-1 - OpenDaylight SFC relies on a version of Open vSwitch (OVS) with
-Network Service Headers (NSH). A version of OVS with NSH currently
-exists, but it is in a branched version of OVS. Extensive upstream
-work has been done to merge the NSH patches into mainstream OVS,
-but the work is still not complete. More information about this
-can be found in the OPNFV SFC design document (link provided above).
-
-2 - Due to a bug in tacker:
-
-https://bugs.launchpad.net/tacker/+bug/1719841
+1 - When tacker is deployed without Mistral, there is an ERROR in the logs and
+the VIM is always in 'PENDING' state because tacker cannot monitor its health.
+However, everything works and SFs can be created.
-it is not possible to run the SFC scenarios in openstack environments
-which require SSL connections to public endpoints and use self-signed
-certificates
+2 - When tacker is deployed without barbican, it cannot be in HA mode because
+barbican is the only way to fetch the fernet keys.
Workarounds
-----------
-There is a way to avoid the known issue number 2 when using xci. Once
-the deployment is successfully done, go to tacker server and modify
-line 242 of the file:
-
-/openstack/venvs/tacker-15.1.7/lib/python2.7/site-packages/keystoneauth1/session.py
-
-So that instead of having:
-
-self.verify = verify
-
-It has:
-
-self.verify = False
-
-Forcing tacker to not check the certificates
-
-
Test results
============
-The Euphrates release of SFC has undergone QA test runs
-with Functest tests on the Apex installer and xci utility
+The Fraser release of SFC has undergone QA test runs with Functest tests on the
+Apex and Compass installers and xci utility
References
==========
-For more information on the OPNFV Euphrates release, please see:
+For more information on the OPNFV Fraser release, please see:
OPNFV
-----
@@ -225,12 +160,12 @@ OPNFV
2) `OPNFV documentation- and software downloads <https://www.opnfv.org/software/download>`_
-3) `OPNFV Danube release <http://wiki.opnfv.org/releases/euphrates>`_
+3) `OPNFV Fraser release <http://wiki.opnfv.org/releases/fraser>`_
OpenStack
---------
-4) `OpenStack Newton Release artifacts <http://www.openstack.org/software/ocata>`_
+4) `OpenStack Pike Release artifacts <http://www.openstack.org/software/pike>`_
5) `OpenStack documentation <http://docs.openstack.org>`_
diff --git a/docs/release/scenarios/os-odl-sfc-ha/scenario.description.rst b/docs/release/scenarios/os-odl-sfc-ha/scenario.description.rst
index 11b41434..3c728861 100644
--- a/docs/release/scenarios/os-odl-sfc-ha/scenario.description.rst
+++ b/docs/release/scenarios/os-odl-sfc-ha/scenario.description.rst
@@ -8,7 +8,7 @@ Introduction
The os-odl-sfc-ha is intended to be used to install the OPNFV SFC project in a standard
OPNFV High Availability mode. The OPNFV SFC project integrates the OpenDaylight SFC project
-into the OPNFV environment. The OPNFV SFC Euphrates release uses the OpenDaylight Nitrogen SR1 release.
+into the OPNFV environment. The OPNFV SFC Fraser release uses the OpenDaylight Oxygen SR1 release.
Scenario components and composition
===================================
@@ -74,17 +74,13 @@ Limitations, Issues and Workarounds
.. faults or bugs. If the system design only provide some expected functionality then provide
.. some insight at this point.
-The *client* virtual machine needs to be located in a compute node where at least
-one of the service functions (SFs) is placed. This is due to a limitation in OpenDaylight,
-Nitrogen, which only installs the traffic classifier in the compute nodes where the SFs are.
-
Specific version of OVS
-----------------------
SFC needs changes in OVS to include the Network Service Headers (NSH) Service Chaining
encapsulation. This OVS patch has been ongoing for quite a while (2 years+), and still
has not been officially merged. Previously, SFC used NSH from a branched version of OVS
-based on 2.3.90, called the "Pritesh Patch". In the OpenDaylight Nitrogen SR1 release, SFC was
+based on 2.3.90, called the "Pritesh Patch". In the OpenDaylight Oxygen SR1 release, SFC was
changed to use a newer, branched version of OVS based on 2.6.1, called the "Yi Yang
Patch".
@@ -101,6 +97,6 @@ https://wiki.opnfv.org/display/sfc/Service+Function+Chaining+Home
https://wiki.opendaylight.org/view/Service_Function_Chaining:Main
-For more information on the OPNFV Euphrates release, please visit:
+For more information on the OPNFV Fraser release, please visit:
-http://www.opnfv.org/euphrates
+http://www.opnfv.org/fraser
diff --git a/docs/release/scenarios/os-odl-sfc-noha/scenario.description.rst b/docs/release/scenarios/os-odl-sfc-noha/scenario.description.rst
index e74e47c4..3c728861 100644
--- a/docs/release/scenarios/os-odl-sfc-noha/scenario.description.rst
+++ b/docs/release/scenarios/os-odl-sfc-noha/scenario.description.rst
@@ -6,10 +6,9 @@ Introduction
============
.. In this section explain the purpose of the scenario and the types of capabilities provided
-The os-odl-sfc-noha is intended to be used to install the OPNFV SFC project in a standard
-OPNFV Non-High Availability mode. The OPNFV SFC project integrates the OpenDaylight SFC
-project into the OPNFV environment. The OPNFV SFC Euphrates release uses the OpenDaylight
-Nitrogen SR1 release.
+The os-odl-sfc-ha is intended to be used to install the OPNFV SFC project in a standard
+OPNFV High Availability mode. The OPNFV SFC project integrates the OpenDaylight SFC project
+into the OPNFV environment. The OPNFV SFC Fraser release uses the OpenDaylight Oxygen SR1 release.
Scenario components and composition
===================================
@@ -18,9 +17,8 @@ Scenario components and composition
.. to communicate to the user the capabilities available in this scenario.
This scenario installs everything needed to use the SFC OpenDaylight project in an OPNFV
-environment. Since this scenario is Non-High Availability, then only one controller and
-one compute node will be deployed. The classifier used in this scenario is implemented
-by the Netvirt OpenDaylight project.
+environment. The classifier used in this scenario is implemented by the Netvirt OpenDaylight
+project.
Following is a detailed list of what is included with this scenario:
@@ -58,7 +56,7 @@ The VNF Manager
In order to create a VM for each Service Function, a VNF Manager is needed. The OPNFV
SFC project currently uses the Tacker OpenStack project as a VNF Manager. Tacker is
installed on the controller node and manages VNF life cycle, and coordinates VM creation
-with the OpenDaylight SFC project.
+and SFC configuration with OpenStack and OpenDaylight SFC project.
Scenario usage overview
=======================
@@ -76,17 +74,13 @@ Limitations, Issues and Workarounds
.. faults or bugs. If the system design only provide some expected functionality then provide
.. some insight at this point.
-The *client* virtual machine needs to be located in a compute node where at least
-one of the service functions (SFs) is placed. This is due to a limitation in OpenDaylight,
-Nitrogen, which only installs the traffic classifier in the compute nodes where the SFs are.
-
Specific version of OVS
-----------------------
SFC needs changes in OVS to include the Network Service Headers (NSH) Service Chaining
encapsulation. This OVS patch has been ongoing for quite a while (2 years+), and still
has not been officially merged. Previously, SFC used NSH from a branched version of OVS
-based on 2.3.90, called the "Pritesh Patch". In the OpenDaylight Nitrogen SR1 release, SFC was
+based on 2.3.90, called the "Pritesh Patch". In the OpenDaylight Oxygen SR1 release, SFC was
changed to use a newer, branched version of OVS based on 2.6.1, called the "Yi Yang
Patch".
@@ -103,7 +97,6 @@ https://wiki.opnfv.org/display/sfc/Service+Function+Chaining+Home
https://wiki.opendaylight.org/view/Service_Function_Chaining:Main
-For more information on the OPNFV Euphrates release, please visit:
-
-http://www.opnfv.org/euphrates
+For more information on the OPNFV Fraser release, please visit:
+http://www.opnfv.org/fraser
diff --git a/docs/release/scenarios/os-odl-sfc_fdio-ha/scenario.description.rst b/docs/release/scenarios/os-odl-sfc_fdio-ha/scenario.description.rst
index 39efcacd..b9d965a3 100644
--- a/docs/release/scenarios/os-odl-sfc_fdio-ha/scenario.description.rst
+++ b/docs/release/scenarios/os-odl-sfc_fdio-ha/scenario.description.rst
@@ -8,7 +8,7 @@ Introduction
The os-odl-sfc_fdio-ha is intended to be used to install the OPNFV SFC project in a standard
OPNFV High Availability mode. The OPNFV SFC project integrates the OpenDaylight SFC project
-into the OPNFV environment. The OPNFV SFC Euphrates release uses the OpenDaylight Nitrogen SR1 release.
+into the OPNFV environment. The OPNFV SFC Fraser release uses the OpenDaylight Oxygen SR1 release.
Scenario components and composition
===================================
@@ -16,48 +16,9 @@ Scenario components and composition
.. what each component provides and why it has been included in order
.. to communicate to the user the capabilities available in this scenario.
-This scenario installs everything needed to use the SFC OpenDaylight project in an OPNFV
-environment. The classifier used in this scenario is implemented by the Netvirt OpenDaylight
-project.
-
-Following is a detailed list of what is included with this scenario:
-
OpenDaylight features installed
-------------------------------
-The OpenDaylight SDN controller is installed in the controller node.
-
-The following are the SFC features that get installed:
-
-- odl-sfc-model
-- odl-sfc-provider
-- odl-sfc-provider-rest
-- odl-sfc-ovs
-- odl-sfc-openflow-renderer
-
-The following are the Netvirt features that get installed:
-
-- odl-netvirt-openstack
-- odl-sfc-genius
-- odl-neutron-service
-- odl-neutron-northbound-api
-- odl-neutron-spi
-- odl-neutron-transcriber
-- odl-ovsdb-southbound-impl-api
-- odl-ovsdb-southbound-impl-impl
-- odl-ovsdb-library
-
-By simply installing the odl-netvirt-sfc feature, all the dependant features
-will automatically be installed.
-
-The VNF Manager
----------------
-
-In order to create a VM for each Service Function, a VNF Manager is needed. The OPNFV
-SFC project currently uses the Tacker OpenStack project as a VNF Manager. Tacker is
-installed on the controller node and manages VNF life cycle, and coordinates VM creation
-and SFC configuration with OpenStack and OpenDaylight SFC project.
-
Scenario usage overview
=======================
.. Provide a brief overview on how to use the scenario and the features available to the
@@ -74,10 +35,6 @@ Limitations, Issues and Workarounds
.. faults or bugs. If the system design only provide some expected functionality then provide
.. some insight at this point.
-The *client* virtual machine needs to be located in a compute node where at least
-one of the service functions (SFs) is placed. This is due to a limitation in OpenDaylight,
-Nitrogen, which only installs the traffic classifier in the compute nodes where the SFs are.
-
Specific version of FD.IO
-----------------------
@@ -85,13 +42,3 @@ TO BE ADDED
References
==========
-
-For more information about SFC, please visit:
-
-https://wiki.opnfv.org/display/sfc/Service+Function+Chaining+Home
-
-https://wiki.opendaylight.org/view/Service_Function_Chaining:Main
-
-For more information on the OPNFV Euphrates release, please visit:
-
-http://www.opnfv.org/euphrates
diff --git a/docs/release/scenarios/os-odl-sfc_fdio-noha/scenario.description.rst b/docs/release/scenarios/os-odl-sfc_fdio-noha/scenario.description.rst
index 6ef8c4ba..b9d965a3 100644
--- a/docs/release/scenarios/os-odl-sfc_fdio-noha/scenario.description.rst
+++ b/docs/release/scenarios/os-odl-sfc_fdio-noha/scenario.description.rst
@@ -6,10 +6,9 @@ Introduction
============
.. In this section explain the purpose of the scenario and the types of capabilities provided
-The os-odl-sfc_fdio-noha is intended to be used to install the OPNFV SFC project in a standard
-OPNFV Non-High Availability mode. The OPNFV SFC project integrates the OpenDaylight SFC
-project into the OPNFV environment. The OPNFV SFC Euphrates release uses the OpenDaylight
-Nitrogen SR1 release.
+The os-odl-sfc_fdio-ha is intended to be used to install the OPNFV SFC project in a standard
+OPNFV High Availability mode. The OPNFV SFC project integrates the OpenDaylight SFC project
+into the OPNFV environment. The OPNFV SFC Fraser release uses the OpenDaylight Oxygen SR1 release.
Scenario components and composition
===================================
@@ -17,49 +16,9 @@ Scenario components and composition
.. what each component provides and why it has been included in order
.. to communicate to the user the capabilities available in this scenario.
-This scenario installs everything needed to use the SFC OpenDaylight project in an OPNFV
-environment. Since this scenario is Non-High Availability, then only one controller and
-one compute node will be deployed. The classifier used in this scenario is implemented
-by the Netvirt OpenDaylight project.
-
-Following is a detailed list of what is included with this scenario:
-
OpenDaylight features installed
-------------------------------
-The OpenDaylight SDN controller is installed in the controller node.
-
-The following are the SFC features that get installed:
-
-- odl-sfc-model
-- odl-sfc-provider
-- odl-sfc-provider-rest
-- odl-sfc-ovs
-- odl-sfc-openflow-renderer
-
-The following are the Netvirt features that get installed:
-
-- odl-netvirt-openstack
-- odl-sfc-genius
-- odl-neutron-service
-- odl-neutron-northbound-api
-- odl-neutron-spi
-- odl-neutron-transcriber
-- odl-ovsdb-southbound-impl-api
-- odl-ovsdb-southbound-impl-impl
-- odl-ovsdb-library
-
-By simply installing the odl-netvirt-sfc feature, all the dependant features
-will automatically be installed.
-
-The VNF Manager
----------------
-
-In order to create a VM for each Service Function, a VNF Manager is needed. The OPNFV
-SFC project currently uses the Tacker OpenStack project as a VNF Manager. Tacker is
-installed on the controller node and manages VNF life cycle, and coordinates VM creation
-with the OpenDaylight SFC project.
-
Scenario usage overview
=======================
.. Provide a brief overview on how to use the scenario and the features available to the
@@ -76,26 +35,10 @@ Limitations, Issues and Workarounds
.. faults or bugs. If the system design only provide some expected functionality then provide
.. some insight at this point.
-The *client* virtual machine needs to be located in a compute node where at least
-one of the service functions (SFs) is placed. This is due to a limitation in OpenDaylight,
-Nitrogen, which only installs the traffic classifier in the compute nodes where the SFs are.
-
Specific version of FD.IO
-----------------------
TO BE ADDED
-
References
==========
-
-For more information about SFC, please visit:
-
-https://wiki.opnfv.org/display/sfc/Service+Function+Chaining+Home
-
-https://wiki.opendaylight.org/view/Service_Function_Chaining:Main
-
-For more information on the OPNFV Euphrates release, please visit:
-
-http://www.opnfv.org/euphrates
-
diff --git a/requirements.txt b/requirements.txt
index dce5e2e0..0a4947bb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,13 +2,14 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
pbr!=2.1.0,>=2.0.0 # Apache-2.0
-paramiko>=2.0 # LGPLv2.1+
+paramiko>=2.0.0 # LGPLv2.1+
python-glanceclient>=2.8.0 # Apache-2.0
requests>=2.14.2 # Apache-2.0
xmltodict>=0.10.1 # MIT
python-keystoneclient>=3.8.0 # Apache-2.0
-python-novaclient>=9.0.0 # Apache-2.0
+python-novaclient>=9.1.0 # Apache-2.0
python-tackerclient>=0.8.0 # Apache-2.0
-PyYAML>=3.10.0 # MIT
+PyYAML>=3.12 # MIT
opnfv
snaps
+xtesting # Apache-2.0
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/README b/scenarios/os-odl-sfc/role/os-odl-sfc/README
index 3cb8cb29..a7461ec8 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/README
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/README
@@ -2,61 +2,10 @@ This is the role which deploys the os-odl-sfc scenarios in xci.
This role currently works with:
-- OpenStack stable/pike
-- ODL Nitrogen RC1
+- OpenStack stable/queens
+- ODL Oxygen
- OVS+NSH patch
- OpenSUSE 42.3 or Ubuntu 16.04
-# PREPARATIONS #
-
-1 - If you don’t have a key already, generate an SSH key in $HOME/.ssh
-ssh-keygen -t rsa
-
-2 - Clone OPNFV releng-xci repository
-git clone https://gerrit.opnfv.org/gerrit/releng-xci.git
-
-3 - Change into directory where the sandbox script is located:
-cd releng-xci/xci
-
-4 - Use a version of releng-xci which we know works
-
-git checkout cf2cd4e4b87a5e392bc4ba49749a349925ba2f86
-
-Then, depending on the scenario which will be run:
-
-## os-odl-sfc-noha ##
-
-To run os-odl-sfc-noha you should export the following variables before
-running xci-deploy.sh. Note that you should change xxxx by the path where
-your releng-xci code is:
-
-export XCI_FLAVOR=noha
-export OPNFV_SCENARIO=os-odl-sfc
-export OPENSTACK_OSA_VERSION=stable/pike
-export VM_MEMORY_SIZE=16384
-export OPENSTACK_BIFROST_VERSION=bd7e99bf7a00e4c9ad7d03d752d7251e3caf8509
-
-## os-odl-sfc-ha ##
-
-To run os-odl-sfc-ha you should export the following variables before
-running xci-deploy.sh:
-
-export XCI_FLAVOR=ha
-export OPNFV_SCENARIO=os-odl-sfc
-export OPENSTACK_OSA_VERSION=stable/pike
-export VM_MEMORY_SIZE=20480
-export OPENSTACK_BIFROST_VERSION=bd7e99bf7a00e4c9ad7d03d752d7251e3caf8509
-
-
-# LIMITATIONS #
-
-1 - It is using a private branch for the os-neutron role. This is because
-there are several patches pending to be upstreamed. This is the branch we
-are using:
-
-https://github.com/manuelbuil/openstack-ansible-os_neutron/tree/testing-ovs-nsh2
-
-We will stop doing this as soon as the patches are merged upstream
-
-2 - It is using a private branch for tacker code because a bug does not
-allow SSL. We will stop doing this as soon as the bug is fixed
+Follow this link:
+https://wiki.opnfv.org/display/sfc/Deploy+OPNFV+SFC+scenarios
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ansible-role-requirements-master.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/ansible-role-requirements-master.yml
deleted file mode 100644
index a82e01ed..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ansible-role-requirements-master.yml
+++ /dev/null
@@ -1,227 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-# these versions are based on the osa commit 7b3aac28a0a87e5966527829f6b0abcbc2303cc7 on 2017-12-11
-# https://review.openstack.org/gitweb?p=openstack/openstack-ansible.git;a=commit;h=7b3aac28a0a87e5966527829f6b0abcbc2303cc7
-- name: ansible-hardening
- scm: git
- src: https://git.openstack.org/openstack/ansible-hardening
- version: 46a94c72518f83d27b25a5fa960dde7130956215
-- name: apt_package_pinning
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-apt_package_pinning
- version: eba07d7dd7962d90301c49fc088551f9b35f367a
-- name: pip_install
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-pip_install
- version: 32c27505c6e0ee00ea0fb4a1c62240c60f17a0e3
-- name: galera_client
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-galera_client
- version: 9a8302cbba24ea4e5907567e5f93e874d30d79df
-- name: galera_server
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-galera_server
- version: aa452989d7295111962f67a3f3a96d96bc408846
-- name: ceph_client
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-ceph_client
- version: 34a04f7b24c80297866bc5ab56618e2211b1d5f9
-- name: haproxy_server
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-haproxy_server
- version: 9966fd96fede46c3b00c9e069e402eae90c66f17
-- name: keepalived
- scm: git
- src: https://github.com/evrardjp/ansible-keepalived
- version: 5deafcab39de162ac1550c58246963974e8dcf4e
-- name: lxc_container_create
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-lxc_container_create
- version: 68f81c679be88577633f98e8b9252a62bdcef754
-- name: lxc_hosts
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-lxc_hosts
- version: 84ac3442e542aeedf1396c88e0387b4ea1548eb1
-- name: memcached_server
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-memcached_server
- version: ae6f721dc0342e1e7b45ff2448ab51f7539dc01f
-- name: openstack_hosts
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-openstack_hosts
- version: 05c7f09d181de1809fd596cc0d879c49e3f86bbf
-- name: os_keystone
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_keystone
- version: cd9d4ef7d8614d241fa40ba33c1c205fd2b47fa1
-- name: openstack_openrc
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-openstack_openrc
- version: d594c2debc249daa5b7f6f2890f546093efd1ee5
-- name: os_aodh
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_aodh
- version: ce871dee75511f94bfd24dde8f97e573cf6d3ead
-- name: os_barbican
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_barbican
- version: c3e191037d0978479e3cb95a59b2986adab28c69
-- name: os_ceilometer
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_ceilometer
- version: 55bb04eaad4dd5c7fdad742b3557dc30dc9d45bf
-- name: os_cinder
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_cinder
- version: 536dd3446e0fc7fc68ab42b982ac9affc4215787
-- name: os_designate
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_designate
- version: a65d7a3394aef340ff94587dd0bb48133ed00763
-- name: os_glance
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_glance
- version: 43aa00424f233a6125f7a9216cec42da1d8ca4c5
-- name: os_gnocchi
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_gnocchi
- version: b1f7574dc529f8298a983d8d0e09520e90b571a8
-- name: os_heat
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_heat
- version: 0b3eb9348d55d6b1cf077a2c45b297f9a1be730d
-- name: os_horizon
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_horizon
- version: da72526dc1757688ecec8914344e330aaa0be720
-- name: os_ironic
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_ironic
- version: a90558f7a216e5e661c5d1a4048dbe30559542d1
-- name: os_magnum
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_magnum
- version: 736d1707339cb99396578018a6bda7af9184fb02
-- name: os_molteniron
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_molteniron
- version: 9b4c104a252c453bcd798fec9dbae7224b3d8001
-- name: os_neutron
- scm: git
- src: https://github.com/manuelbuil/openstack-ansible-os_neutron
- version: testing-ovs-nsh2
-- name: os_nova
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_nova
- version: 53df001c9034f198b9349def3c9158f8bbe43ff3
-- name: os_octavia
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_octavia
- version: 02ad3c68802287a1ba54cf10de085dcd14c324d8
-- name: os_rally
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_rally
- version: bc9075dba204e64d11cb397017d32b0c2297eed0
-- name: os_sahara
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_sahara
- version: 3c45121050ba21bd284f054d7b82a338f347157f
-- name: os_swift
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_swift
- version: f31217bb097519f15755f2337165657d7eb6b014
-- name: os_tacker
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_tacker
- version: d95902891c4e6200510509c066006c921cfff8df
-- name: os_tempest
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_tempest
- version: 866dedbcba180ca82c3c93823cef3db2d3241d1b
-- name: os_trove
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_trove
- version: b425fa316999d0863a44126f239a33d8c3fec3a6
-- name: plugins
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-plugins
- version: d2f60237761646968a4b39b15185fb5c84e7386f
-- name: rabbitmq_server
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-rabbitmq_server
- version: 311f76890c8f99cb0b46958775d84de614609323
-- name: repo_build
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-repo_build
- version: 59a3f444c263235d8f0f584da8768656179fa02a
-- name: repo_server
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-repo_server
- version: 7889f37cdd2a90b4b98e8ef2e886f1fd4950fc0a
-- name: rsyslog_client
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_client
- version: 310cfe9506d3742be10790533ad0d16100d81498
-- name: rsyslog_server
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_server
- version: ba7bb699c0c874c7977add86ca308ca18be8f9a8
-- name: sshd
- scm: git
- src: https://github.com/willshersystems/ansible-sshd
- version: 537b9b2bc2fd7f23301222098344727f8161993c
-- name: bird
- scm: git
- src: https://github.com/logan2211/ansible-bird
- version: 5033c412398cf6f98097a9ac274a6f12810c807e
-- name: etcd
- scm: git
- src: https://github.com/logan2211/ansible-etcd
- version: 3933355dfe51477822db517d3c07ad561fb61318
-- name: unbound
- scm: git
- src: https://github.com/logan2211/ansible-unbound
- version: 7be67d6b60718896f0c17a7d4a14b912f72a59ae
-- name: resolvconf
- scm: git
- src: https://github.com/logan2211/ansible-resolvconf
- version: d48dd3eea22094b6ecc6aa6ea07279c8e68e28b5
-- name: ceph-defaults
- scm: git
- src: https://github.com/ceph/ansible-ceph-defaults
- version: 19884aaac1bc58921952af955c66602ccca89e93
-- name: ceph-common
- scm: git
- src: https://github.com/ceph/ansible-ceph-common
- version: 08804bd46dff42ebff64e7f27c86f2265fe4d6fc
-- name: ceph-config
- scm: git
- src: https://github.com/ceph/ansible-ceph-config
- version: e070537f443c3ae5d262835c8b0a7a992850283b
-- name: ceph-mon
- scm: git
- src: https://github.com/ceph/ansible-ceph-mon
- version: 309b7e339e057d56d9dd38bdd61998b900f45ba8
-- name: ceph-mgr
- scm: git
- src: https://github.com/ceph/ansible-ceph-mgr
- version: fe8f0864500b54cc7c9f897b871ba2cdf1d37096
-- name: ceph-osd
- scm: git
- src: https://github.com/ceph/ansible-ceph-osd
- version: e022d6773bc827e75ad051b429dec786a75d68f4
-- name: opendaylight
- scm: git
- src: https://github.com/opendaylight/integration-packaging-ansible-opendaylight
- version: ef1367ad15ad10ac8cc9416f6fd49fd8b350d377
-- name: haproxy_endpoints
- scm: git
- src: https://github.com/logan2211/ansible-haproxy-endpoints
- version: 49901861b16b8afaa9bccdbc649ac956610ff22b
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ansible-role-requirements-pike.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/ansible-role-requirements-pike.yml
index 4b0b9b8b..a954dccc 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ansible-role-requirements-pike.yml
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/files/ansible-role-requirements-pike.yml
@@ -1,31 +1,31 @@
- name: ansible-hardening
scm: git
src: https://git.openstack.org/openstack/ansible-hardening
- version: c05e36f48de66feb47046a0126d986fa03313f29
+ version: cee2e0b5b432c50614b908d9bf50ed2cc32d8daa
- name: apt_package_pinning
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-apt_package_pinning
- version: 9403a36513aee54c15890ac96c1f8c455f9c083d
+ version: 956e06cf66bd878b132c58bdd97304749c0da189
- name: pip_install
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-pip_install
- version: df107891bf9fdfa7287bdfe43f3fa0120a80e5ad
+ version: e52f829e7386e43ca8a85ab820901740590dc6ea
- name: galera_client
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-galera_client
- version: 52b374547648056b58c544532296599801d501d7
+ version: 79b4bd9980b75355ec729bba37a440f4c88df106
- name: galera_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-galera_server
- version: b124e06872ebeca7d81cb22fb80ae97a995b07a8
+ version: 67628375be0e3996b0f5cbddf8e1b15d6ca85333
- name: ceph_client
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-ceph_client
- version: 5fcbc68fdbd3105d233fd3c03c887f13227b1c3d
+ version: de60aa5d96cd6803674e8b398828205909aa54a6
- name: haproxy_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-haproxy_server
- version: a905aaed8627f59d9dc10b9bc031589a7c65828f
+ version: aacaaed2e36945baa7fb9e8bd6ceb8393dad3730
- name: keepalived
scm: git
src: https://github.com/evrardjp/ansible-keepalived
@@ -33,35 +33,35 @@
- name: lxc_container_create
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-lxc_container_create
- version: c41d3b20da6be07d9bf5db7f7e6a1384c7cfb5eb
+ version: 8a3b201bbaa82a38bd162315efccec1ec244e481
- name: lxc_hosts
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-lxc_hosts
- version: d974c4db1696027899b28b2cb58800cae9a605e5
+ version: a6f5052063e7ac4157da36c4d105fdb855abd366
- name: memcached_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-memcached_server
- version: 08c483f3c5d49c236194090534a015b67c8cded6
+ version: 2c8cd40e18902b9aa6fab87e9fd299d437ed0a78
- name: openstack_hosts
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-openstack_hosts
- version: a0d3b9c9756b6e95b0e034f3d0576fbb33607820
+ version: deb8d59ff40d9f828172933df6028202e6c9fb04
- name: os_keystone
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_keystone
- version: ffc9c9b5e681748ff3e54e43f22c921e83342a51
+ version: 782ec3eae6d43e00699b831859401b347273f7fd
- name: openstack_openrc
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-openstack_openrc
- version: b27229ef168aed7f2febf6991b2d7459ec8883ee
+ version: 452a227ea5cf50611832cd289e163d577791407a
- name: os_aodh
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_aodh
- version: bcd77b1e10a7054e9365da6a20848b393153d025
+ version: 0321e0bae7050b97fa8e3d66033fe177c31e2d63
- name: os_barbican
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_barbican
- version: 0797e8bdadd2fcf4696b22f0e18340c8d9539b09
+ version: 06ef75aa9aa9bd126bd17ab86a7c38f44d901203
- name: os_ceilometer
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_ceilometer
@@ -69,15 +69,15 @@
- name: os_cinder
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_cinder
- version: 6f5ab34e5a0694f3fc84e63c912e00e86e3de280
+ version: 9173876f6bfc5e2955d74628b32f5cff2e9e39c3
- name: os_designate
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_designate
- version: eac6d3c674397097d8adf722635252b1822c8f6c
+ version: dd13917240c7dc9fff9df7e042ba32fb002838ce
- name: os_glance
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_glance
- version: 47080919c937aace65fc7dc8e9670dbcfd910b88
+ version: 91e544aadae016c0e190d52a89ce0a1659a84641
- name: os_gnocchi
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_gnocchi
@@ -89,11 +89,11 @@
- name: os_horizon
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_horizon
- version: 71aa69b1425f5b5b2bdc274357b62a9b4b57ae8f
+ version: af4abbad26b4ab9ce3c50266bc212199e3e6aea8
- name: os_ironic
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_ironic
- version: 34205b6b99fc3cfe54eddbcde0380e626976e425
+ version: 91abf2ec56b9b4337e5e98d9ba6f2c04155331a1
- name: os_magnum
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_magnum
@@ -101,15 +101,15 @@
- name: os_molteniron
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_molteniron
- version: 58cff32e954ab817d07b8e0a136663c34d7f7b60
+ version: 0506e3c3f511518cbd5e7499e2a675b25d4ac967
- name: os_neutron
scm: git
src: https://github.com/manuelbuil/openstack-ansible-os_neutron
- version: pike-SFC-support
+ version: pike-SFC-support-Feb
- name: os_nova
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_nova
- version: 80e0d04822f7ddc5b8d574329e4eb8a76aea63ff
+ version: 312959bea6d4d577c6a4146ae81fa4044ac26d14
- name: os_octavia
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_octavia
@@ -117,19 +117,19 @@
- name: os_rally
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_rally
- version: b2658fb704fd3a1e8bce794b8bf87ac83931aa46
+ version: c91eb6cc61a1f4c2136084e0df758eed117f1bbb
- name: os_sahara
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_sahara
- version: e3e4f1bc8d72dd6fb7e26b8d0d364f9a60e16b0f
+ version: c2ad98dcda096c34e9b63d4e44c9a231ed093fb4
- name: os_swift
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_swift
- version: 0bb5979de285305f652694cee139390a8102c134
+ version: 8f5f4be2c1040220e40b8bddbdf5e3b1a1d35baa
- name: os_tempest
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_tempest
- version: 0fb52fcd130bee25f40cd515da69948821d5b504
+ version: bcdfa619fe46629bdf5aa8cde5d1e843e7a7b576
- name: os_trove
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-os_trove
@@ -137,27 +137,27 @@
- name: plugins
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-plugins
- version: 11aed400f86951593bb60d1e853574b67894b0b3
+ version: 3601c1e006890899f3c794cb8654bfaca6c32d58
- name: rabbitmq_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-rabbitmq_server
- version: fa80dfc0f8129e02f3f3b34bb7205889d3e5696c
+ version: 5add96f3d72fb07998da715c52c46eceb54d9c4a
- name: repo_build
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-repo_build
- version: d0079ff721b0f9c4682d57eccfadb36f365eea2b
+ version: 377c4376aef1c67c8f0cb3d3bca741bc102bf740
- name: repo_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-repo_server
- version: 8302adcb11cad4e6245fd6bd1bbb4db08d3b60e9
+ version: 63c6537fcb7fa688e1e82074ea958b9349f58cc7
- name: rsyslog_client
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_client
- version: f41638370114412b97c6523b4c626ca70f0337f4
+ version: 693255ee40a2908707fcc962d620d68008647a57
- name: rsyslog_server
scm: git
src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_server
- version: 61a3ab251f63c6156f2a6604ee1a822070e19c90
+ version: fa201bbadeb959f363ecba7046f052b2ee16e474
- name: sshd
scm: git
src: https://github.com/willshersystems/ansible-sshd
@@ -205,8 +205,8 @@
- name: os_tacker
scm: git
src: https://github.com/manuelbuil/openstack-ansible-os_tacker
- version: pike-suse-support
+ version: pike-SFC-support-Feb
- name: opendaylight
scm: git
src: https://git.opendaylight.org/gerrit/p/integration/packaging/ansible-opendaylight.git
- version: 2af197bd13f77d2a07878b160c00f8ceeebb3c34
+ version: 4aabce0605ef0f51eef4d6564cc7d779630706c5
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/defaults/repo_packages/opendaylight-master.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/defaults/repo_packages/opendaylight-master.yml
deleted file mode 100644
index f0743fc0..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/defaults/repo_packages/opendaylight-master.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-# Copyright 2017, Ericsson AB
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-networking_odl_git_repo: https://git.openstack.org/openstack/networking-odl
-networking_odl_git_install_branch: 7a3c5fee7deb01d9237f5d1cc43a17931999af02 # HEAD of "master" as of 24.11.2017
-networking_odl_project_group: neutron_all
-
-networking_sfc_git_repo: https://git.openstack.org/openstack/networking-sfc
-networking_sfc_git_install_branch: 899038b4d48c469af9f8c4982898478f32ba14a8 # HEAD of "master" as of 24.10.2017
-networking_sfc_project_group: neutron_all
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/openstack_services_master.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/openstack_services_master.yml
deleted file mode 100644
index 86501634..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/openstack_services_master.yml
+++ /dev/null
@@ -1,222 +0,0 @@
----
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-## NOTICE on items in this file:
-## * If you use anything in the *._git_install_branch field that is not a TAG
-## make sure to leave an in-line comment as to "why".
-
-## For the sake of anyone else editing this file:
-## * If you add services to this file please do so in alphabetical order.
-## * Every entry should be name spaced with the name of the client followed by an "_"
-## * All items with this file should be separated by `name_` note that the name of the
-## package should be one long name with no additional `_` separating it.
-
-
-### Before this is shipped all of these services should have a tag set as the branch,
-### or have a comment / reason attached to them as to why a tag can not work.
-
-
-## Global Requirements
-requirements_git_repo: https://git.openstack.org/openstack/requirements
-requirements_git_install_branch: 691711c0effddd9cbaaadba3d494c15bc422fdd5 # HEAD of "master" as of 24.11.2017
-
-
-## Aodh service
-aodh_git_repo: https://git.openstack.org/openstack/aodh
-aodh_git_install_branch: 359043dc774be847cb539d18d13e336d40453e72 # HEAD of "master" as of 24.11.2017
-aodh_git_project_group: aodh_all
-
-
-## Barbican service
-barbican_git_repo: https://git.openstack.org/openstack/barbican
-barbican_git_install_branch: 5617d605f2e12840933e4a9d6417912cdbb811d5 # HEAD of "master" as of 24.11.2017
-barbican_git_project_group: barbican_all
-
-
-## Ceilometer service
-ceilometer_git_repo: https://git.openstack.org/openstack/ceilometer
-ceilometer_git_install_branch: bd464f1f572ba150f52e284de430d13045dc6c18 # HEAD of "master" as of 24.11.2017
-ceilometer_git_project_group: ceilometer_all
-
-
-## Cinder service
-cinder_git_repo: https://git.openstack.org/openstack/cinder
-cinder_git_install_branch: 80558687d0fa55f2adf699e7369ebe3dbc3591bf # HEAD of "master" as of 24.11.2017
-cinder_git_project_group: cinder_all
-
-
-## Designate service
-designate_git_repo: https://git.openstack.org/openstack/designate
-designate_git_install_branch: 2f75586379e8d611f37e06d385e79d0bc2c84ca1 # HEAD of "master" as of 24.11.2017
-designate_git_project_group: designate_all
-
-
-## Horizon Designate dashboard plugin
-designate_dashboard_git_repo: https://git.openstack.org/openstack/designate-dashboard
-designate_dashboard_git_install_branch: 571e127e5f853aa4dbdd377d831e32f8ff81eafe # HEAD of "master" as of 24.11.2017
-designate_dashboard_git_project_group: horizon_all
-
-
-## Dragonflow service
-dragonflow_git_repo: https://git.openstack.org/openstack/dragonflow
-dragonflow_git_install_branch: 7bf00cf315659252f03f6c65f6159a924da6f978 # HEAD of "master" as of 24.11.2017
-dragonflow_git_project_group: neutron_all
-
-
-## Glance service
-glance_git_repo: https://git.openstack.org/openstack/glance
-glance_git_install_branch: d88bd2ca8ef95810441dae640d3c6b9e79eca353 # HEAD of "master" as of 24.11.2017
-glance_git_project_group: glance_all
-
-
-## Heat service
-heat_git_repo: https://git.openstack.org/openstack/heat
-heat_git_install_branch: f4a06c2a92a361dbb401107b4ea1ab60972f473e # HEAD of "master" as of 24.11.2017
-heat_git_project_group: heat_all
-
-
-## Horizon service
-horizon_git_repo: https://git.openstack.org/openstack/horizon
-horizon_git_install_branch: 846d269d90e01e463b510474040e0ad984a5679f # HEAD of "master" as of 24.11.2017
-horizon_git_project_group: horizon_all
-
-## Horizon Ironic dashboard plugin
-ironic_dashboard_git_repo: https://git.openstack.org/openstack/ironic-ui
-ironic_dashboard_git_install_branch: d6199d51171e6c8700663b0b0618ee0adf033b4d # HEAD of "master" as of 24.11.2017
-ironic_dashboard_git_project_group: horizon_all
-
-## Horizon Magnum dashboard plugin
-magnum_dashboard_git_repo: https://git.openstack.org/openstack/magnum-ui
-magnum_dashboard_git_install_branch: 6160d903fae9c652b459c93c218e0ea75924a85d # HEAD of "master" as of 24.11.2017
-magnum_dashboard_git_project_group: horizon_all
-
-## Horizon LBaaS dashboard plugin
-neutron_lbaas_dashboard_git_repo: https://git.openstack.org/openstack/neutron-lbaas-dashboard
-neutron_lbaas_dashboard_git_install_branch: ef650294bcc7447d441e6a710c39d64e384e1b27 # HEAD of "master" as of 24.11.2017
-neutron_lbaas_dashboard_git_project_group: horizon_all
-
-## Horizon FWaaS dashboard plugin
-neutron_fwaas_dashboard_git_repo: https://git.openstack.org//openstack/neutron-fwaas-dashboard
-neutron_fwaas_dashboard_git_install_branch: 6de122d4753a6db24d2dc4c22a71e702ed980e82 # HEAD of "master" as of 24.11.2017
-neutron_fwaas_dashboard_git_project_group: horizon_all
-
-## Horizon Sahara dashboard plugin
-sahara_dashboard_git_repo: https://git.openstack.org/openstack/sahara-dashboard
-sahara_dashboard_git_install_branch: 3e5c59e6229dac8b303029058fcee9d61200ebc8 # HEAD of "master" as of 24.11.2017
-sahara_dashboard_git_project_group: horizon_all
-
-
-## Keystone service
-keystone_git_repo: https://git.openstack.org/openstack/keystone
-keystone_git_install_branch: 70fe4ec09b55def21361a32c8fa7f12e7c891ab1 # HEAD of "master" as of 24.11.2017
-keystone_git_project_group: keystone_all
-
-
-## Neutron service
-neutron_git_repo: https://git.openstack.org/openstack/neutron
-neutron_git_install_branch: d1277c1630570ca45b490c48371e3f7e97be78c3 # HEAD of "master" as of 24.11.2017
-neutron_git_project_group: neutron_all
-
-neutron_lbaas_git_repo: https://git.openstack.org/openstack/neutron-lbaas
-neutron_lbaas_git_install_branch: b1123e7a759248dfa63afdf8b86aafd692572ebd # HEAD of "master" as of 24.11.2017
-neutron_lbaas_git_project_group: neutron_all
-
-neutron_vpnaas_git_repo: https://git.openstack.org/openstack/neutron-vpnaas
-neutron_vpnaas_git_install_branch: 79e4eb81dd05588bcf68b92d46c62f0d26153542 # HEAD of "master" as of 24.11.2017
-neutron_vpnaas_git_project_group: neutron_all
-
-neutron_fwaas_git_repo: https://git.openstack.org/openstack/neutron-fwaas
-neutron_fwaas_git_install_branch: 74eac2ca2980e6162d9c88ee6bd48830386c392a # HEAD of "master" as of 24.11.2017
-neutron_fwaas_git_project_group: neutron_all
-
-neutron_dynamic_routing_git_repo: https://git.openstack.org/openstack/neutron-dynamic-routing
-neutron_dynamic_routing_git_install_branch: 183c3fa4840d22be1974534eb9e1b28b552f4a42 # HEAD of "master" as of 24.11.2017
-neutron_dynamic_routing_git_project_group: neutron_all
-
-networking_calico_git_repo: https://git.openstack.org/openstack/networking-calico
-networking_calico_git_install_branch: 9688df1a3d1d8b3fd9ba367e82fe6b0559416728 # HEAD of "master" as of 24.11.2017
-networking_calico_git_project_group: neutron_all
-
-## Nova service
-nova_git_repo: https://git.openstack.org/openstack/nova
-nova_git_install_branch: 22a790ef45b0523e8cf2ed97d14e050431c90fd9 # HEAD of "master" as of 24.11.2017
-nova_git_project_group: nova_all
-
-
-## PowerVM Virt Driver
-nova_powervm_git_repo: https://git.openstack.org/openstack/nova-powervm
-nova_powervm_git_install_branch: f2de4441e39b0f66cf31f854b228e9e7037f04de # HEAD of "master" as of 24.11.2017
-nova_powervm_git_project_group: nova_all
-
-
-## LXD Virt Driver
-nova_lxd_git_repo: https://git.openstack.org/openstack/nova-lxd
-nova_lxd_git_install_branch: e498de603b31c189fd32a6067d45a36575b96b0a # HEAD of "master" as of 24.11.2017
-nova_lxd_git_project_group: nova_all
-
-
-## Sahara service
-sahara_git_repo: https://git.openstack.org/openstack/sahara
-sahara_git_install_branch: 395856c513b1efad82db8fa78fb1cbfe0f3a6749 # HEAD of "master" as of 24.11.2017
-sahara_git_project_group: sahara_all
-
-
-## Swift service
-swift_git_repo: https://git.openstack.org/openstack/swift
-swift_git_install_branch: 3135878d2fe9909f49fcadeeb9cc6c6933d06127 # HEAD of "master" as of 24.11.2017
-swift_git_project_group: swift_all
-
-
-## Swift3 middleware
-swift_swift3_git_repo: https://git.openstack.org/openstack/swift3
-swift_swift3_git_install_branch: 1fb6a30ee59a16cd4b6c49bab963ff9e3f974580 # HEAD of "master" as of 24.11.2017
-swift_swift3_git_project_group: swift_all
-
-
-## Ironic service
-ironic_git_repo: https://git.openstack.org/openstack/ironic
-ironic_git_install_branch: 27ce77142bfb9ac56e85db37e0923a0eb47f2f7a # HEAD of "master" as of 24.11.2017
-ironic_git_project_group: ironic_all
-
-## Magnum service
-magnum_git_repo: https://git.openstack.org/openstack/magnum
-magnum_git_install_branch: 4bf3b3263870a4ec81cf372713cacec446b3ee84 # HEAD of "master" as of 24.11.2017
-magnum_git_project_group: magnum_all
-
-## Trove service
-trove_git_repo: https://git.openstack.org/openstack/trove
-trove_git_install_branch: b09d0eb3135047891a369d3c0eb2c6e9ae649f5b # HEAD of "master" as of 24.11.2017
-trove_git_project_group: trove_all
-
-## Horizon Trove dashboard plugin
-trove_dashboard_git_repo: https://git.openstack.org/openstack/trove-dashboard
-trove_dashboard_git_install_branch: 14a4609606d42cae827b8fc6b44453caea258976 # HEAD of "master" as of 24.11.2017
-trove_dashboard_git_project_group: horizon_all
-
-## Octavia service
-octavia_git_repo: https://git.openstack.org/openstack/octavia
-octavia_git_install_branch: bb9bb2d05b268cff9846e0a09ad3940be5fe5a80 # HEAD of "master" as of 24.11.2017
-octavia_git_project_group: octavia_all
-
-## Molteniron service
-molteniron_git_repo: https://git.openstack.org/openstack/molteniron
-molteniron_git_install_branch: 094276cda77d814d07ad885e7d63de8d1243750a # HEAD of "master" as of 24.11.2017
-molteniron_git_project_group: molteniron_all
-
-## Tacker service
-tacker_git_repo: https://git.openstack.org/openstack/tacker
-tacker_git_install_branch: cc03b5d952527b8cad2e2e309a97d55afb1ca559 # HEAD of "master" as of 24.11.2017
-tacker_git_project_group: tacker_all
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/openstack_services_pike.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/openstack_services_pike.yml
index cecd7db1..59182abb 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/openstack_services_pike.yml
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/openstack_services_pike.yml
@@ -31,42 +31,42 @@
## Global Requirements
requirements_git_repo: https://git.openstack.org/openstack/requirements
-requirements_git_install_branch: 732861162db604622ac23ad65c070e3f69d0b44e # HEAD of "stable/pike" as of 10.11.2017
+requirements_git_install_branch: 6b102588bd6c817d1957236fc24779b8912b3353 # HEAD of "stable/pike" as of 16.02.2018
## Aodh service
aodh_git_repo: https://git.openstack.org/openstack/aodh
-aodh_git_install_branch: ed3ce41fa0ae0173601b683748265e502b84553b # HEAD of "stable/pike" as of 10.11.2017
+aodh_git_install_branch: f0ec11eef92d39bd3f07e026a404e2e7aa7fa6bc # HEAD of "stable/pike" as of 16.02.2018
aodh_git_project_group: aodh_all
## Barbican service
barbican_git_repo: https://git.openstack.org/openstack/barbican
-barbican_git_install_branch: ec47f0358a17fde8fa1253253e21af07f72b7fa3 # HEAD of "stable/pike" as of 10.11.2017
+barbican_git_install_branch: 1d20692a6fe77eacdafdd307498bb11da285c437 # HEAD of "stable/pike" as of 16.02.2018
barbican_git_project_group: barbican_all
## Ceilometer service
ceilometer_git_repo: https://git.openstack.org/openstack/ceilometer
-ceilometer_git_install_branch: 8f10d547a4c4eeac0af2a5c833881dbe48c26464 # HEAD of "stable/pike" as of 10.11.2017
+ceilometer_git_install_branch: 35a33b6daacfb9792382d4d5da4880737ab78fed # HEAD of "stable/pike" as of 16.02.2018
ceilometer_git_project_group: ceilometer_all
## Cinder service
cinder_git_repo: https://git.openstack.org/openstack/cinder
-cinder_git_install_branch: 34928801b06e1162895a64c4e95c2f2692303a50 # HEAD of "stable/pike" as of 10.11.2017
+cinder_git_install_branch: 4fb3a702ba8c3de24c41a6f706597bfa81e60435 # HEAD of "stable/pike" as of 16.02.2018
cinder_git_project_group: cinder_all
## Designate service
designate_git_repo: https://git.openstack.org/openstack/designate
-designate_git_install_branch: 6beba54a71510525d5bbc4956d20d27bffa982e5 # HEAD of "stable/pike" as of 10.11.2017
+designate_git_install_branch: 12fbb38799d6731862f07a9e299d5476a5c02f90 # HEAD of "stable/pike" as of 16.02.2018
designate_git_project_group: designate_all
## Horizon Designate dashboard plugin
designate_dashboard_git_repo: https://git.openstack.org/openstack/designate-dashboard
-designate_dashboard_git_install_branch: bc128a7c29a427933fc4ca94a7510ef8c97e5206 # HEAD of "stable/pike" as of 10.11.2017
+designate_dashboard_git_install_branch: bc128a7c29a427933fc4ca94a7510ef8c97e5206 # HEAD of "stable/pike" as of 16.02.2018
designate_dashboard_git_project_group: horizon_all
@@ -78,140 +78,140 @@ dragonflow_git_project_group: neutron_all
## Glance service
glance_git_repo: https://git.openstack.org/openstack/glance
-glance_git_install_branch: 06af2eb5abe0332f7035a7d7c2fbfd19fbc4dae7 # HEAD of "stable/pike" as of 10.11.2017
+glance_git_install_branch: 5c1f76d91012c9cc7b12f76e917af9e9a9bb7667 # HEAD of "stable/pike" as of 16.02.2018
glance_git_project_group: glance_all
## Heat service
heat_git_repo: https://git.openstack.org/openstack/heat
-heat_git_install_branch: 31175a5641035abeec58c3f135ad09d3f231ac41 # HEAD of "stable/pike" as of 10.11.2017
+heat_git_install_branch: cda1cf15f4a20355c58bd26e2f4ce00007a15266 # HEAD of "stable/pike" as of 16.02.2018
heat_git_project_group: heat_all
## Horizon service
horizon_git_repo: https://git.openstack.org/openstack/horizon
-horizon_git_install_branch: 246ff9f81248a00a434e66d18fad70519ba811cc # HEAD of "stable/pike" as of 10.11.2017
+horizon_git_install_branch: 5d77b95586fd5491b3dee5d5c199c34f53680370 # HEAD of "stable/pike" as of 16.02.2018
horizon_git_project_group: horizon_all
## Horizon Ironic dashboard plugin
ironic_dashboard_git_repo: https://git.openstack.org/openstack/ironic-ui
-ironic_dashboard_git_install_branch: e2cba8ed8745b8ffcaa60d26ab69fd93f61582ad # HEAD of "stable/pike" as of 10.11.2017
+ironic_dashboard_git_install_branch: ca9f9f1e9c2baba5415bee2e7961221a3daa6da6 # HEAD of "stable/pike" as of 16.02.2018
ironic_dashboard_git_project_group: horizon_all
## Horizon Magnum dashboard plugin
magnum_dashboard_git_repo: https://git.openstack.org/openstack/magnum-ui
-magnum_dashboard_git_install_branch: 0b9fc50aada1a3e214acaad1204b48c96a549e5f # HEAD of "stable/pike" as of 10.11.2017
+magnum_dashboard_git_install_branch: 0b9fc50aada1a3e214acaad1204b48c96a549e5f # HEAD of "stable/pike" as of 16.02.2018
magnum_dashboard_git_project_group: horizon_all
## Horizon LBaaS dashboard plugin
neutron_lbaas_dashboard_git_repo: https://git.openstack.org/openstack/neutron-lbaas-dashboard
-neutron_lbaas_dashboard_git_install_branch: a5a05a27e7cab99dc379774f1d01c0076818e539 # HEAD of "stable/pike" as of 10.11.2017
+neutron_lbaas_dashboard_git_install_branch: f51341588490baca3795b6f068347cd2260d2e3b # HEAD of "stable/pike" as of 16.02.2018
neutron_lbaas_dashboard_git_project_group: horizon_all
## Horizon Sahara dashboard plugin
sahara_dashboard_git_repo: https://git.openstack.org/openstack/sahara-dashboard
-sahara_dashboard_git_install_branch: 00c241d97bd3a116513580cfe8006480723d7c17 # HEAD of "stable/pike" as of 10.11.2017
+sahara_dashboard_git_install_branch: d56477dba6f4073ab4df6126bb489743779a3270 # HEAD of "stable/pike" as of 16.02.2018
sahara_dashboard_git_project_group: horizon_all
## Keystone service
keystone_git_repo: https://git.openstack.org/openstack/keystone
-keystone_git_install_branch: d07677aba54362a4a3aa2d165b155105ffe30d73 # HEAD of "stable/pike" as of 10.11.2017
+keystone_git_install_branch: e8953d03926b2a5594bbc3d5d8af6854b97cddb7 # HEAD of "stable/pike" as of 16.02.2018
keystone_git_project_group: keystone_all
## Neutron service
neutron_git_repo: https://git.openstack.org/openstack/neutron
-neutron_git_install_branch: bd64409bbb9465143ea6df9db4d53a7679599b69 # HEAD of "stable/pike" as of 10.11.2017
+neutron_git_install_branch: f9fcf5e34ac68ec44c4b61bd76117d2d9213792b # HEAD of "stable/pike" as of 16.02.2018
neutron_git_project_group: neutron_all
neutron_lbaas_git_repo: https://git.openstack.org/openstack/neutron-lbaas
-neutron_lbaas_git_install_branch: f0b6a85877ba9c31c41fc6c8b96ffd2b63e6afb9 # HEAD of "stable/pike" as of 10.11.2017
+neutron_lbaas_git_install_branch: 49448db1a69d53ad0d137216b1805690a7daef45 # HEAD of "stable/pike" as of 16.02.2018
neutron_lbaas_git_project_group: neutron_all
neutron_vpnaas_git_repo: https://git.openstack.org/openstack/neutron-vpnaas
-neutron_vpnaas_git_install_branch: 60e4e7113b5fbbf28e97ebce2f40b7f1675200e6 # HEAD of "stable/pike" as of 10.11.2017
+neutron_vpnaas_git_install_branch: 60e4e7113b5fbbf28e97ebce2f40b7f1675200e6 # HEAD of "stable/pike" as of 16.02.2018
neutron_vpnaas_git_project_group: neutron_all
neutron_fwaas_git_repo: https://git.openstack.org/openstack/neutron-fwaas
-neutron_fwaas_git_install_branch: c2bafa999f7ea45687d5a3d42739e465564e99d1 # HEAD of "stable/pike" as of 10.11.2017
+neutron_fwaas_git_install_branch: ea4e95913d843d72c8a3079203171813ba69895d # HEAD of "stable/pike" as of 16.02.2018
neutron_fwaas_git_project_group: neutron_all
neutron_dynamic_routing_git_repo: https://git.openstack.org/openstack/neutron-dynamic-routing
-neutron_dynamic_routing_git_install_branch: 9098d4447581117e857d2f86fb4a0508b5ffbb6a # HEAD of "stable/pike" as of 10.11.2017
+neutron_dynamic_routing_git_install_branch: 9098d4447581117e857d2f86fb4a0508b5ffbb6a # HEAD of "stable/pike" as of 16.02.2018
neutron_dynamic_routing_git_project_group: neutron_all
networking_calico_git_repo: https://git.openstack.org/openstack/networking-calico
-networking_calico_git_install_branch: 9688df1a3d1d8b3fd9ba367e82fe6b0559416728 # HEAD of "master" as of 10.11.2017
+networking_calico_git_install_branch: cc3628125775f2f1b3c57c95db3d6b50278dc92b # HEAD of "master" as of 16.02.2018
networking_calico_git_project_group: neutron_all
## Nova service
nova_git_repo: https://git.openstack.org/openstack/nova
-nova_git_install_branch: 8fdb1372138f8371a4d414deb38b86e9197b8649 # HEAD of "stable/pike" as of 10.11.2017
+nova_git_install_branch: 806eda3da84d6f9b47c036ff138415458b837536 # HEAD of "stable/pike" as of 16.02.2018
nova_git_project_group: nova_all
## PowerVM Virt Driver
nova_powervm_git_repo: https://git.openstack.org/openstack/nova-powervm
-nova_powervm_git_install_branch: e0b516ca36fa5dfd38ae6f7ea97afd9a52f313ed # HEAD of "stable/pike" as of 10.11.2017
+nova_powervm_git_install_branch: e0b516ca36fa5dfd38ae6f7ea97afd9a52f313ed # HEAD of "stable/pike" as of 16.02.2018
nova_powervm_git_project_group: nova_all
## LXD Virt Driver
nova_lxd_git_repo: https://git.openstack.org/openstack/nova-lxd
-nova_lxd_git_install_branch: 9747c274138d9ef40512d5015e9e581f6bbec5d9 # HEAD of "stable/pike" as of 10.11.2017
+nova_lxd_git_install_branch: 9747c274138d9ef40512d5015e9e581f6bbec5d9 # HEAD of "stable/pike" as of 16.02.2018
nova_lxd_git_project_group: nova_all
## Sahara service
sahara_git_repo: https://git.openstack.org/openstack/sahara
-sahara_git_install_branch: 3ee0da5ea09904125c44e1f9d1a9b83554b1a1cd # HEAD of "stable/pike" as of 10.11.2017
+sahara_git_install_branch: a39c690aeca04dcde56384448ce577fd50eb8bf1 # HEAD of "stable/pike" as of 16.02.2018
sahara_git_project_group: sahara_all
## Swift service
swift_git_repo: https://git.openstack.org/openstack/swift
-swift_git_install_branch: 0344d6eb5afc723adc7bacf4b4e2aaf04da47548 # HEAD of "stable/pike" as of 10.11.2017
+swift_git_install_branch: 0ff2d5e3b85a42914a89eac64ed9a87172334a2c # HEAD of "stable/pike" as of 16.02.2018
swift_git_project_group: swift_all
## Swift3 middleware
swift_swift3_git_repo: https://git.openstack.org/openstack/swift3
-swift_swift3_git_install_branch: 1fb6a30ee59a16cd4b6c49bab963ff9e3f974580 # HEAD of "master" as of 10.11.2017
+swift_swift3_git_install_branch: 1c117c96dda8113c3398c16e68b61efef397de74 # HEAD of "master" as of 16.02.2018
swift_swift3_git_project_group: swift_all
## Ironic service
ironic_git_repo: https://git.openstack.org/openstack/ironic
-ironic_git_install_branch: c163e78629eac4e696ae62dc9a29a0fc77ca463f # HEAD of "stable/pike" as of 10.11.2017
+ironic_git_install_branch: f6f55a74526e906d061f9abcd9a1ad704f6dcfe5 # HEAD of "stable/pike" as of 16.02.2018
ironic_git_project_group: ironic_all
## Magnum service
magnum_git_repo: https://git.openstack.org/openstack/magnum
-magnum_git_install_branch: 839884593e6f6dabaebe401b013465c836fefc84 # HEAD of "stable/pike" as of 10.11.2017
+magnum_git_install_branch: 6d9914de87a67cd5dd33f9f37b49a28486c52cc6 # HEAD of "stable/pike" as of 16.02.2018
magnum_git_project_group: magnum_all
## Trove service
trove_git_repo: https://git.openstack.org/openstack/trove
-trove_git_install_branch: e6d4b4b3fe1768348c9df815940b97cecb5e7ee2 # HEAD of "stable/pike" as of 10.11.2017
+trove_git_install_branch: 2a39699f8612a8f82dfce68949b714d19a102499 # HEAD of "stable/pike" as of 16.02.2018
trove_git_project_group: trove_all
## Horizon Trove dashboard plugin
trove_dashboard_git_repo: https://git.openstack.org/openstack/trove-dashboard
-trove_dashboard_git_install_branch: 387c3358555ee539f7abbbf4875497497e12c265 # HEAD of "stable/pike" as of 10.11.2017
+trove_dashboard_git_install_branch: bffd0776d139f38f96ce8ded07ccde4b5a41bc7a # HEAD of "stable/pike" as of 16.02.2018
trove_dashboard_git_project_group: horizon_all
## Octavia service
octavia_git_repo: https://git.openstack.org/openstack/octavia
-octavia_git_install_branch: 534e1f932cff19e6a54e256c56b7e3479755760d # HEAD of "stable/pike" as of 10.11.2017
+octavia_git_install_branch: a39cf133518716dc1a60069f5aa46afa384db3a8 # HEAD of "stable/pike" as of 16.02.2018
octavia_git_project_group: octavia_all
## Molteniron service
molteniron_git_repo: https://git.openstack.org/openstack/molteniron
-molteniron_git_install_branch: 094276cda77d814d07ad885e7d63de8d1243750a # HEAD of "master" as of 10.11.2017
+molteniron_git_install_branch: 094276cda77d814d07ad885e7d63de8d1243750a # HEAD of "master" as of 16.02.2018
molteniron_git_project_group: molteniron_all
## Tacker service
tacker_git_repo: https://github.com/manuelbuil/tacker
-tacker_git_install_branch: pike-insecured-bug-fixed
+tacker_git_install_branch: pike-SFC-support-Feb
tacker_git_project_group: tacker_all
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/add-sfc-repos-and-inventory-master.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/add-sfc-repos-and-inventory-master.yml
deleted file mode 100644
index 1cffdf8e..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/add-sfc-repos-and-inventory-master.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-
-- name: Add networking-odl and networking-sfc repos
- copy:
- src: openstack-ansible/playbooks/defaults/repo_packages/opendaylight-master.yml
- dest: "{{OPENSTACK_OSA_PATH}}/playbooks/defaults/repo_packages/opendaylight.yml"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/add-sfc-repos-and-inventory-pike.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/add-sfc-repos-and-inventory-pike.yml
index 3396b83e..3c80fa5e 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/add-sfc-repos-and-inventory-pike.yml
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/add-sfc-repos-and-inventory-pike.yml
@@ -3,14 +3,14 @@
- name: Add networking-odl and networking-sfc repos
copy:
src: openstack-ansible/playbooks/defaults/repo_packages/opendaylight-pike.yml
- dest: "{{OPENSTACK_OSA_PATH}}/playbooks/defaults/repo_packages/opendaylight.yml"
+ dest: "{{openstack_osa_path}}/playbooks/defaults/repo_packages/opendaylight.yml"
- name: Provide nova inventory which adds OVS-NSH hosts
copy:
src: openstack-ansible/playbooks/inventory_odl/env.d/nova.yml
- dest: "{{OPENSTACK_OSA_PATH}}/playbooks/inventory/env.d/nova.yml"
+ dest: "{{openstack_osa_path}}/playbooks/inventory/env.d/nova.yml"
- name: Provide neutron inventory which adds ODL hosts
copy:
src: openstack-ansible/playbooks/inventory_odl/env.d/neutron.yml
- dest: "{{OPENSTACK_OSA_PATH}}/playbooks/inventory/env.d/neutron.yml"
+ dest: "{{openstack_osa_path}}/playbooks/inventory/env.d/neutron.yml"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-files-master.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-files-master.yml
index f58de4c2..fbaa7301 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-files-master.yml
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-files-master.yml
@@ -2,29 +2,19 @@
#
- name: copy user_sfc_scenarios_variables.yml (SUSE)
- copy:
- src: "{{XCI_FLAVOR}}/user_sfc_scenarios_variables_suse.yml"
- dest: "{{OPENSTACK_OSA_ETC_PATH}}/user_sfc_scenarios_variables.yml"
+ template:
+ src: "{{xci_flavor}}/user_sfc_scenarios_variables_suse.yml.j2"
+ dest: "{{openstack_osa_etc_path}}/user_sfc_scenarios_variables.yml"
when: ansible_pkg_mgr == 'zypper'
- name: copy user_sfc_scenarios_variables.yml (Ubuntu)
- copy:
- src: "{{XCI_FLAVOR}}/user_sfc_scenarios_variables_ubuntu.yml"
- dest: "{{OPENSTACK_OSA_ETC_PATH}}/user_sfc_scenarios_variables.yml"
+ template:
+ src: "{{xci_flavor}}/user_sfc_scenarios_variables_ubuntu.yml.j2"
+ dest: "{{openstack_osa_etc_path}}/user_sfc_scenarios_variables.yml"
when: ansible_pkg_mgr == 'apt'
-- name: copy OPNFV role requirements
- copy:
- src: "ansible-role-requirements-master.yml"
- dest: "{{OPENSTACK_OSA_PATH}}/ansible-role-requirements.yml"
-
-- name: copy openstack_services.yml with tacker
- copy:
- src: "tacker_files/openstack_services_master.yml"
- dest: "{{OPENSTACK_OSA_PATH}}/playbooks/defaults/repo_packages/openstack_services.yml"
-
# To get the mano_host variable (can only be defined here for the inventory)
- name: copy openstack_user_config.yml
copy:
- src: "tacker_files/{{XCI_FLAVOR}}/openstack_user_config.yml"
- dest: "{{OPENSTACK_OSA_ETC_PATH}}/openstack_user_config.yml"
+ src: "tacker_files/{{xci_flavor}}/openstack_user_config.yml"
+ dest: "{{openstack_osa_etc_path}}/openstack_user_config.yml"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-files-pike.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-files-pike.yml
index 5459dfed..a9d197da 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-files-pike.yml
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-files-pike.yml
@@ -3,55 +3,55 @@
- name: copy user_sfc_scenarios_variables.yml
copy:
- src: "{{XCI_FLAVOR}}/user_sfc_scenarios_variables_pike.yml"
- dest: "{{OPENSTACK_OSA_ETC_PATH}}/user_sfc_scenarios_variables.yml"
+ src: "{{xci_flavor}}/user_sfc_scenarios_variables_pike.yml"
+ dest: "{{openstack_osa_etc_path}}/user_sfc_scenarios_variables.yml"
- name: copy OPNFV role requirements
copy:
src: "ansible-role-requirements-pike.yml"
- dest: "{{OPENSTACK_OSA_PATH}}/ansible-role-requirements.yml"
+ dest: "{{openstack_osa_path}}/ansible-role-requirements.yml"
- name: copy openstack_user_config.yml
copy:
- src: "tacker_files/{{XCI_FLAVOR}}/openstack_user_config.yml"
- dest: "{{OPENSTACK_OSA_ETC_PATH}}/openstack_user_config.yml"
+ src: "tacker_files/{{xci_flavor}}/openstack_user_config.yml"
+ dest: "{{openstack_osa_etc_path}}/openstack_user_config.yml"
- name: copy tacker inventory file
copy:
src: "tacker_files/tacker.yml"
- dest: "{{OPENSTACK_OSA_ETC_PATH}}/env.d/tacker.yml"
+ dest: "{{openstack_osa_etc_path}}/env.d/tacker.yml"
- name: copy user_secrets.yml for tacker
copy:
src: "tacker_files/user_secrets.yml"
- dest: "{{OPENSTACK_OSA_ETC_PATH}}/user_secrets.yml"
+ dest: "{{openstack_osa_etc_path}}/user_secrets.yml"
- name: copy haproxy_config.yml for tacker
copy:
src: "tacker_files/haproxy_config.yml"
- dest: "{{OPENSTACK_OSA_PATH}}/group_vars/all/haproxy_config.yml"
+ dest: "{{openstack_osa_path}}/group_vars/all/haproxy_config.yml"
- name: copy openstack_services.yml with tacker
copy:
src: "tacker_files/openstack_services_pike.yml"
- dest: "{{OPENSTACK_OSA_PATH}}/playbooks/defaults/repo_packages/openstack_services.yml"
+ dest: "{{openstack_osa_path}}/playbooks/defaults/repo_packages/openstack_services.yml"
- name: copy all/tacker.yml
copy:
src: "tacker_files/all_tacker.yml"
- dest: "{{OPENSTACK_OSA_PATH}}/group_vars/all/tacker.yml"
+ dest: "{{openstack_osa_path}}/group_vars/all/tacker.yml"
- name: copy tacker_all.yml
copy:
src: "tacker_files/tacker_all.yml"
- dest: "{{OPENSTACK_OSA_PATH}}/group_vars/tacker_all.yml"
+ dest: "{{openstack_osa_path}}/group_vars/tacker_all.yml"
- name: copy setup-openstack.yml
copy:
src: "tacker_files/setup-openstack.yml"
- dest: "{{OPENSTACK_OSA_PATH}}/playbooks/setup-openstack.yml"
+ dest: "{{openstack_osa_path}}/playbooks/setup-openstack.yml"
- name: copy os-tacker-install.yml
copy:
src: "tacker_files/os-tacker-install.yml"
- dest: "{{OPENSTACK_OSA_PATH}}/playbooks/os-tacker-install.yml"
+ dest: "{{openstack_osa_path}}/playbooks/os-tacker-install.yml"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/main.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/main.yml
index 819ef203..628d8af8 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/main.yml
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/main.yml
@@ -10,16 +10,12 @@
- name: Add SFC repos and inventory for Pike
include: add-sfc-repos-and-inventory-pike.yml
- when: OPENSTACK_OSA_VERSION == "stable/pike"
-
-- name: Add SFC repos and inventory for master
- include: add-sfc-repos-and-inventory-master.yml
- when: OPENSTACK_OSA_VERSION != "stable/pike"
+ when: openstack_osa_version == "stable/pike"
- name: Copy the OSA not-yet-upstreamed files for Pike
include: copy-OSA-files-pike.yml
- when: OPENSTACK_OSA_VERSION == "stable/pike"
+ when: openstack_osa_version == "stable/pike"
- name: Copy the OSA not-yet-upstreamed files for master
include: copy-OSA-files-master.yml
- when: OPENSTACK_OSA_VERSION != "stable/pike"
+ when: openstack_osa_version != "stable/pike"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/user_sfc_scenarios_variables_suse.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/ha/user_sfc_scenarios_variables_suse.yml.j2
index 94673632..435ec9df 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/user_sfc_scenarios_variables_suse.yml
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/ha/user_sfc_scenarios_variables_suse.yml.j2
@@ -18,6 +18,7 @@
# ## the defaults for each role to find additional override options.
# ##
+{% raw %}
# # Debug and Verbose options.
debug: false
@@ -115,3 +116,8 @@ openstack_host_specific_kernel_modules:
- name: openvswitch
openstack_host_metal_distro_packages: "{{ ovs_nsh_required_metal_packages + _openstack_host_metal_distro_packages }}"
+{% endraw %}
+
+{% if odl_repo_version is defined %}
+odl_version: "{{ odl_repo_version }}"
+{% endif %}
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/user_sfc_scenarios_variables_ubuntu.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/ha/user_sfc_scenarios_variables_ubuntu.yml.j2
index 0a32b133..9cc27279 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/user_sfc_scenarios_variables_ubuntu.yml
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/ha/user_sfc_scenarios_variables_ubuntu.yml.j2
@@ -18,6 +18,7 @@
# ## the defaults for each role to find additional override options.
# ##
+{% raw %}
# # Debug and Verbose options.
debug: false
@@ -113,3 +114,8 @@ ovs_nsh_required_metal_packages:
- openvswitch-datapath-dkms
openstack_host_metal_distro_packages: "{{ ovs_nsh_required_metal_packages + _openstack_host_metal_distro_packages }}"
+{% endraw %}
+
+{% if odl_repo_version is defined %}
+odl_version: "{{ odl_repo_version }}"
+{% endif %}
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/user_sfc_scenarios_variables_suse.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/mini/user_sfc_scenarios_variables_suse.yml.j2
index 7da0187b..32c73c24 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/user_sfc_scenarios_variables_suse.yml
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/mini/user_sfc_scenarios_variables_suse.yml.j2
@@ -18,6 +18,7 @@
# ## the defaults for each role to find additional override options.
# ##
+{% raw %}
# # Debug and Verbose options.
debug: false
@@ -114,3 +115,8 @@ openstack_host_specific_kernel_modules:
- name: openvswitch
openstack_host_metal_distro_packages: "{{ ovs_nsh_required_metal_packages + _openstack_host_metal_distro_packages }}"
+{% endraw %}
+
+{% if odl_repo_version is defined %}
+odl_version: "{{ odl_repo_version }}"
+{% endif %}
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/user_sfc_scenarios_variables_ubuntu.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/mini/user_sfc_scenarios_variables_ubuntu.yml.j2
index e323b3d9..bc554090 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/user_sfc_scenarios_variables_ubuntu.yml
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/mini/user_sfc_scenarios_variables_ubuntu.yml.j2
@@ -18,6 +18,7 @@
# ## the defaults for each role to find additional override options.
# ##
+{% raw %}
# # Debug and Verbose options.
debug: false
@@ -112,3 +113,8 @@ ovs_nsh_required_metal_packages:
- openvswitch-datapath-dkms
openstack_host_metal_distro_packages: "{{ ovs_nsh_required_metal_packages + _openstack_host_metal_distro_packages }}"
+{% endraw %}
+
+{% if odl_repo_version is defined %}
+odl_version: "{{ odl_repo_version }}"
+{% endif %}
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/user_sfc_scenarios_variables_suse.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/noha/user_sfc_scenarios_variables_suse.yml.j2
index 7da0187b..32c73c24 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/user_sfc_scenarios_variables_suse.yml
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/noha/user_sfc_scenarios_variables_suse.yml.j2
@@ -18,6 +18,7 @@
# ## the defaults for each role to find additional override options.
# ##
+{% raw %}
# # Debug and Verbose options.
debug: false
@@ -114,3 +115,8 @@ openstack_host_specific_kernel_modules:
- name: openvswitch
openstack_host_metal_distro_packages: "{{ ovs_nsh_required_metal_packages + _openstack_host_metal_distro_packages }}"
+{% endraw %}
+
+{% if odl_repo_version is defined %}
+odl_version: "{{ odl_repo_version }}"
+{% endif %}
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/user_sfc_scenarios_variables_ubuntu.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/noha/user_sfc_scenarios_variables_ubuntu.yml.j2
index e323b3d9..bc554090 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/user_sfc_scenarios_variables_ubuntu.yml
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/noha/user_sfc_scenarios_variables_ubuntu.yml.j2
@@ -18,6 +18,7 @@
# ## the defaults for each role to find additional override options.
# ##
+{% raw %}
# # Debug and Verbose options.
debug: false
@@ -112,3 +113,8 @@ ovs_nsh_required_metal_packages:
- openvswitch-datapath-dkms
openstack_host_metal_distro_packages: "{{ ovs_nsh_required_metal_packages + _openstack_host_metal_distro_packages }}"
+{% endraw %}
+
+{% if odl_repo_version is defined %}
+odl_version: "{{ odl_repo_version }}"
+{% endif %}
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/vars/main.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/vars/main.yml
new file mode 100644
index 00000000..629b50c7
--- /dev/null
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/vars/main.yml
@@ -0,0 +1,2 @@
+---
+odl_repo_version: "{{ lookup('env','ODL_VERSION') }}"
diff --git a/scenarios/os-odl-sfc/xci_overrides b/scenarios/os-odl-sfc/xci_overrides
index 0f8f7436..ecbff0ee 100644
--- a/scenarios/os-odl-sfc/xci_overrides
+++ b/scenarios/os-odl-sfc/xci_overrides
@@ -3,3 +3,8 @@ if [[ $XCI_FLAVOR == "ha" ]]; then
else
export VM_MEMORY_SIZE=16384
fi
+
+# Until this feature is developed, ODL_VERSION must be initialized:
+# https://github.com/ansible/ansible/issues/17329
+# otherwise the lookup in vars/main returns an empty string when not defined
+export ODL_VERSION=${ODL_VERSION:-latest_release}
diff --git a/setup.cfg b/setup.cfg
index 8e5f6230..ed4d2104 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,9 +1,10 @@
[metadata]
name = sfc
-version = 5
home-page = https://wiki.opnfv.org/display/sfc/Service+Function+Chaining+Home
[files]
packages = sfc
-scripts =
- sfc/tests/functest/run_sfc_tests.py
+
+[entry_points]
+console_scripts =
+ run_sfc_tests = sfc.tests.functest.run_sfc_tests:main
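[Note (not part of the patch): the setup.cfg hunk above swaps the legacy scripts= entry for a
console_scripts entry point. Since the entry point names sfc.tests.functest.run_sfc_tests:main,
the executable that setuptools generates at install time behaves roughly like this sketch:]

    # approximate behaviour of the generated "run_sfc_tests" console script
    from sfc.tests.functest.run_sfc_tests import main

    if __name__ == '__main__':
        raise SystemExit(main())

[After installation the suite is therefore launched with the run_sfc_tests command rather than by
invoking the script file directly.]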
diff --git a/sfc/lib/cleanup.py b/sfc/lib/cleanup.py
index 32835fa8..e614867d 100644
--- a/sfc/lib/cleanup.py
+++ b/sfc/lib/cleanup.py
@@ -1,6 +1,6 @@
+import logging
import sys
import time
-import logging
import sfc.lib.openstack_utils as os_sfc_utils
import sfc.lib.odl_utils as odl_utils
@@ -82,6 +82,13 @@ def delete_openstack_objects(creators):
logger.error('Unexpected error cleaning - %s', e)
+# Networking-odl generates a new security group when creating a router
+# which is not tracked by SNAPs
+def delete_untracked_security_groups():
+ openstack_sfc = os_sfc_utils.OpenStackSFC()
+ openstack_sfc.delete_all_security_groups()
+
+
def cleanup_odl(odl_ip, odl_port):
delete_odl_resources(odl_ip, odl_port, 'service-function-forwarder')
delete_odl_resources(odl_ip, odl_port, 'service-function-chain')
@@ -98,6 +105,7 @@ def cleanup(creators, odl_ip=None, odl_port=None):
delete_vnfds()
delete_vims()
delete_openstack_objects(creators)
+ delete_untracked_security_groups()
if odl_ip is not None and odl_port is not None:
cleanup_odl(odl_ip, odl_port)
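[Note (not part of the patch): a minimal sketch of driving the new cleanup helper on its own,
using the sfc.lib.cleanup module path shown above:]

    # Drop the security groups that networking-odl creates behind SNAPs' back
    import sfc.lib.cleanup as sfc_cleanup

    sfc_cleanup.delete_untracked_security_groups()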
diff --git a/sfc/lib/config.py b/sfc/lib/config.py
index bc955d8b..e6149081 100644
--- a/sfc/lib/config.py
+++ b/sfc/lib/config.py
@@ -8,16 +8,18 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
+
+import logging
import os
-import yaml
import sfc
+import yaml
import functest
-
-import sfc.lib.test_utils as test_utils
-from functest.utils.constants import CONST
-import logging
import functest.utils.functest_utils as ft_utils
+import sfc.lib.test_utils as test_utils
+
+from functest.utils import config
+from functest.utils import env
logger = logging.getLogger(__name__)
@@ -28,8 +30,7 @@ class CommonConfig(object):
"""
def __init__(self):
- self.line_length = 30
- self.test_db = ft_utils.get_functest_config("results.test_db_url")
+ self.line_length = 35
self.functest_repo_path = os.path.dirname(functest.__file__)
self.functest_logging_api = os.path.join(self.functest_repo_path,
"ci", "logging.ini")
@@ -41,16 +42,31 @@ class CommonConfig(object):
self.sfc_test_dir, "vnfd-default-params-file")
self.vnffgd_dir = os.path.join(self.sfc_test_dir, "vnffgd-templates")
self.functest_results_dir = os.path.join(
- CONST.dir_results, "odl-sfc")
- self.config_file = os.path.join(self.sfc_test_dir, "config.yaml")
+ getattr(config.CONF, 'dir_results'), "odl-sfc")
+
+ # We need to know the openstack version in order to use one config or
+ # another. For Pike we will use config-pike.yaml. Queens and Rocky
+ # will use config.yaml
+ if 'OPENSTACK_OSA_VERSION' in os.environ:
+ if os.environ['OPENSTACK_OSA_VERSION'] == 'stable/pike':
+ self.config_file = os.path.join(self.sfc_test_dir,
+ "config-pike.yaml")
+ else:
+ self.config_file = os.path.join(self.sfc_test_dir,
+ "config.yaml")
+ else:
+ self.config_file = os.path.join(self.sfc_test_dir,
+ "config-pike.yaml")
+
+ logger.info("The config file used is {}".format(self.config_file))
self.vim_file = os.path.join(self.sfc_test_dir, "register-vim.json")
- self.installer_type = CONST.__getattribute__('INSTALLER_TYPE')
+ self.installer_type = env.get('INSTALLER_TYPE')
self.installer_fields = test_utils.fill_installer_dict(
- self.installer_type)
+ self.installer_type)
- self.installer_ip = CONST.__getattribute__('INSTALLER_IP')
+ self.installer_ip = env.get('INSTALLER_IP')
self.installer_user = ft_utils.get_parameter_from_yaml(
self.installer_fields['user'], self.config_file)
@@ -58,19 +74,19 @@ class CommonConfig(object):
try:
self.installer_password = ft_utils.get_parameter_from_yaml(
self.installer_fields['password'], self.config_file)
- except:
+ except Exception:
self.installer_password = None
try:
self.installer_key_file = ft_utils.get_parameter_from_yaml(
self.installer_fields['pkey_file'], self.config_file)
- except:
+ except Exception:
self.installer_key_file = None
try:
self.installer_cluster = ft_utils.get_parameter_from_yaml(
self.installer_fields['cluster'], self.config_file)
- except:
+ except Exception:
self.installer_cluster = None
self.flavor = ft_utils.get_parameter_from_yaml(
@@ -87,8 +103,20 @@ class CommonConfig(object):
"defaults.image_format", self.config_file)
self.image_url = ft_utils.get_parameter_from_yaml(
"defaults.image_url", self.config_file)
- self.dir_functest_data = ft_utils.get_functest_config(
- "general.dir.functest_data")
+ try:
+ self.vnf_image_name = ft_utils.get_parameter_from_yaml(
+ "defaults.vnf_image_name", self.config_file)
+ self.vnf_image_url = ft_utils.get_parameter_from_yaml(
+ "defaults.vnf_image_url", self.config_file)
+ self.vnf_image_format = ft_utils.get_parameter_from_yaml(
+ "defaults.vnf_image_format", self.config_file)
+ except ValueError:
+ # If the parameter does not exist we use the default
+ self.vnf_image_name = self.image_name
+ self.vnf_image_url = self.image_url
+ self.vnf_image_format = self.image_format
+
+ self.dir_functest_data = getattr(config.CONF, 'dir_functest_data')
class TestcaseConfig(object):
@@ -104,7 +132,8 @@ class TestcaseConfig(object):
testcases_yaml = yaml.safe_load(f)
test_config = testcases_yaml['testcases'].get(testcase, None)
if test_config is None:
- logger.error('Test {0} configuration is not present in {1}'
- .format(testcase, common_config.config_file))
+ logger.error(
+ 'Test %s configuration is not present in %s',
+ testcase, common_config.config_file)
# Update class fields with configuration variables dynamically
self.__dict__.update(**test_config)
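[Note (not part of the patch): the config-file selection added to CommonConfig above reduces to
the following standalone sketch; the helper name pick_config_file is hypothetical:]

    import os

    def pick_config_file(sfc_test_dir):
        # stable/pike, or an unset OPENSTACK_OSA_VERSION, selects config-pike.yaml;
        # any other value (e.g. stable/queens) selects config.yaml
        version = os.environ.get('OPENSTACK_OSA_VERSION')
        if version is None or version == 'stable/pike':
            return os.path.join(sfc_test_dir, 'config-pike.yaml')
        return os.path.join(sfc_test_dir, 'config.yaml')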
diff --git a/sfc/lib/odl_utils.py b/sfc/lib/odl_utils.py
index 45937263..7879eab9 100644
--- a/sfc/lib/odl_utils.py
+++ b/sfc/lib/odl_utils.py
@@ -1,17 +1,26 @@
import ConfigParser
+import functools
+import json
+import logging
import os
+import re
import requests
import time
-import json
-import re
-import logging
-import functest.utils.functest_utils as ft_utils
-import sfc.lib.openstack_utils as os_sfc_utils
+import sfc.lib.openstack_utils as os_sfc_utils
logger = logging.getLogger(__name__)
+ODL_MODULE_EXCEPTIONS = {
+ "service-function-path-state": "service-function-path"
+}
+
+ODL_PLURAL_EXCEPTIONS = {
+ "service-function-path-state": "service-function-paths-state"
+}
+
+
def actual_rsps_in_compute(ovs_logger, compute_ssh):
'''
Example flows that match the regex (line wrapped because of flake8)
@@ -20,31 +29,36 @@ def actual_rsps_in_compute(ovs_logger, compute_ssh):
load:0x27->NXM_NX_NSP[0..23],load:0xff->NXM_NX_NSI[],
load:0xffffff->NXM_NX_NSH_C1[],load:0->NXM_NX_NSH_C2[],resubmit(,17)
'''
- match_rsp = re.compile(
- r'.+tp_dst=([0-9]+).+load:(0x[0-9a-f]+)->NXM_NX_NSP\[0\.\.23\].+')
+ match_rsp = re.compile(r'.+'
+ r'(tp_(?:src|dst)=[0-9]+)'
+ r'.+'
+ r'load:(0x[0-9a-f]+)->NXM_NX_NSP\[0\.\.23\]'
+ r'.+')
# First line is OFPST_FLOW reply (OF1.3) (xid=0x2):
# This is not a flow so ignore
flows = (ovs_logger.ofctl_dump_flows(compute_ssh, 'br-int', '101')
.strip().split('\n')[1:])
matching_flows = [match_rsp.match(f) for f in flows]
- # group(1) = 22 (tp_dst value) | group(2) = 0xff (rsp value)
- rsps_in_compute = ['{0}_{1}'.format(mf.group(2), mf.group(1))
+    # group(1) = tp_dst=22 | group(2) = 0xff (rsp value)
+ rsps_in_compute = ['{0}|{1}'.format(mf.group(2), mf.group(1))
for mf in matching_flows if mf is not None]
return rsps_in_compute
-def get_active_rsps(odl_ip, odl_port):
+def get_active_rsps_on_ports(odl_ip, odl_port, neutron_ports):
'''
Queries operational datastore and returns the RSPs for which we have
- created a classifier (ACL). These are considered as active RSPs
- for which classification rules should exist in the compute nodes
+ created a classifier (ACL) on the specified neutron ports. These are
+ considered as active RSPs on those ports for which classification rules
+ should exist in the compute node on which such ports are located.
- This function enhances the returned dictionary with the
- destination port of the ACL.
+ This function enhances each returned RSP with the openflow matches on
+ the tcp ports that classify traffic into that RSP.
'''
+ port_ids = [port.id for port in neutron_ports]
acls = get_odl_acl_list(odl_ip, odl_port)
- rsps = []
+ rsps = {}
for acl in acls['access-lists']['acl']:
try:
# We get the first ace. ODL creates a new ACL
@@ -55,76 +69,129 @@ def get_active_rsps(odl_ip, odl_port):
acl['acl-name']))
continue
- if not ('netvirt-sfc-acl:rsp-name' in ace['actions']):
+ matches = ace['matches']
+
+ # We are just interested in the destination-port-range matches
+ # that we use throughout the tests
+ if matches.get('destination-port-range') is None:
continue
+ tcp_port = matches['destination-port-range']['lower-port']
+
+ # A single ace may classify traffic into a forward path
+ # and optionally into a reverse path if destination port is set
+ src_port = matches.get('netvirt-sfc-acl:source-port-uuid')
+ dst_port = matches.get('netvirt-sfc-acl:destination-port-uuid')
+ forward_of_match = None
+ reverse_of_match = None
+ if src_port in port_ids:
+ forward_of_match = 'tp_dst=' + str(tcp_port)
+ if dst_port in port_ids:
+ # For classification to the reverse path
+ # the openflow match inverts
+ reverse_of_match = 'tp_src=' + str(tcp_port)
+
+ # This ACL does not apply to any of the given ports
+ if not forward_of_match and not reverse_of_match:
+ continue
+
+ actions = ace['actions']
+ rsp_names = get_rsps_from_netvirt_acl_actions(odl_ip,
+ odl_port,
+ actions)
+
+ for rsp_name in rsp_names:
+ rsp = rsps.get(rsp_name)
+ if not rsp:
+ rsp = get_rsp(odl_ip, odl_port, rsp_name)
+ of_matches = rsp.get('of-matches', [])
+ if reverse_of_match and rsp.get('reverse-path'):
+ of_matches.append(reverse_of_match)
+ elif forward_of_match and not rsp.get('reverse-path'):
+ of_matches.append(forward_of_match)
+ rsp['of-matches'] = of_matches
+ rsps[rsp_name] = rsp
- rsp_name = ace['actions']['netvirt-sfc-acl:rsp-name']
- rsp = get_odl_resource_elem(odl_ip,
- odl_port,
- 'rendered-service-path',
- rsp_name,
- datastore='operational')
- '''
- Rsps are returned in the format:
- {
- "rendered-service-path": [
- {
- "name": "Path-red-Path-83",
- "path-id": 83,
- ...
- "rendered-service-path-hop": [
- {
- ...
- "service-function-name": "testVNF1",
- "service-index": 255
- ...
- 'rendered-service-path' Is returned as a list with one
- element (we select by name and the names are unique)
- '''
- rsp_port = rsp['rendered-service-path'][0]
- rsp_port['dst-port'] = (ace['matches']
- ['destination-port-range']['lower-port'])
- rsps.append(rsp_port)
- return rsps
-
-
-def promised_rsps_in_computes(odl_ip, odl_port):
+ return rsps.values()
+
+
+def get_rsps_from_netvirt_acl_actions(odl_ip, odl_port, netvirt_acl_actions):
'''
- Return a list of rsp_port which represents the rsp id and the destination
- port configured in ODL
+ Return the list of RSPs referenced from the netvirt sfc redirect action
'''
- rsps = get_active_rsps(odl_ip, odl_port)
- rsps_in_computes = ['{0}_{1}'.format(hex(rsp['path-id']), rsp['dst-port'])
- for rsp in rsps]
+ rsp_names = []
+
+ if 'netvirt-sfc-acl:rsp-name' in netvirt_acl_actions:
+ rsp_names.append(netvirt_acl_actions['netvirt-sfc-acl:rsp-name'])
+
+ if 'netvirt-sfc-acl:sfp-name' in netvirt_acl_actions:
+ # If the acl redirect action is a sfp instead of rsp
+ # we need to get the rsps associated to that sfp
+ sfp_name = netvirt_acl_actions['netvirt-sfc-acl:sfp-name']
+ sfp_state = get_odl_resource_elem(odl_ip,
+ odl_port,
+ 'service-function-path-state',
+ sfp_name,
+ datastore='operational')
+ sfp_rsps = sfp_state.get('sfp-rendered-service-path', [])
+ sfp_rsp_names = [rsp['name'] for rsp in sfp_rsps if 'name' in rsp]
+ rsp_names.extend(sfp_rsp_names)
+
+ return rsp_names
+
+
+def get_rsp(odl_ip, odl_port, rsp_name):
+ rsp = get_odl_resource_elem(odl_ip,
+ odl_port,
+ 'rendered-service-path',
+ rsp_name,
+ datastore='operational')
+ return rsp
+
+
+def promised_rsps_in_compute(odl_ip, odl_port, compute_ports):
+ '''
+ Return a list of rsp|of_match which represents the RSPs and openflow
+ matches on the source/destination port that classify traffic into such
+ RSP as configured in ODL ACLs
+ '''
+ rsps = get_active_rsps_on_ports(odl_ip, odl_port, compute_ports)
+ rsps_in_computes = ['{0}|{1}'.format(hex(rsp['path-id']), of_match)
+ for rsp in rsps
+ for of_match in rsp['of-matches']]
return rsps_in_computes
-@ft_utils.timethis
+def timethis(func):
+ """Measure the time it takes for a function to complete"""
+ @functools.wraps(func)
+ def timed(*args, **kwargs):
+ ts = time.time()
+ result = func(*args, **kwargs)
+ te = time.time()
+ elapsed = '{0}'.format(te - ts)
+ logger.info('{f}(*{a}, **{kw}) took: {t} sec'.format(
+ f=func.__name__, a=args, kw=kwargs, t=elapsed))
+ return result, elapsed
+ return timed
+
+
+@timethis
def wait_for_classification_rules(ovs_logger, compute_nodes, odl_ip, odl_port,
- timeout=200):
+ compute_name, neutron_ports, timeout=200):
'''
Check if the classification rules configured in ODL are implemented in OVS.
We know by experience that this process might take a while
'''
try:
- # Find the compute where the client is
- compute_client = os_sfc_utils.get_compute_client()
-
- for compute_node in compute_nodes:
- if compute_node.name in compute_client:
- compute = compute_node
- try:
- compute
- except NameError:
- logger.debug("No compute where the client is was found")
- raise Exception("No compute where the client is was found")
+ compute = find_compute(compute_name, compute_nodes)
# Find the configured rsps in ODL. Its format is nsp_destPort
promised_rsps = []
timeout2 = 10
while not promised_rsps:
- promised_rsps = promised_rsps_in_computes(odl_ip, odl_port)
+ promised_rsps = promised_rsps_in_compute(odl_ip, odl_port,
+ neutron_ports)
timeout2 -= 1
if timeout2 == 0:
os_sfc_utils.get_tacker_items()
@@ -133,7 +200,8 @@ def wait_for_classification_rules(ovs_logger, compute_nodes, odl_ip, odl_port,
time.sleep(3)
while timeout > 0:
- logger.info("RSPs in ODL Operational DataStore:")
+ logger.info("RSPs in ODL Operational DataStore"
+                        " for compute '{}':".format(compute_name))
logger.info("{0}".format(promised_rsps))
# Fetch the rsps implemented in the compute
@@ -177,8 +245,18 @@ def get_odl_ip_port(nodes):
return ip, port
-def pluralize(s):
- return '{0}s'.format(s)
+def pluralize(resource):
+ plural = ODL_PLURAL_EXCEPTIONS.get(resource, None)
+ if not plural:
+ plural = '{0}s'.format(resource)
+ return plural
+
+
+def get_module(resource):
+ module = ODL_MODULE_EXCEPTIONS.get(resource, None)
+ if not module:
+ module = resource
+ return module
def format_odl_resource_list_url(odl_ip, odl_port, resource,
@@ -186,7 +264,8 @@ def format_odl_resource_list_url(odl_ip, odl_port, resource,
odl_pwd='admin'):
return ('http://{usr}:{pwd}@{ip}:{port}/restconf/{ds}/{rsrc}:{rsrcs}'
.format(usr=odl_user, pwd=odl_pwd, ip=odl_ip, port=odl_port,
- ds=datastore, rsrc=resource, rsrcs=pluralize(resource)))
+ ds=datastore, rsrc=get_module(resource),
+ rsrcs=pluralize(resource)))
def format_odl_resource_elem_url(odl_ip, odl_port, resource,
@@ -212,7 +291,12 @@ def get_odl_resource_elem(odl_ip, odl_port, resource,
elem_name, datastore='config'):
url = format_odl_resource_elem_url(
odl_ip, odl_port, resource, elem_name, datastore=datastore)
- return requests.get(url).json()
+ response = requests.get(url).json()
+ # Response is in the format of a dictionary containing
+ # a single value that is an array with the element requested:
+ # {'resource' : [element]}
+ # Return just the element
+ return response.get(resource, [{}])[0]
def delete_odl_resource_elem(odl_ip, odl_port, resource, elem_name,
@@ -283,3 +367,80 @@ def delete_acl(clf_name, odl_ip, odl_port):
odl_port,
'ietf-access-control-list:ipv4-acl',
clf_name)
+
+
+def find_compute(compute_client_name, compute_nodes):
+ for compute_node in compute_nodes:
+ if compute_node.name in compute_client_name:
+ compute = compute_node
+ try:
+ compute
+ except NameError:
+        logger.debug("No compute hosting the client was found")
+        raise Exception("No compute hosting the client was found")
+
+ return compute
+
+
+def check_vnffg_deletion(odl_ip, odl_port, ovs_logger, neutron_ports,
+ compute_client_name, compute_nodes, retries=20):
+ '''
+ First, RSPs are checked in the operational datastore of ODL. Nothing
+ should exist. As it might take a while for ODL to remove that, some
+ retries are needed.
+
+ Secondly, we check that the classification rules are removed too
+ '''
+
+ retries_counter = retries
+
+ # Check RSPs
+ while retries_counter > 0:
+ if get_active_rsps_on_ports(odl_ip, odl_port, neutron_ports):
+ retries_counter -= 1
+ time.sleep(3)
+ else:
+ break
+
+ if not retries_counter:
+ logger.debug("RSPs are still active in the MD-SAL")
+ return False
+
+ # Get the compute where the client is running
+ try:
+ compute = find_compute(compute_client_name, compute_nodes)
+ except Exception as e:
+        logger.debug("There was an error getting the compute: %s" % e)
+
+ retries_counter = retries
+
+ # Check classification flows
+ while retries_counter > 0:
+ if (actual_rsps_in_compute(ovs_logger, compute.ssh_client)):
+ retries_counter -= 1
+ time.sleep(3)
+ else:
+ break
+
+ if not retries_counter:
+ logger.debug("Classification flows still in the compute")
+ return False
+
+ return True
+
+
+def create_chain(tacker_client, default_param_file, neutron_port,
+ COMMON_CONFIG, TESTCASE_CONFIG):
+
+ tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
+ COMMON_CONFIG.vnffgd_dir,
+ TESTCASE_CONFIG.test_vnffgd_red)
+
+ os_sfc_utils.create_vnffgd(tacker_client,
+ tosca_file=tosca_file,
+ vnffgd_name='red')
+
+ os_sfc_utils.create_vnffg_with_param_file(tacker_client, 'red',
+ 'red_http',
+ default_param_file,
+ neutron_port.id)
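[Note (not part of the patch): the local timethis decorator introduced above, which replaces
ft_utils.timethis, returns the wrapped function's result together with the elapsed time, so
callers now unpack a tuple. A minimal usage sketch with a hypothetical function:]

    import time
    from sfc.lib.odl_utils import timethis

    @timethis
    def poll_something():
        time.sleep(0.1)
        return 'done'

    result, elapsed = poll_something()  # elapsed is a string holding the seconds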
diff --git a/sfc/lib/openstack_utils.py b/sfc/lib/openstack_utils.py
index f55f62e8..09b93f37 100644
--- a/sfc/lib/openstack_utils.py
+++ b/sfc/lib/openstack_utils.py
@@ -1,35 +1,29 @@
-import logging
import os
import time
import json
+import logging
import yaml
-from tackerclient.tacker import client as tackerclient
-from functest.utils.constants import CONST
-from snaps.openstack.tests import openstack_tests
+from tackerclient.tacker import client as tackerclient
+from functest.utils import constants
+from functest.utils import env
+from snaps.openstack.tests import openstack_tests
from snaps.openstack.create_image import OpenStackImage
from snaps.config.image import ImageConfig
-
from snaps.config.flavor import FlavorConfig
from snaps.openstack.create_flavor import OpenStackFlavor
-
from snaps.config.network import NetworkConfig, SubnetConfig, PortConfig
from snaps.openstack.create_network import OpenStackNetwork
-
from snaps.config.router import RouterConfig
from snaps.openstack.create_router import OpenStackRouter
-
from snaps.config.security_group import (
Protocol, SecurityGroupRuleConfig, Direction, SecurityGroupConfig)
-
from snaps.openstack.create_security_group import OpenStackSecurityGroup
-
import snaps.openstack.create_instance as cr_inst
from snaps.config.vm_inst import VmInstanceConfig, FloatingIpConfig
-
from snaps.openstack.utils import (
- nova_utils, neutron_utils, glance_utils, heat_utils, keystone_utils)
+ nova_utils, neutron_utils, heat_utils, keystone_utils)
logger = logging.getLogger(__name__)
DEFAULT_TACKER_API_VERSION = '1.0'
@@ -39,16 +33,28 @@ class OpenStackSFC:
def __init__(self):
self.os_creds = openstack_tests.get_credentials(
- os_env_file=CONST.__getattribute__('openstack_creds'))
+ os_env_file=constants.ENV_FILE)
self.creators = []
self.nova = nova_utils.nova_client(self.os_creds)
self.neutron = neutron_utils.neutron_client(self.os_creds)
- self.glance = glance_utils.glance_client(self.os_creds)
self.heat = heat_utils.heat_client(self.os_creds)
+ self.keystone = keystone_utils.keystone_client(self.os_creds)
def register_glance_image(self, name, url, img_format, public):
- image_settings = ImageConfig(name=name, img_format=img_format, url=url,
- public=public, image_user='admin')
+ logger.info("Registering the image...")
+ # Check whether the image is local or not
+ if 'http' in url:
+ image_settings = ImageConfig(name=name,
+ img_format=img_format,
+ url=url,
+ public=public,
+ image_user='admin')
+ else:
+ image_settings = ImageConfig(name=name,
+ img_format=img_format,
+ image_file=url,
+ public=public,
+ image_user='admin')
# TODO Remove this when tacker is part of SNAPS
self.image_settings = image_settings
@@ -60,6 +66,7 @@ class OpenStackSFC:
return image_creator
def create_flavor(self, name, ram, disk, vcpus):
+ logger.info("Creating the flavor...")
flavor_settings = FlavorConfig(name=name, ram=ram, disk=disk,
vcpus=vcpus)
flavor_creator = OpenStackFlavor(self.os_creds, flavor_settings)
@@ -70,6 +77,7 @@ class OpenStackSFC:
def create_network_infrastructure(self, net_name, subnet_name, subnet_cidr,
router_name):
+ logger.info("Creating networks...")
# Network and subnet
subnet_settings = SubnetConfig(name=subnet_name, cidr=subnet_cidr)
network_settings = NetworkConfig(name=net_name,
@@ -80,7 +88,8 @@ class OpenStackSFC:
self.creators.append(network_creator)
# Router
- ext_network_name = CONST.__getattribute__('EXTERNAL_NETWORK')
+ logger.info("Creating the router...")
+ ext_network_name = env.get('EXTERNAL_NETWORK')
router_settings = RouterConfig(name=router_name,
external_gateway=ext_network_name,
@@ -94,6 +103,7 @@ class OpenStackSFC:
return network, router
def create_security_group(self, sec_grp_name):
+ logger.info("Creating the security groups...")
rule_ping = SecurityGroupRuleConfig(sec_grp_name=sec_grp_name,
direction=Direction.ingress,
protocol=Protocol.icmp)
@@ -125,7 +135,7 @@ class OpenStackSFC:
def create_instance(self, vm_name, flavor_name, image_creator, network,
secgrp, av_zone):
-
+ logger.info("Creating the instance {}...".format(vm_name))
port_settings = PortConfig(name=vm_name + '-port',
network_name=network.name)
@@ -156,12 +166,30 @@ class OpenStackSFC:
'''
Return the compute where the client sits
'''
- compute = nova_utils.get_server(self.nova, server_name='client')
- return compute
+ return self.get_vm_compute('client')
+
+ def get_compute_server(self):
+ '''
+ Return the compute where the server sits
+ '''
+ return self.get_vm_compute('server')
+
+ def get_vm_compute(self, vm_name):
+ '''
+ Return the compute where the vm sits
+ '''
+ for creator in self.creators:
+ # We want to filter the vm creators
+ if hasattr(creator, 'get_vm_inst'):
+ # We want to fetch by vm_name
+ if creator.get_vm_inst().name == vm_name:
+ return creator.get_vm_inst().compute_host
+
+ raise Exception("There is no VM with name '{}'!!".format(vm_name))
def assign_floating_ip(self, router, vm, vm_creator):
'''
- Assign a floating ips to all the VMs
+ Assign floating ips to all the VMs
'''
name = vm.name + "-float"
port_name = vm.ports[0].name
@@ -173,41 +201,80 @@ class OpenStackSFC:
return ip.ip
# We need this function because tacker VMs cannot be created through SNAPs
- def assign_floating_ip_vnfs(self, router):
+ def assign_floating_ip_vnfs(self, router, ips=None):
'''
- Assign a floating ips to all the SFs
+ Assign floating ips to all the SFs. Optionally specify the
+ subnet IPs that a floating IP should be assigned to, assuming that the
+ SF is connected to a single subnet globally and per port.
'''
stacks = self.heat.stacks.list()
fips = []
+ project_name = 'admin'
for stack in stacks:
servers = heat_utils.get_stack_servers(self.heat,
self.nova,
self.neutron,
- stack)
+ self.keystone,
+ stack,
+ project_name)
sf_creator = cr_inst.generate_creator(self.os_creds,
servers[0],
- self.image_settings)
- port_name = servers[0].ports[0].name
+ self.image_settings,
+ project_name)
+
name = servers[0].name + "-float"
+ if ips is None:
+ port_name = servers[0].ports[0].name
+ else:
+ port_name = None
+ for port in servers[0].ports:
+ if port.ips[0]['ip_address'] in ips:
+ port_name = port.name
+ break
+
+ if port_name is None:
+ err_msg = "The VNF {} does not have any suitable port {} " \
+ "for floating IP assignment".format(
+ name,
+ 'with ip any of ' + str(ips) if ips else '')
+ logger.error(err_msg)
+ raise Exception(err_msg)
+
float_ip = FloatingIpConfig(name=name,
port_name=port_name,
router_name=router.name)
ip = sf_creator.add_floating_ip(float_ip)
+ self.creators.append(sf_creator)
fips.append(ip.ip)
return fips
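For reference, a minimal stand-alone sketch of the port-selection rule used above when a list of candidate subnet IPs is passed; the Port namedtuple and select_port_for_fip helper are illustrative stand-ins, not part of the patch.

    from collections import namedtuple

    # Hypothetical stand-in for the SNAPS port object: a name plus fixed IPs.
    Port = namedtuple('Port', ['name', 'ips'])

    def select_port_for_fip(ports, ips=None):
        # Without a filter the previous behaviour is kept: first port wins.
        if ips is None:
            return ports[0].name
        # Otherwise pick the first port whose primary fixed IP is in the list.
        for port in ports:
            if port.ips[0]['ip_address'] in ips:
                return port.name
        raise Exception("No suitable port for floating IP assignment")

    ports = [Port('sf-mgmt-port', [{'ip_address': '11.0.0.5'}]),
             Port('sf-data-port', [{'ip_address': '12.0.0.5'}])]
    print(select_port_for_fip(ports, ips=['12.0.0.5']))  # sf-data-port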
- def get_client_port_id(self, vm):
+ def get_client_port(self, vm, vm_creator):
'''
Get the neutron port id of the client
'''
- port_id = neutron_utils.get_port(self.neutron,
- port_name=vm.name + "-port")
- return port_id
+ port_name = vm.name + "-port"
+ port = vm_creator.get_port_by_name(port_name)
+ if port is not None:
+ return port
+ else:
+ logger.error("The VM {0} does not have any port"
+ " with name {1}".format(vm.name, port_name))
+ raise Exception("Client VM does not have the desired port")
+
+ def delete_all_security_groups(self):
+ '''
+ Deletes all the available security groups
-# TACKER SECTION #
+ Needed until this bug is fixed:
+ https://bugs.launchpad.net/networking-odl/+bug/1763705
+ '''
+ sec_groups = neutron_utils.list_security_groups(self.neutron)
+ for sg in sec_groups:
+ neutron_utils.delete_security_group(self.neutron, sg)
+# TACKER SECTION #
def get_tacker_client_version():
api_version = os.getenv('OS_TACKER_API_VERSION')
if api_version is not None:
@@ -219,7 +286,7 @@ def get_tacker_client_version():
def get_tacker_client(other_creds={}):
creds_override = None
os_creds = openstack_tests.get_credentials(
- os_env_file=CONST.__getattribute__('openstack_creds'),
+ os_env_file=constants.ENV_FILE,
overrides=creds_override)
sess = keystone_utils.keystone_session(os_creds)
return tackerclient.Client(get_tacker_client_version(), session=sess)
@@ -227,12 +294,12 @@ def get_tacker_client(other_creds={}):
def get_id_from_name(tacker_client, resource_type, resource_name):
try:
- req_params = {'fields': 'id', 'name': resource_name}
- endpoint = '/{0}s'.format(resource_type)
- resp = tacker_client.get(endpoint, params=req_params)
- endpoint = endpoint.replace('-', '_')
- return resp[endpoint[1:]][0]['id']
- except Exception, e:
+ params = {'fields': 'id', 'name': resource_name}
+ collection = resource_type + 's'
+ path = '/' + collection
+ resp = tacker_client.list(collection, path, **params)
+ return resp[collection][0]['id']
+ except Exception as e:
logger.error("Error [get_id_from_name(tacker_client, "
"resource_type, resource_name)]: %s" % e)
return None
@@ -280,22 +347,24 @@ def list_vnfds(tacker_client, verbose=False):
if not verbose:
vnfds = [vnfd['id'] for vnfd in vnfds['vnfds']]
return vnfds
- except Exception, e:
+ except Exception as e:
logger.error("Error [list_vnfds(tacker_client)]: %s" % e)
return None
def create_vnfd(tacker_client, tosca_file=None, vnfd_name=None):
+ logger.info("Creating the vnfd...")
try:
vnfd_body = {}
if tosca_file is not None:
with open(tosca_file) as tosca_fd:
- vnfd_body = tosca_fd.read()
- logger.info('VNFD template:\n{0}'.format(vnfd_body))
+ vnfd = tosca_fd.read()
+ vnfd_body = yaml.safe_load(vnfd)
+ logger.info('VNFD template:\n{0}'.format(vnfd))
return tacker_client.create_vnfd(
body={"vnfd": {"attributes": {"vnfd": vnfd_body},
"name": vnfd_name}})
- except Exception, e:
+ except Exception as e:
logger.error("Error [create_vnfd(tacker_client, '%s')]: %s"
% (tosca_file, e))
return None
@@ -309,7 +378,7 @@ def delete_vnfd(tacker_client, vnfd_id=None, vnfd_name=None):
raise Exception('You need to provide VNFD id or VNFD name')
vnfd = get_vnfd_id(tacker_client, vnfd_name)
return tacker_client.delete_vnfd(vnfd)
- except Exception, e:
+ except Exception as e:
logger.error("Error [delete_vnfd(tacker_client, '%s', '%s')]: %s"
% (vnfd_id, vnfd_name, e))
return None
@@ -321,13 +390,14 @@ def list_vnfs(tacker_client, verbose=False):
if not verbose:
vnfs = [vnf['id'] for vnf in vnfs['vnfs']]
return vnfs
- except Exception, e:
+ except Exception as e:
logger.error("Error [list_vnfs(tacker_client)]: %s" % e)
return None
def create_vnf(tacker_client, vnf_name, vnfd_id=None,
vnfd_name=None, vim_id=None, vim_name=None, param_file=None):
+ logger.info("Creating the vnf...")
try:
vnf_body = {
'vnf': {
@@ -356,7 +426,7 @@ def create_vnf(tacker_client, vnf_name, vnfd_id=None,
vnf_body['vnf']['vim_id'] = get_vim_id(tacker_client, vim_name)
return tacker_client.create_vnf(body=vnf_body)
- except Exception, e:
+ except Exception as e:
logger.error("error [create_vnf(tacker_client,"
" '%s', '%s', '%s')]: %s"
% (vnf_name, vnfd_id, vnfd_name, e))
@@ -376,12 +446,28 @@ def get_vnf(tacker_client, vnf_id=None, vnf_name=None):
else:
raise Exception('Could not retrieve ID from name [%s]' % vnf_name)
- except Exception, e:
+ except Exception as e:
logger.error("Could not retrieve VNF [vnf_id=%s, vnf_name=%s] - %s"
% (vnf_id, vnf_name, e))
return None
+def get_vnf_ip(tacker_client, vnf_id=None, vnf_name=None):
+ """
+ Get the management ip of the first VNF component as obtained from the
+ tacker REST API:
+
+ {
+ "vnf": {
+ ...
+ "mgmt_url": "{\"VDU1\": \"192.168.120.3\"}",
+ ...
+ }
+ """
+ vnf = get_vnf(tacker_client, vnf_id, vnf_name)
+ return json.loads(vnf['mgmt_url']).values()[0]
+
+
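As a quick illustration of the get_vnf_ip helper above: the management IP comes from the JSON-encoded mgmt_url string returned by the tacker API; wrapping the values() call in list(), as done here, is an assumption that keeps the lookup working on both Python 2 and Python 3.

    import json

    mgmt_url = '{"VDU1": "192.168.120.3"}'    # format shown in the docstring
    mgmt_ip = list(json.loads(mgmt_url).values())[0]
    print(mgmt_ip)                            # 192.168.120.3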
def wait_for_vnf(tacker_client, vnf_id=None, vnf_name=None, timeout=100):
try:
vnf = get_vnf(tacker_client, vnf_id, vnf_name)
@@ -401,7 +487,7 @@ def wait_for_vnf(tacker_client, vnf_id=None, vnf_name=None, timeout=100):
raise Exception('Timeout when booting vnf %s' % vnf['id'])
return vnf['id']
- except Exception, e:
+ except Exception as e:
logger.error("error [wait_for_vnf(tacker_client, '%s', '%s')]: %s"
% (vnf_id, vnf_name, e))
return None
@@ -415,13 +501,14 @@ def delete_vnf(tacker_client, vnf_id=None, vnf_name=None):
raise Exception('You need to provide a VNF id or name')
vnf = get_vnf_id(tacker_client, vnf_name)
return tacker_client.delete_vnf(vnf)
- except Exception, e:
+ except Exception as e:
logger.error("Error [delete_vnf(tacker_client, '%s', '%s')]: %s"
% (vnf_id, vnf_name, e))
return None
def create_vim(tacker_client, vim_file=None):
+ logger.info("Creating the vim...")
try:
vim_body = {}
if vim_file is not None:
@@ -429,38 +516,42 @@ def create_vim(tacker_client, vim_file=None):
vim_body = json.load(vim_fd)
logger.info('VIM template:\n{0}'.format(vim_body))
return tacker_client.create_vim(body=vim_body)
- except Exception, e:
+ except Exception as e:
logger.error("Error [create_vim(tacker_client, '%s')]: %s"
% (vim_file, e))
return None
def create_vnffgd(tacker_client, tosca_file=None, vnffgd_name=None):
+ logger.info("Creating the vnffgd...")
try:
vnffgd_body = {}
if tosca_file is not None:
with open(tosca_file) as tosca_fd:
- vnffgd_body = yaml.safe_load(tosca_fd)
- logger.info('VNFFGD template:\n{0}'.format(vnffgd_body))
+ vnffgd = tosca_fd.read()
+ vnffgd_body = yaml.safe_load(vnffgd)
+ logger.info('VNFFGD template:\n{0}'.format(vnffgd))
return tacker_client.create_vnffgd(
body={'vnffgd': {'name': vnffgd_name,
'template': {'vnffgd': vnffgd_body}}})
- except Exception, e:
+ except Exception as e:
logger.error("Error [create_vnffgd(tacker_client, '%s')]: %s"
% (tosca_file, e))
return None
def create_vnffg(tacker_client, vnffg_name=None, vnffgd_id=None,
- vnffgd_name=None, param_file=None):
+ vnffgd_name=None, param_file=None, symmetrical=False):
'''
Creates the vnffg which will provide the RSP and the classifier
'''
+ logger.info("Creating the vnffg...")
try:
vnffg_body = {
'vnffg': {
'attributes': {},
- 'name': vnffg_name
+ 'name': vnffg_name,
+ 'symmetrical': symmetrical
}
}
if param_file is not None:
@@ -477,7 +568,7 @@ def create_vnffg(tacker_client, vnffg_name=None, vnffgd_id=None,
vnffg_body['vnffg']['vnffgd_id'] = get_vnffgd_id(tacker_client,
vnffgd_name)
return tacker_client.create_vnffg(body=vnffg_body)
- except Exception, e:
+ except Exception as e:
logger.error("error [create_vnffg(tacker_client,"
" '%s', '%s', '%s')]: %s"
% (vnffg_name, vnffgd_id, vnffgd_name, e))
@@ -490,7 +581,7 @@ def list_vnffgds(tacker_client, verbose=False):
if not verbose:
vnffgds = [vnffgd['id'] for vnffgd in vnffgds['vnffgds']]
return vnffgds
- except Exception, e:
+ except Exception as e:
logger.error("Error [list_vnffgds(tacker_client)]: %s" % e)
return None
@@ -501,7 +592,7 @@ def list_vnffgs(tacker_client, verbose=False):
if not verbose:
vnffgs = [vnffg['id'] for vnffg in vnffgs['vnffgs']]
return vnffgs
- except Exception, e:
+ except Exception as e:
logger.error("Error [list_vnffgs(tacker_client)]: %s" % e)
return None
@@ -514,7 +605,7 @@ def delete_vnffg(tacker_client, vnffg_id=None, vnffg_name=None):
raise Exception('You need to provide a VNFFG id or name')
vnffg = get_vnffg_id(tacker_client, vnffg_name)
return tacker_client.delete_vnffg(vnffg)
- except Exception, e:
+ except Exception as e:
logger.error("Error [delete_vnffg(tacker_client, '%s', '%s')]: %s"
% (vnffg_id, vnffg_name, e))
return None
@@ -528,7 +619,7 @@ def delete_vnffgd(tacker_client, vnffgd_id=None, vnffgd_name=None):
raise Exception('You need to provide VNFFGD id or VNFFGD name')
vnffgd = get_vnffgd_id(tacker_client, vnffgd_name)
return tacker_client.delete_vnffgd(vnffgd)
- except Exception, e:
+ except Exception as e:
logger.error("Error [delete_vnffgd(tacker_client, '%s', '%s')]: %s"
% (vnffgd_id, vnffgd_name, e))
return None
@@ -540,7 +631,7 @@ def list_vims(tacker_client, verbose=False):
if not verbose:
vims = [vim['id'] for vim in vims['vims']]
return vims
- except Exception, e:
+ except Exception as e:
logger.error("Error [list_vims(tacker_client)]: %s" % e)
return None
@@ -553,7 +644,7 @@ def delete_vim(tacker_client, vim_id=None, vim_name=None):
raise Exception('You need to provide VIM id or VIM name')
vim = get_vim_id(tacker_client, vim_name)
return tacker_client.delete_vim(vim)
- except Exception, e:
+ except Exception as e:
logger.error("Error [delete_vim(tacker_client, '%s', '%s')]: %s"
% (vim_id, vim_name, e))
return None
@@ -574,9 +665,8 @@ def register_vim(tacker_client, vim_file=None):
with open(vim_file) as f:
json_dict = json.load(f)
- json_dict['vim']['auth_url'] = CONST.__getattribute__('OS_AUTH_URL')
- json_dict['vim']['auth_cred']['password'] = CONST.__getattribute__(
- 'OS_PASSWORD')
+ json_dict['vim']['auth_url'] = os.environ['OS_AUTH_URL']
+ json_dict['vim']['auth_cred']['password'] = os.environ['OS_PASSWORD']
json.dump(json_dict, open(tmp_file, 'w'))
@@ -609,19 +699,28 @@ def create_vnf_in_av_zone(
def create_vnffg_with_param_file(tacker_client, vnffgd_name, vnffg_name,
- default_param_file, neutron_port):
+ default_param_file, client_port,
+ server_port=None, server_ip=None):
param_file = default_param_file
-
- if neutron_port is not None:
+ data = {}
+ if client_port:
+ data['net_src_port_id'] = client_port
+ if server_port:
+ data['net_dst_port_id'] = server_port
+ if server_ip:
+ data['ip_dst_prefix'] = server_ip
+
+ if client_port is not None or server_port is not None:
param_file = os.path.join(
'/tmp',
- 'param_{0}.json'.format(neutron_port))
- data = {
- 'net_src_port_id': neutron_port
- }
+ 'param_{0}.json'.format(vnffg_name))
with open(param_file, 'w+') as f:
json.dump(data, f)
+
+ symmetrical = True if client_port and server_port else False
+
create_vnffg(tacker_client,
vnffgd_name=vnffgd_name,
vnffg_name=vnffg_name,
- param_file=param_file)
+ param_file=param_file,
+ symmetrical=symmetrical)
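To summarise the parameter-file handling above in isolation, a hedged sketch that builds the same /tmp/param_<vnffg>.json file and derives the symmetrical flag; the port IDs and prefix at the bottom are example values only.

    import json
    import os

    def build_vnffg_params(vnffg_name, client_port=None, server_port=None,
                           server_ip=None):
        data = {}
        if client_port:
            data['net_src_port_id'] = client_port
        if server_port:
            data['net_dst_port_id'] = server_port
        if server_ip:
            data['ip_dst_prefix'] = server_ip
        param_file = os.path.join('/tmp', 'param_{0}.json'.format(vnffg_name))
        with open(param_file, 'w+') as f:
            json.dump(data, f)
        # The chain is symmetrical only when both ports are known.
        symmetrical = bool(client_port and server_port)
        return param_file, symmetrical

    print(build_vnffg_params('red_http', 'port-1', 'port-2', '11.0.0.5/32'))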
diff --git a/sfc/lib/results.py b/sfc/lib/results.py
index 15d82e02..2f2edfc0 100644
--- a/sfc/lib/results.py
+++ b/sfc/lib/results.py
@@ -7,7 +7,6 @@
#
# http://www.apache.org/licenses/LICENSE-2.0
#
-
import logging
logger = logging.getLogger(__name__)
diff --git a/sfc/lib/test_utils.py b/sfc/lib/test_utils.py
index 9cdc02b2..36b52755 100644
--- a/sfc/lib/test_utils.py
+++ b/sfc/lib/test_utils.py
@@ -10,10 +10,9 @@
import os
import subprocess
import time
-
+import shutil
+import urllib
import logging
-import functest.utils.functest_utils as ft_utils
-
logger = logging.getLogger(__name__)
SSH_OPTIONS = '-q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
@@ -51,13 +50,29 @@ def run_cmd_remote(ip, cmd, username="root", passwd="opnfv"):
return run_cmd(ssh_cmd)
+def download_url(url, dest_path):
+ """
+ Download a file to a destination path given a URL
+ """
+ name = url.rsplit('/')[-1]
+ dest = dest_path + "/" + name
+ try:
+ response = urllib.urlopen(url)
+ except Exception:
+ return False
+
+ with open(dest, 'wb') as lfile:
+ shutil.copyfileobj(response, lfile)
+ return True
+
+
def download_image(url, image_path):
image_filename = os.path.basename(image_path)
image_url = "%s/%s" % (url, image_filename)
image_dir = os.path.dirname(image_path)
if not os.path.isfile(image_path):
logger.info("Downloading image")
- ft_utils.download_url(image_url, image_dir)
+ download_url(image_url, image_dir)
else:
logger.info("Using old image")
@@ -95,7 +110,7 @@ def start_http_server(ip, iterations_check=10):
logger.info(output)
while iterations_check > 0:
- _, output, _ = run_cmd_remote(ip, "ss -na | grep *:80")
+ _, output, _ = run_cmd_remote(ip, "netstat -pntl | grep :80")
if output:
return True
else:
@@ -107,17 +122,20 @@ def start_http_server(ip, iterations_check=10):
return False
-def start_vxlan_tool(remote_ip, interface="eth0", block=None):
+def start_vxlan_tool(remote_ip, interface="eth0", output=None, block=None):
"""
Starts vxlan_tool on a remote host.
vxlan_tool.py converts a regular Service Function into an NSH-aware SF
when the "--do forward" option is used, it decrements the NSI appropriately.
- 'block' parameters allows to specify a port where packets will be dropped.
+ The 'output' parameter allows specifying an interface through which to
+ forward, if different from the input interface.
+ The 'block' parameter allows specifying a port on which packets will be dropped.
"""
command = "nohup python /root/vxlan_tool.py"
- options = "{do} {interface} {block_option}".format(
+ options = "{do} {interface} {output_option} {block_option}".format(
do="--do forward",
interface="--interface {}".format(interface),
+ output_option="--output {}".format(output) if output else "",
block_option="--block {}".format(block) if block is not None else "")
output_redirection = "> /dev/null 2>&1"
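A small self-contained sketch of how the vxlan_tool command line above is assembled once the optional output and block arguments are added; the values passed at the bottom are examples.

    def build_vxlan_cmd(interface="eth0", output=None, block=None):
        options = "{do} {interface} {output_option} {block_option}".format(
            do="--do forward",
            interface="--interface {}".format(interface),
            output_option="--output {}".format(output) if output else "",
            block_option="--block {}".format(block) if block is not None else "")
        return ("nohup python /root/vxlan_tool.py " + options +
                " > /dev/null 2>&1")

    print(build_vxlan_cmd(output="eth1", block="80"))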
diff --git a/sfc/tests/functest/config-pike.yaml b/sfc/tests/functest/config-pike.yaml
new file mode 100644
index 00000000..eff95c08
--- /dev/null
+++ b/sfc/tests/functest/config-pike.yaml
@@ -0,0 +1,84 @@
+---
+defaults:
+ # odl-sfc uses custom flavors as per below params
+ flavor: custom
+ ram_size_in_mb: 500
+ disk_size_in_gb: 1
+ vcpu_count: 1
+ image_name: sfc_nsh_fraser
+ installer:
+ fuel:
+ user: root
+ password: r00tme
+ cluster: 1 # Change this to the id of the desired fuel env (1, 2, 3...)
+ apex:
+ user: stack
+ pkey_file: "/root/.ssh/id_rsa"
+ osa:
+ user: root
+ pkey_file: "/root/.ssh/id_rsa"
+ compass:
+ user: root
+ pkey_file: "/root/.ssh/id_rsa"
+ image_format: qcow2
+ image_url: "http://artifacts.opnfv.org/sfc/images/sfc_nsh_fraser.qcow2"
+ vnfd-dir: "vnfd-templates"
+ vnfd-default-params-file: "test-vnfd-default-params.yaml"
+
+
+testcases:
+ sfc_one_chain_two_service_functions:
+ enabled: true
+ order: 0
+ description: "ODL-SFC Testing SFs when they are located on the same chain"
+ net_name: example-net
+ subnet_name: example-subnet
+ router_name: example-router
+ subnet_cidr: "11.0.0.0/24"
+ secgroup_name: "example-sg"
+ secgroup_descr: "Example Security group"
+ test_vnfd_red: "test-one-chain-vnfd1.yaml"
+ test_vnfd_blue: "test-one-chain-vnfd2.yaml"
+ test_vnffgd_red: "test-one-chain-vnffgd-pike.yaml"
+
+ sfc_two_chains_SSH_and_HTTP:
+ enabled: false
+ order: 1
+ description: "ODL-SFC tests with two chains and one SF per chain"
+ net_name: example-net
+ subnet_name: example-subnet
+ router_name: example-router
+ subnet_cidr: "11.0.0.0/24"
+ secgroup_name: "example-sg"
+ secgroup_descr: "Example Security group"
+ test_vnfd_red: "test-two-chains-vnfd1.yaml"
+ test_vnfd_blue: "test-two-chains-vnfd2.yaml"
+ test_vnffgd_red: "test-two-chains-vnffgd1-pike.yaml"
+ test_vnffgd_blue: "test-two-chains-vnffgd2-pike.yaml"
+
+ sfc_symmetric_chain:
+ enabled: false
+ order: 2
+ description: "Verify the behavior of a symmetric service chain"
+ net_name: example-net
+ subnet_name: example-subnet
+ router_name: example-router
+ subnet_cidr: "11.0.0.0/24"
+ secgroup_name: "example-sg"
+ secgroup_descr: "Example Security group"
+ test_vnfd: "test-symmetric-vnfd.yaml"
+ test_vnffgd: "test-symmetric-vnffgd.yaml"
+ source_port: 22222
+
+ sfc_chain_deletion:
+ enabled: false
+ order: 3
+ description: "Verify if chains work correctly after deleting one"
+ net_name: example-net
+ subnet_name: example-subnet
+ router_name: example-router
+ subnet_cidr: "11.0.0.0/24"
+ secgroup_name: "example-sg"
+ secgroup_descr: "Example Security group"
+ test_vnfd_red: "test-one-chain-vnfd1.yaml"
+ test_vnffgd_red: "test-deletion-vnffgd-pike.yaml"
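A minimal sketch, assuming PyYAML, of how a config file like the one above can be consumed: load it and walk the enabled test cases in 'order'. This only illustrates the file layout, not the runner's actual loading code.

    import yaml

    with open('sfc/tests/functest/config-pike.yaml') as f:
        cfg = yaml.safe_load(f)

    # Keep only enabled test cases and honour their declared order.
    enabled = sorted((tc for tc in cfg['testcases'].values() if tc['enabled']),
                     key=lambda tc: tc['order'])
    for tc in enabled:
        print(tc['description'])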
diff --git a/sfc/tests/functest/config.yaml b/sfc/tests/functest/config.yaml
index be37e626..9c743553 100644
--- a/sfc/tests/functest/config.yaml
+++ b/sfc/tests/functest/config.yaml
@@ -2,10 +2,10 @@
defaults:
# odl-sfc uses custom flavors as per below params
flavor: custom
- ram_size_in_mb: 1500
- disk_size_in_gb: 10
+ ram_size_in_mb: 500
+ disk_size_in_gb: 1
vcpu_count: 1
- image_name: sfc_nsh_euphrates
+ image_name: sfc_nsh_fraser
installer:
fuel:
user: root
@@ -17,14 +17,22 @@ defaults:
osa:
user: root
pkey_file: "/root/.ssh/id_rsa"
+ compass:
+ user: root
+ pkey_file: "/root/.ssh/id_rsa"
image_format: qcow2
- image_url: "http://artifacts.opnfv.org/sfc/images/sfc_nsh_euphrates.qcow2"
+ image_url: "http://artifacts.opnfv.org/sfc/images/sfc_nsh_fraser.qcow2"
vnfd-dir: "vnfd-templates"
vnfd-default-params-file: "test-vnfd-default-params.yaml"
+ # [OPTIONAL] Only when deploying VNFs without the default image (vxlan_tool)
+ # vnf_image_name: xxx
+ # vnf_image_format: yyy
+ # vnf_image_url: zzz
testcases:
sfc_one_chain_two_service_functions:
+ class_name: "SfcOneChainTwoServiceTC"
enabled: true
order: 0
description: "ODL-SFC Testing SFs when they are located on the same chain"
@@ -34,11 +42,20 @@ testcases:
subnet_cidr: "11.0.0.0/24"
secgroup_name: "example-sg"
secgroup_descr: "Example Security group"
+ vnf_names:
+ - 'testVNF1'
+ - 'testVNF2'
+ supported_installers:
+ - 'fuel'
+ - 'apex'
+ - 'osa'
+ - 'compass'
test_vnfd_red: "test-one-chain-vnfd1.yaml"
test_vnfd_blue: "test-one-chain-vnfd2.yaml"
test_vnffgd_red: "test-one-chain-vnffgd.yaml"
sfc_two_chains_SSH_and_HTTP:
+ class_name: "SfcTwoChainsSSHandHTTP"
enabled: false
order: 1
description: "ODL-SFC tests with two chains and one SF per chain"
@@ -48,12 +65,21 @@ testcases:
subnet_cidr: "11.0.0.0/24"
secgroup_name: "example-sg"
secgroup_descr: "Example Security group"
+ vnf_names:
+ - 'testVNF1'
+ - 'testVNF2'
+ supported_installers:
+ - 'fuel'
+ - 'apex'
+ - 'osa'
+ - 'compass'
test_vnfd_red: "test-two-chains-vnfd1.yaml"
test_vnfd_blue: "test-two-chains-vnfd2.yaml"
test_vnffgd_red: "test-two-chains-vnffgd1.yaml"
test_vnffgd_blue: "test-two-chains-vnffgd2.yaml"
sfc_symmetric_chain:
+ class_name: "SfcSymmetricChain"
enabled: false
order: 2
description: "Verify the behavior of a symmetric service chain"
@@ -63,6 +89,35 @@ testcases:
subnet_cidr: "11.0.0.0/24"
secgroup_name: "example-sg"
secgroup_descr: "Example Security group"
+ vnf_names:
+ - 'testVNF1'
+ supported_installers:
+ - 'fuel'
+ - 'apex'
+ - 'osa'
+ - 'compass'
test_vnfd: "test-symmetric-vnfd.yaml"
- allowed_source_port: 22222
- blocked_source_port: 33333
+ test_vnffgd: "test-symmetric-vnffgd.yaml"
+ source_port: 22222
+
+ sfc_chain_deletion:
+ class_name: "SfcChainDeletion"
+ enabled: false
+ order: 3
+ description: "Verify if chains work correctly after deleting one"
+ net_name: example-net
+ subnet_name: example-subnet
+ router_name: example-router
+ subnet_cidr: "11.0.0.0/24"
+ secgroup_name: "example-sg"
+ secgroup_descr: "Example Security group"
+ vnf_names:
+ - 'testVNF1'
+ - 'testVNF2'
+ supported_installers:
+ - 'fuel'
+ - 'apex'
+ - 'osa'
+ - 'compass'
+ test_vnfd_red: "test-one-chain-vnfd1.yaml"
+ test_vnffgd_red: "test-deletion-vnffgd.yaml"
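A hedged sketch of how the optional vnf_image_* keys above might be resolved, falling back to the common image settings when they are absent; the dictionary below stands in for the parsed 'defaults' section.

    defaults = {
        'image_name': 'sfc_nsh_fraser',
        'image_url': 'http://artifacts.opnfv.org/sfc/images/sfc_nsh_fraser.qcow2',
        'image_format': 'qcow2',
        # 'vnf_image_name': ...  # only present when a dedicated VNF image is used
    }

    vnf_image_name = defaults.get('vnf_image_name', defaults['image_name'])
    vnf_image_url = defaults.get('vnf_image_url', defaults['image_url'])
    vnf_image_format = defaults.get('vnf_image_format', defaults['image_format'])
    print(vnf_image_name, vnf_image_format)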
diff --git a/sfc/tests/functest/register-vim.json b/sfc/tests/functest/register-vim.json
index 00719449..342fd337 100644
--- a/sfc/tests/functest/register-vim.json
+++ b/sfc/tests/functest/register-vim.json
@@ -9,7 +9,8 @@
"username": "admin",
"user_domain_name": "Default",
"password": "",
- "user_id": ""
+ "user_id": "",
+ "cert_verify": "False"
},
"auth_url": "",
"type": "openstack",
diff --git a/sfc/tests/functest/register-vim.json-queens b/sfc/tests/functest/register-vim.json-queens
new file mode 100644
index 00000000..342fd337
--- /dev/null
+++ b/sfc/tests/functest/register-vim.json-queens
@@ -0,0 +1,19 @@
+{
+ "vim": {
+ "vim_project": {
+ "project_domain_name": "Default",
+ "id": "",
+ "name": "admin"
+ },
+ "auth_cred": {
+ "username": "admin",
+ "user_domain_name": "Default",
+ "password": "",
+ "user_id": "",
+ "cert_verify": "False"
+ },
+ "auth_url": "",
+ "type": "openstack",
+ "name": "test-vim"
+ }
+}
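The empty "auth_url" and "password" fields above are filled in at run time; a short sketch of that step, mirroring the register_vim() change earlier in this diff (paths are examples, and the OS_* variables must be set in the environment).

    import json
    import os

    with open('sfc/tests/functest/register-vim.json') as f:
        vim = json.load(f)

    # Inject the credentials of the current OpenStack environment.
    vim['vim']['auth_url'] = os.environ['OS_AUTH_URL']
    vim['vim']['auth_cred']['password'] = os.environ['OS_PASSWORD']

    with open('/tmp/register-vim.json', 'w') as f:
        json.dump(vim, f)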
diff --git a/sfc/tests/functest/run_sfc_tests.py b/sfc/tests/functest/run_sfc_tests.py
index a1e73040..7c0b9d15 100644
--- a/sfc/tests/functest/run_sfc_tests.py
+++ b/sfc/tests/functest/run_sfc_tests.py
@@ -1,4 +1,4 @@
-#!/bin/python
+#!/usr/bin/env python
#
# Copyright (c) 2015 All rights reserved
# This program and the accompanying materials
@@ -11,18 +11,17 @@
import importlib
import os
import time
+import logging
import sys
import yaml
-from functest.core import testcase
+from collections import OrderedDict
from opnfv.utils import ovs_logger as ovs_log
from opnfv.deployment.factory import Factory as DeploymentFactory
from sfc.lib import cleanup as sfc_cleanup
from sfc.lib import config as sfc_config
from sfc.lib import odl_utils as odl_utils
-
-from collections import OrderedDict
-import logging
+from xtesting.core import testcase
logger = logging.getLogger(__name__)
COMMON_CONFIG = sfc_config.CommonConfig()
@@ -104,7 +103,7 @@ class SfcFunctest(testcase.TestCase):
self.__disable_heat_resource_finder_cache_apex(controllers)
elif installer_type == "fuel":
self.__disable_heat_resource_finder_cache_fuel(controllers)
- elif installer_type == "osa":
+ elif installer_type in ("osa", "compass"):
pass
else:
raise Exception('Unsupported installer')
@@ -149,12 +148,20 @@ class SfcFunctest(testcase.TestCase):
(test_name, test_descr))
logger.info(title)
logger.info("%s\n" % ("=" * len(title)))
- t = importlib.import_module(
+ module = importlib.import_module(
"sfc.tests.functest.{0}".format(test_name),
package=None)
+
+ testcase_config = sfc_config.TestcaseConfig(test_name)
+ supported_installers = test_cfg['supported_installers']
+ vnf_names = test_cfg['vnf_names']
+
+ tc_class = getattr(module, test_cfg['class_name'])
+ tc_instance = tc_class(testcase_config, supported_installers,
+ vnf_names)
start_time = time.time()
try:
- result, creators = t.main()
+ result, creators = tc_instance.run()
except Exception as e:
logger.error("Exception when executing: %s" % test_name)
logger.error(e)
@@ -187,8 +194,7 @@ class SfcFunctest(testcase.TestCase):
return testcase.TestCase.EX_RUN_ERROR
-if __name__ == '__main__':
- logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s'
- '- %(levelname)s - %(message)s')
+def main():
+ logging.basicConfig(level=logging.INFO)
SFC = SfcFunctest()
sys.exit(SFC.run())
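Condensed sketch of the dynamic test-case loading pattern introduced above: the module is imported by name and the class named by the 'class_name' key in config.yaml is looked up with getattr and run. The values are taken from the sample config; this is an illustration, not the full runner.

    import importlib
    import sfc.lib.config as sfc_config

    test_name = 'sfc_chain_deletion'                      # testcase key
    class_name = 'SfcChainDeletion'                       # 'class_name' value
    supported_installers = ['fuel', 'apex', 'osa', 'compass']
    vnf_names = ['testVNF1', 'testVNF2']

    testcase_config = sfc_config.TestcaseConfig(test_name)
    module = importlib.import_module('sfc.tests.functest.{0}'.format(test_name))
    tc_class = getattr(module, class_name)
    result, creators = tc_class(testcase_config, supported_installers,
                                vnf_names).run()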
diff --git a/sfc/tests/functest/sfc_chain_deletion.py b/sfc/tests/functest/sfc_chain_deletion.py
new file mode 100644
index 00000000..849c2971
--- /dev/null
+++ b/sfc/tests/functest/sfc_chain_deletion.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2015 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+import threading
+import logging
+import sfc.lib.odl_utils as odl_utils
+import sfc.lib.config as sfc_config
+import sfc.lib.test_utils as test_utils
+from sfc.tests.functest import sfc_parent_function
+
+logger = logging.getLogger(__name__)
+
+
+class SfcChainDeletion(sfc_parent_function.SfcCommonTestCase):
+ """We create one client and one server using nova.
+ Then, an SF is created using tacker.
+ A service chain routing the traffic
+ through this SF is created as well.
+ After that, the chain is deleted and re-created.
+ Finally, the vxlan tool is used in order to check a single
+ HTTP traffic scenario.
+ """
+ def run(self):
+
+ logger.info("The test scenario %s is starting", __name__)
+ self.create_custom_vnfd(self.testcase_config.test_vnfd_red,
+ 'test-vnfd1')
+ self.create_custom_av(self.vnfs[0], 'test-vnfd1', 'test-vim')
+
+ self.create_chain(self.testcase_config)
+
+ t1 = threading.Thread(target=odl_utils.wait_for_classification_rules,
+ args=(self.ovs_logger, self.compute_nodes,
+ self.odl_ip, self.odl_port,
+ self.client_instance.compute_host,
+ [self.neutron_port],))
+
+ try:
+ t1.start()
+ except Exception as e:
+ logger.error("Unable to start the thread that counts time %s" % e)
+
+ logger.info("Assigning floating IPs to instances")
+
+ self.assign_floating_ip_client_server()
+ self.assign_floating_ip_sfs()
+
+ self.check_floating_ips()
+
+ self.start_services_in_vm()
+ logger.info("Wait for ODL to update the classification rules in OVS")
+ t1.join()
+
+ self.remove_vnffg('red_http', 'red')
+ self.check_deletion()
+
+ self.create_chain(self.testcase_config)
+
+ t2 = threading.Thread(target=odl_utils.wait_for_classification_rules,
+ args=(self.ovs_logger, self.compute_nodes,
+ self.odl_ip, self.odl_port,
+ self.client_instance.compute_host,
+ [self.neutron_port],))
+ try:
+ t2.start()
+ except Exception as e:
+ logger.error("Unable to start the thread that counts time %s" % e)
+
+ logger.info("Starting SSH firewall on %s" % self.fips_sfs[0])
+ test_utils.start_vxlan_tool(self.fips_sfs[0])
+
+ logger.info("Wait for ODL to update the classification rules in OVS")
+ t2.join()
+
+ logger.info("Test HTTP")
+ results = self.present_results_allowed_http()
+
+ self.vxlan_blocking_start(self.fips_sfs[0], "80")
+
+ logger.info("Test HTTP again")
+ results = self.present_results_http()
+
+ if __name__ == '__main__':
+ return results.compile_summary(), self.creators
+
+ if __name__ == 'sfc.tests.functest.sfc_chain_deletion':
+ return results.compile_summary(), self.creators
+
+
+if __name__ == '__main__':
+
+ TESTCASE_CONFIG = sfc_config.TestcaseConfig('sfc_chain_deletion')
+ supported_installers = ['fuel', 'apex', 'osa', 'compass']
+ vnf_names = ['testVNF1', 'testVNF2']
+
+ test_run = SfcChainDeletion(TESTCASE_CONFIG, supported_installers,
+ vnf_names)
+ test_run.run()
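The run() method above overlaps the wait for the ODL classification rules with the rest of the setup; a self-contained sketch of that timing pattern, where wait_for_classification_rules is a dummy stand-in for the real poll against ODL/OVS.

    import threading
    import time

    def wait_for_classification_rules():
        time.sleep(2)      # stands in for polling the ODL/OVS flows

    t1 = threading.Thread(target=wait_for_classification_rules)
    t1.start()
    # ... assign floating IPs, start the HTTP server and vxlan_tool ...
    t1.join()              # continue only once the rules are programmed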
diff --git a/sfc/tests/functest/sfc_one_chain_two_service_functions.py b/sfc/tests/functest/sfc_one_chain_two_service_functions.py
index 043b5a6a..eeb2ec8c 100644
--- a/sfc/tests/functest/sfc_one_chain_two_service_functions.py
+++ b/sfc/tests/functest/sfc_one_chain_two_service_functions.py
@@ -7,277 +7,83 @@
#
# http://www.apache.org/licenses/LICENSE-2.0
#
-import os
-import sys
import threading
import logging
-
-import sfc.lib.openstack_utils as os_sfc_utils
import sfc.lib.odl_utils as odl_utils
-import opnfv.utils.ovs_logger as ovs_log
-
import sfc.lib.config as sfc_config
-import sfc.lib.test_utils as test_utils
-from sfc.lib.results import Results
-from opnfv.deployment.factory import Factory as DeploymentFactory
-import sfc.lib.topology_shuffler as topo_shuffler
+from sfc.tests.functest import sfc_parent_function
""" logging configuration """
logger = logging.getLogger(__name__)
-CLIENT = "client"
-SERVER = "server"
-COMMON_CONFIG = sfc_config.CommonConfig()
-TESTCASE_CONFIG = sfc_config.TestcaseConfig(
- 'sfc_one_chain_two_service'
- '_functions')
-
-
-def main():
- deploymentHandler = DeploymentFactory.get_handler(
- COMMON_CONFIG.installer_type,
- COMMON_CONFIG.installer_ip,
- COMMON_CONFIG.installer_user,
- COMMON_CONFIG.installer_password,
- COMMON_CONFIG.installer_key_file)
-
- installer_type = os.environ.get("INSTALLER_TYPE")
- supported_installers = ['fuel', 'apex', 'osa']
+class SfcOneChainTwoServiceTC(sfc_parent_function.SfcCommonTestCase):
+ """We create one client and one server using nova.
+ Then, 2 SFs are created using tacker.
+ A chain is created where both SFs are included.
+ The vxlan tool is used on both SFs. The purpose is to
+ check different HTTP traffic combinations.
+ """
+ def run(self):
- if installer_type not in supported_installers:
- logger.error(
- '\033[91mYour installer is not supported yet\033[0m')
- sys.exit(1)
+ logger.info("The test scenario %s is starting", __name__)
+ self.create_custom_vnfd(self.testcase_config.test_vnfd_red,
+ 'test-vnfd1')
+ self.create_custom_vnfd(self.testcase_config.test_vnfd_blue,
+ 'test-vnfd2')
- installer_ip = os.environ.get("INSTALLER_IP")
- if not installer_ip:
- logger.error(
- '\033[91minstaller ip is not set\033[0m')
- logger.error(
- '\033[91mexport INSTALLER_IP=<ip>\033[0m')
- sys.exit(1)
+ self.create_custom_av(self.vnfs[0], 'test-vnfd1', 'test-vim')
+ self.create_custom_av(self.vnfs[1], 'test-vnfd2', 'test-vim')
- cluster = COMMON_CONFIG.installer_cluster
+ self.create_vnffg(self.testcase_config.test_vnffgd_red, 'red',
+ 'red_http')
+ # Start measuring the time it takes to implement the
+ # classification rules
+ t1 = threading.Thread(target=odl_utils.wait_for_classification_rules,
+ args=(self.ovs_logger, self.compute_nodes,
+ self.odl_ip, self.odl_port,
+ self.client_instance.compute_host,
+ [self.neutron_port],))
+ try:
+ t1.start()
+ except Exception as e:
+ logger.error("Unable to start the thread that counts time %s" % e)
- openstack_nodes = (deploymentHandler.get_nodes({'cluster': cluster})
- if cluster is not None
- else deploymentHandler.get_nodes())
+ self.assign_floating_ip_client_server()
- controller_nodes = [node for node in openstack_nodes
- if node.is_controller()]
- compute_nodes = [node for node in openstack_nodes
- if node.is_compute()]
+ self.assign_floating_ip_sfs()
- odl_ip, odl_port = odl_utils.get_odl_ip_port(openstack_nodes)
+ self.check_floating_ips()
+ self.start_services_in_vm()
- for compute in compute_nodes:
- logger.info("This is a compute: %s" % compute.ip)
+ t1.join()
- results = Results(COMMON_CONFIG.line_length)
- results.add_to_summary(0, "=")
- results.add_to_summary(2, "STATUS", "SUBTEST")
- results.add_to_summary(0, "=")
+ logger.info("Allowed HTTP scenario")
+ results = self.present_results_allowed_http()
- openstack_sfc = os_sfc_utils.OpenStackSFC()
+ self.vxlan_blocking_start(self.fips_sfs[0], "80")
+ results = self.present_results_http()
- custom_flv = openstack_sfc.create_flavor(
- COMMON_CONFIG.flavor,
- COMMON_CONFIG.ram_size_in_mb,
- COMMON_CONFIG.disk_size_in_gb,
- COMMON_CONFIG.vcpu_count)
- if not custom_flv:
- logger.error("Failed to create custom flavor")
- sys.exit(1)
+ self.vxlan_blocking_start(self.fips_sfs[1], "80")
+ self.vxlan_blocking_stop(self.fips_sfs[0])
- tacker_client = os_sfc_utils.get_tacker_client()
+ results = self.present_results_http()
- controller_clients = test_utils.get_ssh_clients(controller_nodes)
- compute_clients = test_utils.get_ssh_clients(compute_nodes)
+ if __name__ == '__main__':
+ return results.compile_summary(), self.creators
- ovs_logger = ovs_log.OVSLogger(
- os.path.join(COMMON_CONFIG.sfc_test_dir, 'ovs-logs'),
- COMMON_CONFIG.functest_results_dir)
+ if __name__ == \
+ 'sfc.tests.functest.sfc_one_chain_two_service_functions':
+ return results.compile_summary(), self.creators
- image_creator = openstack_sfc.register_glance_image(
- COMMON_CONFIG.image_name,
- COMMON_CONFIG.image_url,
- COMMON_CONFIG.image_format,
- 'public')
- network, router = openstack_sfc.create_network_infrastructure(
- TESTCASE_CONFIG.net_name,
- TESTCASE_CONFIG.subnet_name,
- TESTCASE_CONFIG.subnet_cidr,
- TESTCASE_CONFIG.router_name)
-
- sg = openstack_sfc.create_security_group(TESTCASE_CONFIG.secgroup_name)
+if __name__ == '__main__':
+ TESTCASE_CONFIG = sfc_config.TestcaseConfig('sfc_one_chain_two_service'
+ '_functions')
+ supported_installers = ['fuel', 'apex', 'osa', 'compass']
vnfs = ['testVNF1', 'testVNF2']
- topo_seed = topo_shuffler.get_seed()
- testTopology = topo_shuffler.topology(vnfs, openstack_sfc, seed=topo_seed)
-
- logger.info('This test is run with the topology {0}'
- .format(testTopology['id']))
- logger.info('Topology description: {0}'
- .format(testTopology['description']))
-
- client_instance, client_creator = openstack_sfc.create_instance(
- CLIENT, COMMON_CONFIG.flavor, image_creator, network, sg,
- av_zone=testTopology['client'])
-
- server_instance, server_creator = openstack_sfc.create_instance(
- SERVER, COMMON_CONFIG.flavor, image_creator, network, sg,
- av_zone=testTopology['server'])
-
- server_ip = server_instance.ports[0].ips[0]['ip_address']
- logger.info("Server instance received private ip [{}]".format(server_ip))
-
- os_sfc_utils.register_vim(tacker_client, vim_file=COMMON_CONFIG.vim_file)
-
- tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- TESTCASE_CONFIG.test_vnfd_red)
-
- os_sfc_utils.create_vnfd(
- tacker_client,
- tosca_file=tosca_file, vnfd_name='test-vnfd1')
-
- tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- TESTCASE_CONFIG.test_vnfd_blue)
- os_sfc_utils.create_vnfd(
- tacker_client,
- tosca_file=tosca_file, vnfd_name='test-vnfd2')
-
- default_param_file = os.path.join(
- COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- COMMON_CONFIG.vnfd_default_params_file)
-
- os_sfc_utils.create_vnf_in_av_zone(
- tacker_client, vnfs[0], 'test-vnfd1', 'test-vim',
- default_param_file, testTopology[vnfs[0]])
- os_sfc_utils.create_vnf_in_av_zone(
- tacker_client, vnfs[1], 'test-vnfd2', 'test-vim',
- default_param_file, testTopology[vnfs[1]])
-
- vnf1_id = os_sfc_utils.wait_for_vnf(tacker_client, vnf_name=vnfs[0])
- vnf2_id = os_sfc_utils.wait_for_vnf(tacker_client, vnf_name=vnfs[1])
- if vnf1_id is None or vnf2_id is None:
- logger.error('ERROR while booting vnfs')
- sys.exit(1)
-
- tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnffgd_dir,
- TESTCASE_CONFIG.test_vnffgd_red)
-
- os_sfc_utils.create_vnffgd(tacker_client,
- tosca_file=tosca_file,
- vnffgd_name='red')
-
- neutron_port = openstack_sfc.get_client_port_id(client_instance)
- os_sfc_utils.create_vnffg_with_param_file(tacker_client, 'red',
- 'red_http',
- default_param_file,
- neutron_port.id)
-
- # Start measuring the time it takes to implement the classification rules
- t1 = threading.Thread(target=odl_utils.wait_for_classification_rules,
- args=(ovs_logger, compute_nodes, odl_ip, odl_port,))
- try:
- t1.start()
- except Exception as e:
- logger.error("Unable to start the thread that counts time %s" % e)
-
- logger.info("Assigning floating IPs to instances")
- client_floating_ip = openstack_sfc.assign_floating_ip(router,
- client_instance,
- client_creator)
- server_floating_ip = openstack_sfc.assign_floating_ip(router,
- server_instance,
- server_creator)
- fips_sfs = openstack_sfc.assign_floating_ip_vnfs(router)
- sf1_floating_ip = fips_sfs[0]
- sf2_floating_ip = fips_sfs[1]
-
- fips = [client_floating_ip, server_floating_ip, sf1_floating_ip,
- sf2_floating_ip]
-
- for ip in fips:
- logger.info("Checking connectivity towards floating IP [%s]" % ip)
- if not test_utils.ping(ip, retries=50, retry_timeout=3):
- logger.error("Cannot ping floating IP [%s]" % ip)
- os_sfc_utils.get_tacker_items()
- odl_utils.get_odl_items(odl_ip, odl_port)
- sys.exit(1)
- logger.info("Successful ping to floating IP [%s]" % ip)
-
- if not test_utils.check_ssh([sf1_floating_ip, sf2_floating_ip]):
- logger.error("Cannot establish SSH connection to the SFs")
- sys.exit(1)
-
- logger.info("Starting HTTP server on %s" % server_floating_ip)
- if not test_utils.start_http_server(server_floating_ip):
- logger.error(
- 'Failed to start HTTP server on %s' % server_floating_ip)
- sys.exit(1)
-
- for sf_floating_ip in (sf1_floating_ip, sf2_floating_ip):
- logger.info("Starting vxlan_tool on %s" % sf_floating_ip)
- test_utils.start_vxlan_tool(sf_floating_ip)
-
- logger.info("Wait for ODL to update the classification rules in OVS")
- t1.join()
-
- logger.info("Test HTTP")
- if not test_utils.is_http_blocked(client_floating_ip, server_ip):
- results.add_to_summary(2, "PASS", "HTTP works")
- else:
- error = ('\033[91mTEST 1 [FAILED] ==> HTTP BLOCKED\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP blocked")
-
- logger.info("Changing the vxlan_tool to block HTTP traffic")
-
- # Make SF1 block http traffic
- test_utils.stop_vxlan_tool(sf1_floating_ip)
- logger.info("Starting HTTP firewall on %s" % sf1_floating_ip)
- test_utils.start_vxlan_tool(sf1_floating_ip, block="80")
-
- logger.info("Test HTTP again blocking SF1")
- if test_utils.is_http_blocked(client_floating_ip, server_ip):
- results.add_to_summary(2, "PASS", "HTTP Blocked")
- else:
- error = ('\033[91mTEST 2 [FAILED] ==> HTTP WORKS\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP not blocked")
-
- # Make SF2 block http traffic
- test_utils.stop_vxlan_tool(sf2_floating_ip)
- logger.info("Starting HTTP firewall on %s" % sf2_floating_ip)
- test_utils.start_vxlan_tool(sf2_floating_ip, block="80")
- logger.info("Stopping HTTP firewall on %s" % sf1_floating_ip)
- test_utils.stop_vxlan_tool(sf1_floating_ip)
-
- logger.info("Test HTTP again blocking SF2")
- if test_utils.is_http_blocked(client_floating_ip, server_ip):
- results.add_to_summary(2, "PASS", "HTTP Blocked")
- else:
- error = ('\033[91mTEST 3 [FAILED] ==> HTTP WORKS\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP not blocked")
-
- return results.compile_summary(), openstack_sfc.creators
-
-
-if __name__ == '__main__':
- logging.config.fileConfig(COMMON_CONFIG.functest_logging_api)
- main()
+ test_run = SfcOneChainTwoServiceTC(TESTCASE_CONFIG, supported_installers,
+ vnfs)
+ test_run.run()
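With the refactoring above, a test case is now a thin subclass of SfcCommonTestCase whose run() composes the shared helpers; a trimmed, hedged illustration of that shape (MinimalChainTC is hypothetical and omits the threading and traffic-blocking steps of the real tests).

    from sfc.tests.functest import sfc_parent_function

    class MinimalChainTC(sfc_parent_function.SfcCommonTestCase):

        def run(self):
            # Describe and boot the SF, then build the chain around it.
            self.create_custom_vnfd(self.testcase_config.test_vnfd_red,
                                    'test-vnfd1')
            self.create_custom_av(self.vnfs[0], 'test-vnfd1', 'test-vim')
            self.create_vnffg(self.testcase_config.test_vnffgd_red, 'red',
                              'red_http')
            # Make the VMs reachable and verify traffic goes through.
            self.assign_floating_ip_client_server()
            self.assign_floating_ip_sfs()
            self.check_floating_ips()
            self.start_services_in_vm()
            results = self.present_results_allowed_http()
            return results.compile_summary(), self.creators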
diff --git a/sfc/tests/functest/sfc_parent_function.py b/sfc/tests/functest/sfc_parent_function.py
new file mode 100644
index 00000000..a16fa6a7
--- /dev/null
+++ b/sfc/tests/functest/sfc_parent_function.py
@@ -0,0 +1,530 @@
+import os
+import sys
+import logging
+import sfc.lib.test_utils as test_utils
+import sfc.lib.openstack_utils as os_sfc_utils
+import sfc.lib.topology_shuffler as topo_shuffler
+
+from opnfv.utils import ovs_logger as ovs_log
+from opnfv.deployment.factory import Factory as DeploymentFactory
+from sfc.lib import config as sfc_config
+from sfc.lib import odl_utils as odl_utils
+from sfc.lib.results import Results
+
+
+logger = logging.getLogger(__name__)
+CLIENT = "client"
+SERVER = "server"
+openstack_sfc = os_sfc_utils.OpenStackSFC()
+COMMON_CONFIG = sfc_config.CommonConfig()
+results = Results(COMMON_CONFIG.line_length)
+
+
+class SfcCommonTestCase(object):
+
+ def __init__(self, testcase_config, supported_installers, vnfs):
+
+ self.compute_nodes = None
+ self.controller_clients = None
+ self.compute_clients = None
+ self.tacker_client = None
+ self.ovs_logger = None
+ self.network = None
+ self.router = None
+ self.sg = None
+ self.image_creator = None
+ self.vnf_image_creator = None
+ self.creators = None
+ self.odl_ip = None
+ self.odl_port = None
+ self.default_param_file = None
+ self.topo_seed = None
+ self.test_topology = None
+ self.server_instance = None
+ self.server_creator = None
+ self.client_instance = None
+ self.client_creator = None
+ self.server_ip = None
+ self.vnf_id = None
+ self.client_floating_ip = None
+ self.server_floating_ip = None
+ self.fips_sfs = None
+ self.neutron_port = None
+ self.testcase_config = testcase_config
+ self.vnfs = vnfs
+
+ self.prepare_env(testcase_config, supported_installers, vnfs)
+
+ def prepare_env(self, testcase_config, supported_installers, vnfs):
+ """Prepare the testcase environment and the components
+ that the test scenario is going to use later on.
+
+ :param testcase_config: the input test config file
+ :param supported_installers: the supported installers for this tc
+ :param vnfs: the names of vnfs
+ :return: Environment preparation
+ """
+
+ deployment_handler = DeploymentFactory.get_handler(
+ COMMON_CONFIG.installer_type,
+ COMMON_CONFIG.installer_ip,
+ COMMON_CONFIG.installer_user,
+ COMMON_CONFIG.installer_password,
+ COMMON_CONFIG.installer_key_file)
+
+ installer_type = os.environ.get("INSTALLER_TYPE")
+
+ if installer_type not in supported_installers:
+ logger.error(
+ '\033[91mYour installer is not supported yet\033[0m')
+ sys.exit(1)
+
+ installer_ip = os.environ.get("INSTALLER_IP")
+ if not installer_ip:
+ logger.error(
+ '\033[91minstaller ip is not set\033[0m')
+ logger.error(
+ '\033[91mexport INSTALLER_IP=<ip>\033[0m')
+ sys.exit(1)
+
+ cluster = COMMON_CONFIG.installer_cluster
+ openstack_nodes = (deployment_handler.get_nodes({'cluster': cluster})
+ if cluster is not None
+ else deployment_handler.get_nodes())
+
+ self.compute_nodes = [node for node in openstack_nodes
+ if node.is_compute()]
+
+ for compute in self.compute_nodes:
+ logger.info("This is a compute: %s" % compute.ip)
+
+ results.add_to_summary(0, "=")
+ results.add_to_summary(2, "STATUS", "SUBTEST")
+ results.add_to_summary(0, "=")
+
+ custom_flv = openstack_sfc.create_flavor(
+ COMMON_CONFIG.flavor,
+ COMMON_CONFIG.ram_size_in_mb,
+ COMMON_CONFIG.disk_size_in_gb,
+ COMMON_CONFIG.vcpu_count)
+ if not custom_flv:
+ logger.error("Failed to create custom flavor")
+ sys.exit(1)
+
+ controller_nodes = [node for node in openstack_nodes
+ if node.is_controller()]
+
+ self.controller_clients = test_utils.get_ssh_clients(controller_nodes)
+ self.compute_clients = test_utils.get_ssh_clients(self.compute_nodes)
+
+ self.tacker_client = os_sfc_utils.get_tacker_client()
+ os_sfc_utils.register_vim(self.tacker_client,
+ vim_file=COMMON_CONFIG.vim_file)
+
+ self.ovs_logger = ovs_log.OVSLogger(
+ os.path.join(COMMON_CONFIG.sfc_test_dir, 'ovs-logs'),
+ COMMON_CONFIG.functest_results_dir)
+
+ self.network, self.router = openstack_sfc.\
+ create_network_infrastructure(testcase_config.net_name,
+ testcase_config.subnet_name,
+ testcase_config.subnet_cidr,
+ testcase_config.router_name)
+
+ self.sg = openstack_sfc.create_security_group(
+ testcase_config.secgroup_name)
+
+ # Image for the vnf is registered
+ self.vnf_image_creator = openstack_sfc.register_glance_image(
+ COMMON_CONFIG.vnf_image_name,
+ COMMON_CONFIG.vnf_image_url,
+ COMMON_CONFIG.vnf_image_format,
+ 'public')
+
+ # Image for the client/server is registered
+ self.image_creator = openstack_sfc.register_glance_image(
+ COMMON_CONFIG.image_name,
+ COMMON_CONFIG.image_url,
+ COMMON_CONFIG.image_format,
+ 'public')
+
+ self.creators = openstack_sfc.creators
+
+ self.odl_ip, self.odl_port = odl_utils.get_odl_ip_port(openstack_nodes)
+
+ self.default_param_file = os.path.join(
+ COMMON_CONFIG.sfc_test_dir,
+ COMMON_CONFIG.vnfd_dir,
+ COMMON_CONFIG.vnfd_default_params_file)
+
+ self.topo_seed = topo_shuffler.get_seed()
+ self.test_topology = topo_shuffler.topology(vnfs, openstack_sfc,
+ seed=self.topo_seed)
+
+ logger.info('This test is run with the topology {0}'
+ .format(self.test_topology['id']))
+ logger.info('Topology description: {0}'
+ .format(self.test_topology['description']))
+
+ self.server_instance, self.server_creator = \
+ openstack_sfc.create_instance(SERVER, COMMON_CONFIG.flavor,
+ self.image_creator, self.network,
+ self.sg,
+ av_zone=self.test_topology['server'])
+
+ self.client_instance, self.client_creator = \
+ openstack_sfc.create_instance(CLIENT, COMMON_CONFIG.flavor,
+ self.image_creator, self.network,
+ self.sg,
+ av_zone=self.test_topology['client'])
+ logger.info('This test is run with the topology {0}'.format(
+ self.test_topology['id']))
+ logger.info('Topology description: {0}'.format(
+ self.test_topology['description']))
+
+ self.server_ip = self.server_instance.ports[0].ips[0]['ip_address']
+ logger.info("Server instance received private ip [{}]".format(
+ self.server_ip))
+
+ def create_custom_vnfd(self, test_case_name, vnfd_name):
+ """Create VNF Descriptor (VNFD)
+
+ :param test_case_name: the name of test case
+ :param vnfd_name: the name of vnfd
+ :return: vnfd
+ """
+
+ tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
+ COMMON_CONFIG.vnfd_dir, test_case_name)
+
+ os_sfc_utils.create_vnfd(self.tacker_client,
+ tosca_file=tosca_file,
+ vnfd_name=vnfd_name)
+
+ def create_custom_av(self, vnf_names, av_member1, av_member2):
+ """Create custom 'av'
+
+ :param vnf_names: names of available vnf(s)
+ :param av_member1: the first member of av zone
+ :param av_member2: the second member of av zone
+ :return: av zone
+ """
+
+ logger.info('This test is run with the topology {0}'.
+ format(self.test_topology['id']))
+ logger.info('Topology description: {0}'
+ .format(self.test_topology['description']))
+
+ os_sfc_utils.create_vnf_in_av_zone(
+ self.tacker_client, vnf_names, av_member1, av_member2,
+ self.default_param_file, self.test_topology[vnf_names])
+
+ self.vnf_id = os_sfc_utils.wait_for_vnf(self.tacker_client,
+ vnf_name=vnf_names)
+ if self.vnf_id is None:
+ logger.error('ERROR while booting vnfs')
+ sys.exit(1)
+
+ def assign_floating_ip_client_server(self):
+ """Assign floating IPs on the router about server and the client
+ instances
+
+ :return: Floating IPs for client and server
+ """
+
+ logger.info("Assigning floating IPs to client and server instances")
+
+ self.client_floating_ip = openstack_sfc.assign_floating_ip(
+ self.router, self.client_instance, self.client_creator)
+ self.server_floating_ip = openstack_sfc.assign_floating_ip(
+ self.router, self.server_instance, self.server_creator)
+
+ def assign_floating_ip_sfs(self, vnf_ip=None):
+ """Assign floating IPs to service function
+
+ :param vnf_ip: IP of vnf - optional
+ :return: The list fips_sfs consists of the available IPs of the service
+ functions
+ """
+
+ logger.info("Assigning floating IPs to service functions")
+
+ self.fips_sfs = openstack_sfc.assign_floating_ip_vnfs(self.router,
+ vnf_ip)
+
+ def check_floating_ips(self):
+ """Check the responsivness of the floating IPs
+
+ :return: The responsivness of IPs in the fips_sfs list is checked
+ """
+
+ fips = [self.client_floating_ip, self.server_floating_ip]
+
+ for sf in self.fips_sfs:
+ fips.append(sf)
+
+ for ip in fips:
+ logger.info("Checking connectivity towards floating IP [%s]" % ip)
+ if not test_utils.ping(ip, retries=50, retry_timeout=3):
+ logger.error("Cannot ping floating IP [%s]" % ip)
+ os_sfc_utils.get_tacker_items()
+ odl_utils.get_odl_items(self.odl_ip, self.odl_port)
+ sys.exit(1)
+ logger.info("Successful ping to floating IP [%s]" % ip)
+
+ if not test_utils.check_ssh(self.fips_sfs):
+ logger.error("Cannot establish SSH connection to the SFs")
+ sys.exit(1)
+
+ def start_services_in_vm(self):
+ """Start the HTTP server in the server VM as well as the vxlan tool for
+ the SF IPs included in the fips_sfs list
+
+ :return: HTTP server and vxlan tools are started
+ """
+
+ logger.info("Starting HTTP server on %s" % self.server_floating_ip)
+ if not test_utils.start_http_server(self.server_floating_ip):
+ logger.error('\033[91mFailed to start HTTP server on %s\033[0m'
+ % self.server_floating_ip)
+ sys.exit(1)
+
+ for sf_floating_ip in self.fips_sfs:
+ logger.info("Starting vxlan_tool on %s" % sf_floating_ip)
+ test_utils.start_vxlan_tool(sf_floating_ip)
+
+ def present_results_ssh(self):
+ """Check whether the connection between server and client using
+ SSH protocol is blocked or not.
+
+ :return: The results for the specific action of the scenario
+ """
+
+ logger.info("Test SSH")
+ if test_utils.is_ssh_blocked(self.client_floating_ip, self.server_ip):
+ results.add_to_summary(2, "PASS", "SSH Blocked")
+ else:
+ error = ('\033[91mTEST [FAILED] ==> SSH NOT BLOCKED\033[0m')
+ logger.error(error)
+ test_utils.capture_ovs_logs(
+ self.ovs_logger, self.controller_clients, self.compute_clients,
+ error)
+ results.add_to_summary(2, "FAIL", "SSH Works")
+
+ return results
+
+ def present_results_allowed_ssh(self):
+ """Check whether the connection between server and client using
+ SSH protocol is available or not.
+
+ :return: The results for the specific action of the scenario
+ """
+
+ logger.info("Test SSH")
+ if not test_utils.is_ssh_blocked(self.client_floating_ip,
+ self.server_ip):
+ results.add_to_summary(2, "PASS", "SSH works")
+ else:
+ error = ('\033[91mTEST [FAILED] ==> SSH BLOCKED\033[0m')
+ logger.error(error)
+ test_utils.capture_ovs_logs(
+ self.ovs_logger, self.controller_clients, self.compute_clients,
+ error)
+ results.add_to_summary(2, "FAIL", "SSH is blocked")
+
+ return results
+
+ def remove_vnffg(self, par_vnffg_name, par_vnffgd_name):
+ """Delete the vnffg and the vnffgd items that have been created
+ during the test scenario.
+
+ :param par_vnffg_name: The vnffg name of network components
+ :param par_vnffgd_name: The vnffgd name of network components
+ :return: Remove the vnffg and vnffgd components
+ """
+
+ os_sfc_utils.delete_vnffg(self.tacker_client,
+ vnffg_name=par_vnffg_name)
+
+ os_sfc_utils.delete_vnffgd(self.tacker_client,
+ vnffgd_name=par_vnffgd_name)
+
+ def create_vnffg(self, testcase_config_name, vnf_name, conn_name):
+ """Create the vnffg components following the instructions from
+ relevant templates.
+
+ :param testcase_config_name: The config input of the test case
+ :param vnf_name: The name of the vnf
+ :param conn_name: Protocol type / name of the component
+ :return: Create the vnffg component
+ """
+
+ tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
+ COMMON_CONFIG.vnffgd_dir,
+ testcase_config_name)
+
+ os_sfc_utils.create_vnffgd(self.tacker_client,
+ tosca_file=tosca_file,
+ vnffgd_name=vnf_name)
+
+ self.neutron_port = openstack_sfc.get_client_port(self.client_instance,
+ self.client_creator)
+ os_sfc_utils.create_vnffg_with_param_file(self.tacker_client, vnf_name,
+ conn_name,
+ self.default_param_file,
+ self.neutron_port.id)
+
+ def present_results_http(self):
+ """Check whether the connection between server and client using
+ HTTP protocol is blocked or not.
+
+ :return: The results for the specific action of the scenario
+ """
+
+ logger.info("Test HTTP")
+ if test_utils.is_http_blocked(self.client_floating_ip, self.server_ip):
+ results.add_to_summary(2, "PASS", "HTTP Blocked")
+ else:
+ error = ('\033[91mTEST [FAILED] ==> HTTP WORKS\033[0m')
+ logger.error(error)
+ test_utils.capture_ovs_logs(
+ self.ovs_logger, self.controller_clients, self.compute_clients,
+ error)
+ results.add_to_summary(2, "FAIL", "HTTP works")
+
+ return results
+
+ def present_results_allowed_port_http(self, testcase_config):
+ """Check whether the connection between server and client using
+ HTTP protocol and for a specific port is available or not.
+
+ :param testcase_config: The config input of the test case
+ :return: The results for the specific action of the scenario
+ """
+
+ allowed_port = testcase_config.source_port
+ logger.info("Test if HTTP from port %s works" % allowed_port)
+ if not test_utils.is_http_blocked(
+ self.client_floating_ip, self.server_ip, allowed_port):
+ results.add_to_summary(2, "PASS", "HTTP works")
+ else:
+ error = ('\033[91mTEST [FAILED] ==> HTTP BLOCKED\033[0m')
+ logger.error(error)
+ test_utils.capture_ovs_logs(
+ self.ovs_logger, self.controller_clients, self.compute_clients,
+ error)
+ results.add_to_summary(2, "FAIL", "HTTP is blocked")
+
+ return results
+
+ def present_results_blocked_port_http(self, testcase_config,
+ test='HTTP'):
+ """Check whether the connection between server and client using
+ HTTP protocol and for a specific port is blocked or not.
+
+ :param testcase_config: The config input of the test case
+ :param test: custom test string to print on result summary
+ :return: The results for the specific action of the scenario
+ """
+
+ allowed_port = testcase_config.source_port
+ logger.info("Test if HTTP from port %s doesn't work" % allowed_port)
+ if test_utils.is_http_blocked(
+ self.client_floating_ip, self.server_ip, allowed_port):
+ results.add_to_summary(2, "PASS", test + " blocked")
+ else:
+ error = ('\033[91mTEST [FAILED] ==> HTTP WORKS\033[0m')
+ logger.error(error)
+ test_utils.capture_ovs_logs(
+ self.ovs_logger, self.controller_clients, self.compute_clients,
+ error)
+ results.add_to_summary(2, "FAIL", test + " works")
+
+ return results
+
+ def create_chain(self, testcase_config):
+ """Create a connection chain for the test scenario purposes
+
+ :param testcase_config: The config input of the test case
+ :return: Create the proper chain for the specific test scenario
+ """
+
+ self.neutron_port = openstack_sfc.get_client_port(self.client_instance,
+ self.client_creator)
+ odl_utils.create_chain(self.tacker_client, self.default_param_file,
+ self.neutron_port, COMMON_CONFIG,
+ testcase_config)
+
+ def check_deletion(self):
+ """Check that the deletion of the chain has been completed sucessfully.
+
+ :return: Check that the chain has been completely deleted without
+ leftovers.
+ """
+
+ if not odl_utils.\
+ check_vnffg_deletion(self.odl_ip, self.odl_port,
+ self.ovs_logger,
+ [self.neutron_port],
+ self.client_instance.compute_host,
+ self.compute_nodes):
+ logger.debug("The chains were not correctly removed")
+ raise Exception("Chains not correctly removed, test failed")
+
+ def present_results_allowed_http(self):
+ """Check whether the connection between server and client using
+ HTTP protocol is available or not.
+
+ :return: The results for the specific action of the scenario
+ """
+
+ if not test_utils.is_http_blocked(self.client_floating_ip,
+ self.server_ip):
+ results.add_to_summary(2, "PASS", "HTTP works")
+ else:
+ error = ('\033[91mTEST [FAILED] ==> HTTP BLOCKED\033[0m')
+ logger.error(error)
+ test_utils.capture_ovs_logs(
+ self.ovs_logger, self.controller_clients, self.compute_clients,
+ error)
+ results.add_to_summary(2, "FAIL", "HTTP is blocked")
+
+ return results
+
+ def vxlan_blocking_start(self, floating_ip, port_blocked):
+ """Start the vxlan tool for one floating IP and blocking
+ a specific port.
+
+ :param floating_ip: Floating IP
+ :param port_blocked: Port
+ :return: The port for the floating IP is blocked
+ """
+
+ test_utils.stop_vxlan_tool(floating_ip)
+ logger.info("Starting HTTP firewall on %s" % floating_ip)
+ test_utils.start_vxlan_tool(floating_ip, block=port_blocked)
+
+ def vxlan_blocking_stop(self, floating_ip):
+ """Stop the vxlan tool for a specific IP
+
+ :param floating_ip: Floating IP
+ :return: The vxlan tool for the specific floating IP is stopped
+ """
+
+ logger.info("Starting HTTP firewall on %s" % floating_ip)
+ test_utils.stop_vxlan_tool(floating_ip)
+
+ def vxlan_start_interface(self, floating_ip, interface, output, block):
+ """Start the vxlan tool for one floating IP and blocking
+ a specific interface.
+
+ :param floating_ip: Floating IP
+ :param interface: Interface
+ :param output: output interface
+ :param block: port
+ :return: The interface or/and port for specific floating IP is blocked
+ """
+
+ logger.info("Starting vxlan_tool on %s" % floating_ip)
+ test_utils.start_vxlan_tool(floating_ip, interface=interface,
+ output=output, block=block)
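The present_results_* helpers above all follow the same PASS/FAIL bookkeeping; a small sketch of that pattern, with 'blocked' standing in for the real test_utils.is_http_blocked() probe.

    from sfc.lib.results import Results

    results = Results(60)                         # line length, as in the tests
    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    blocked = False                               # stand-in for the real probe
    if not blocked:
        results.add_to_summary(2, "PASS", "HTTP works")
    else:
        results.add_to_summary(2, "FAIL", "HTTP is blocked")

    print(results.compile_summary())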
diff --git a/sfc/tests/functest/sfc_symmetric_chain.py b/sfc/tests/functest/sfc_symmetric_chain.py
index b8d35514..067854db 100644
--- a/sfc/tests/functest/sfc_symmetric_chain.py
+++ b/sfc/tests/functest/sfc_symmetric_chain.py
@@ -1,4 +1,4 @@
-#!/bin/python
+#!/usr/bin/env python
#
# Copyright (c) 2017 Ericsson AB and others. All rights reserved
#
@@ -8,228 +8,172 @@
#
# http://www.apache.org/licenses/LICENSE-2.0
#
-
import os
import sys
import threading
import logging
-
import sfc.lib.openstack_utils as os_sfc_utils
import sfc.lib.odl_utils as odl_utils
-import opnfv.utils.ovs_logger as ovs_log
-from opnfv.deployment.factory import Factory as DeploymentFactory
-
import sfc.lib.config as sfc_config
-import sfc.lib.utils as test_utils
-from sfc.lib.results import Results
-import sfc.lib.topology_shuffler as topo_shuffler
-
+from sfc.tests.functest import sfc_parent_function
+""" logging configuration """
logger = logging.getLogger(__name__)
-
+COMMON_CONFIG = sfc_config.CommonConfig()
CLIENT = "client"
SERVER = "server"
-COMMON_CONFIG = sfc_config.CommonConfig()
-TESTCASE_CONFIG = sfc_config.TestcaseConfig('sfc_symmetric_chain')
-
-
-def main():
- deploymentHandler = DeploymentFactory.get_handler(
- COMMON_CONFIG.installer_type,
- COMMON_CONFIG.installer_ip,
- COMMON_CONFIG.installer_user,
- COMMON_CONFIG.installer_password,
- COMMON_CONFIG.installer_key_file)
-
- cluster = COMMON_CONFIG.installer_cluster
- all_nodes = (deploymentHandler.get_nodes({'cluster': cluster})
- if cluster is not None
- else deploymentHandler.get_nodes())
-
- controller_nodes = [node for node in all_nodes if node.is_controller()]
- compute_nodes = [node for node in all_nodes if node.is_compute()]
-
- odl_ip, odl_port = odl_utils.get_odl_ip_port(all_nodes)
-
- results = Results(COMMON_CONFIG.line_length)
- results.add_to_summary(0, "=")
- results.add_to_summary(2, "STATUS", "SUBTEST")
- results.add_to_summary(0, "=")
-
- openstack_sfc = os_sfc_utils.OpenStackSFC()
-
- tacker_client = os_sfc_utils.get_tacker_client()
-
- _, custom_flavor = openstack_sfc.get_or_create_flavor(
- COMMON_CONFIG.flavor,
- COMMON_CONFIG.ram_size_in_mb,
- COMMON_CONFIG.disk_size_in_gb,
- COMMON_CONFIG.vcpu_count)
- if custom_flavor is None:
- logger.error("Failed to create custom flavor")
- sys.exit(1)
-
- controller_clients = test_utils.get_ssh_clients(controller_nodes)
- compute_clients = test_utils.get_ssh_clients(compute_nodes)
-
- ovs_logger = ovs_log.OVSLogger(
- os.path.join(COMMON_CONFIG.sfc_test_dir, 'ovs-logs'),
- COMMON_CONFIG.functest_results_dir)
-
- image_creator = openstack_sfc.register_glance_image(
- COMMON_CONFIG.image_name,
- COMMON_CONFIG.image_url,
- COMMON_CONFIG.image_format,
- 'public')
-
- network, router = openstack_sfc.create_network_infrastructure(
- TESTCASE_CONFIG.net_name,
- TESTCASE_CONFIG.subnet_name,
- TESTCASE_CONFIG.subnet_cidr,
- TESTCASE_CONFIG.router_name)
-
- sg = openstack_sfc.create_security_group(TESTCASE_CONFIG.secgroup_name)
-
- vnf_name = 'testVNF1'
- # Using seed=0 uses the baseline topology: everything in the same host
- testTopology = topo_shuffler.topology([vnf_name], openstack_sfc, seed=0)
- logger.info('This test is run with the topology {0}'
- .format(testTopology['id']))
- logger.info('Topology description: {0}'
- .format(testTopology['description']))
-
- client_instance, client_creator = openstack_sfc.create_instance(
- CLIENT, COMMON_CONFIG.flavor, image_creator, network, sg,
- av_zone=testTopology['client'])
-
- server_instance, server_creator = openstack_sfc.create_instance(
- SERVER, COMMON_CONFIG.flavor, image_creator, network, sg,
- av_zone=testTopology['server'])
-
- server_ip = server_instance.ports[0].ips[0]['ip_address']
- logger.info("Server instance received private ip [{}]".format(server_ip))
-
- tosca_file = os.path.join(
- COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- TESTCASE_CONFIG.test_vnfd)
-
- default_param_file = os.path.join(
- COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- COMMON_CONFIG.vnfd_default_params_file)
-
- os_sfc_utils.create_vnfd(tacker_client, tosca_file=tosca_file)
- test_utils.create_vnf_in_av_zone(
- tacker_client,
- vnf_name,
- 'test-vnfd1',
- default_param_file,
- testTopology[vnf_name])
-
- vnf_id = os_sfc_utils.wait_for_vnf(tacker_client, vnf_name=vnf_name)
- if vnf_id is None:
- logger.error('ERROR while booting VNF')
- sys.exit(1)
-
- os_sfc_utils.create_sfc(
- tacker_client,
- sfc_name='red',
- chain_vnf_names=[vnf_name],
- symmetrical=True)
-
- os_sfc_utils.create_sfc_classifier(
- tacker_client, 'red_http', sfc_name='red',
- match={
- 'source_port': 0,
- 'dest_port': 80,
- 'protocol': 6
- })
-
- # FIXME: JIRA SFC-86
- # Tacker does not allow to specify the direction of the chain to be used,
- # only references the SFP (which for symmetric chains results in two RSPs)
- os_sfc_utils.create_sfc_classifier(
- tacker_client, 'red_http_reverse', sfc_name='red',
- match={
- 'source_port': 80,
- 'dest_port': 0,
- 'protocol': 6
- })
-
- logger.info(test_utils.run_cmd('tacker sfc-list'))
- logger.info(test_utils.run_cmd('tacker sfc-classifier-list'))
-
- # Start measuring the time it takes to implement the classification rules
- t1 = threading.Thread(target=odl_utils.wait_for_classification_rules,
- args=(ovs_logger, compute_nodes, odl_ip, odl_port,))
-
- try:
- t1.start()
- except Exception as e:
- logger.error("Unable to start the thread that counts time %s" % e)
-
- logger.info("Assigning floating IPs to instances")
- client_floating_ip = openstack_sfc.assign_floating_ip(router,
- client_instance,
- client_creator)
- server_floating_ip = openstack_sfc.assign_floating_ip(router,
- server_instance,
- server_creator)
- fips_sfs = openstack_sfc.assign_floating_ip_vnfs(router)
- sf_floating_ip = fips_sfs[0]
-
- fips = [client_floating_ip, server_floating_ip, fips_sfs[0]]
-
- for ip in fips:
- logger.info("Checking connectivity towards floating IP [%s]" % ip)
- if not test_utils.ping(ip, retries=50, retry_timeout=3):
- logger.error("Cannot ping floating IP [%s]" % ip)
- sys.exit(1)
- logger.info("Successful ping to floating IP [%s]" % ip)
+openstack_sfc = os_sfc_utils.OpenStackSFC()
- if not test_utils.check_ssh([sf_floating_ip]):
- logger.error("Cannot establish SSH connection to the SFs")
- sys.exit(1)
- logger.info("Starting HTTP server on %s" % server_floating_ip)
- if not test_utils.start_http_server(server_floating_ip):
- logger.error('\033[91mFailed to start the HTTP server\033[0m')
- sys.exit(1)
+class SfcSymmetricChain(sfc_parent_function.SfcCommonTestCase):
+ """One client and one server are created using nova.
+ The server will be running a web server on port 80.
+ Then one Service Function (SF) is created using Tacker.
+ This service function will be running a firewall that
+ blocks the traffic in a specific port.
+ A symmetric service chain routing the traffic through
+ this SF will be created as well.
+ The purpose is to check different HTTP traffic
+ combinations using the firewall.
+ """
- blocked_port = TESTCASE_CONFIG.blocked_source_port
- logger.info("Firewall started, blocking traffic port %d" % blocked_port)
- test_utils.start_vxlan_tool(sf_floating_ip, block=blocked_port)
+ def run(self):
- logger.info("Wait for ODL to update the classification rules in OVS")
- t1.join()
+ logger.info("The test scenario %s is starting", __name__)
+ self.create_custom_vnfd(self.testcase_config.test_vnfd, 'test-vnfd1')
+ self.create_custom_av(self.vnfs[0], 'test-vnfd1', 'test-vim')
- allowed_port = TESTCASE_CONFIG.allowed_source_port
- logger.info("Test if HTTP from port %s works" % allowed_port)
- if not test_utils.is_http_blocked(
- client_floating_ip, server_ip, allowed_port):
- results.add_to_summary(2, "PASS", "HTTP works")
- else:
- error = ('\033[91mTEST 1 [FAILED] ==> HTTP BLOCKED\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP works")
-
- logger.info("Test if HTTP from port %s is blocked" % blocked_port)
- if test_utils.is_http_blocked(
- client_floating_ip, server_ip, blocked_port):
- results.add_to_summary(2, "PASS", "HTTP Blocked")
- else:
- error = ('\033[91mTEST 2 [FAILED] ==> HTTP WORKS\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP Blocked")
+ if self.vnf_id is None:
+ logger.error('ERROR while booting VNF')
+ sys.exit(1)
- return results.compile_summary(), openstack_sfc.creators
+ tosca_file = os.path.join(
+ COMMON_CONFIG.sfc_test_dir,
+ COMMON_CONFIG.vnffgd_dir,
+ self.testcase_config.test_vnffgd)
+ os_sfc_utils.create_vnffgd(
+ self.tacker_client,
+ tosca_file=tosca_file,
+ vnffgd_name='test-vnffgd')
+
+ client_port = openstack_sfc.get_client_port(
+ self.client_instance,
+ self.client_creator)
+ server_port = openstack_sfc.get_client_port(
+ self.server_instance,
+ self.server_creator)
+
+ server_ip_prefix = self.server_ip + '/32'
+
+ default_param_file = os.path.join(
+ COMMON_CONFIG.sfc_test_dir,
+ COMMON_CONFIG.vnfd_dir,
+ COMMON_CONFIG.vnfd_default_params_file)
+
+ os_sfc_utils.create_vnffg_with_param_file(
+ self.tacker_client,
+ 'test-vnffgd',
+ 'test-vnffg',
+ default_param_file,
+ client_port.id,
+ server_port.id,
+ server_ip_prefix)
+ # Start measuring the time it takes to implement the classification
+ # rules
+ t1 = threading.Thread(target=wait_for_classification_rules,
+ args=(self.ovs_logger, self.compute_nodes,
+ self.server_instance.compute_host,
+ server_port,
+ self.client_instance.compute_host,
+ client_port, self.odl_ip,
+ self.odl_port,))
+
+ try:
+ t1.start()
+ except Exception as e:
+ logger.error("Unable to start the thread that counts time %s" % e)
+
+ logger.info("Assigning floating IPs to instances")
+ self.assign_floating_ip_client_server()
+
+ vnf_ip = os_sfc_utils.get_vnf_ip(self.tacker_client,
+ vnf_id=self.vnf_id)
+ self.assign_floating_ip_sfs(vnf_ip)
+
+ self.check_floating_ips()
+
+ self.start_services_in_vm()
+
+ self.vxlan_start_interface(self.fips_sfs[0], 'eth0', 'eth1', None)
+ self.vxlan_start_interface(self.fips_sfs[0], 'eth1', 'eth0', None)
+
+ logger.info("Wait for ODL to update the classification rules in OVS")
+ t1.join()
+
+ results = self.present_results_allowed_port_http(self.testcase_config)
+
+ self.vxlan_blocking_stop(self.fips_sfs[0])
+ self.vxlan_start_interface(self.fips_sfs[0], 'eth0', 'eth1', "80")
+ self.vxlan_start_interface(self.fips_sfs[0], 'eth1', 'eth0', None)
+
+ results = self.present_results_blocked_port_http(self.testcase_config,
+ 'HTTP uplink')
+
+ self.vxlan_blocking_stop(self.fips_sfs[0])
+ self.vxlan_start_interface(self.fips_sfs[0], 'eth0', 'eth1', None)
+ self.vxlan_start_interface(self.fips_sfs[0], 'eth1', 'eth0',
+ self.testcase_config.source_port)
+
+ results = self.present_results_blocked_port_http(self.testcase_config,
+ 'HTTP downlink')
+
+ self.vxlan_blocking_stop(self.fips_sfs[0])
+ self.vxlan_start_interface(self.fips_sfs[0], 'eth0', 'eth1', None)
+ self.vxlan_start_interface(self.fips_sfs[0], 'eth1', 'eth0', None)
+ results = self.present_results_allowed_http()
+
+ if __name__ == '__main__':
+ return results.compile_summary(), self.creators
+
+ if __name__ == 'sfc.tests.functest.sfc_symmetric_chain':
+ return results.compile_summary(), self.creators
+
+
+def wait_for_classification_rules(ovs_logger, compute_nodes,
+ server_compute, server_port,
+ client_compute, client_port,
+ odl_ip, odl_port):
+ if client_compute == server_compute:
+ odl_utils.wait_for_classification_rules(
+ ovs_logger,
+ compute_nodes,
+ odl_ip,
+ odl_port,
+ client_compute,
+ [server_port, client_port])
+ else:
+ odl_utils.wait_for_classification_rules(
+ ovs_logger,
+ compute_nodes,
+ odl_ip,
+ odl_port,
+ server_compute,
+ server_port)
+ odl_utils.wait_for_classification_rules(
+ ovs_logger,
+ compute_nodes,
+ odl_ip,
+ odl_port,
+ client_compute,
+ client_port)
if __name__ == '__main__':
- logging.config.fileConfig(COMMON_CONFIG.functest_logging_api)
- main()
+
+ TESTCASE_CONFIG = sfc_config.TestcaseConfig('sfc_symmetric_chain')
+ supported_installers = ['fuel', 'apex', 'osa', 'compass']
+ vnf_names = ['testVNF1']
+
+ test_run = SfcSymmetricChain(TESTCASE_CONFIG, supported_installers,
+ vnf_names)
+ test_run.run()
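For reference, a short sketch (an editor's illustration, not part of this patch) of how the two __name__ checks inside run() are meant to work: when the module is executed directly, __name__ is '__main__'; when the functest runner imports it under its dotted path, __name__ is 'sfc.tests.functest.sfc_symmetric_chain', so in both cases run() hands back the compiled summary and the list of creators. The variable names below are illustrative:

    import sfc.lib.config as sfc_config
    from sfc.tests.functest.sfc_symmetric_chain import SfcSymmetricChain

    # Same arguments as in the __main__ block above.
    testcase_config = sfc_config.TestcaseConfig('sfc_symmetric_chain')
    test_run = SfcSymmetricChain(testcase_config,
                                 ['fuel', 'apex', 'osa', 'compass'],
                                 ['testVNF1'])
    # Inside the imported module, __name__ is
    # 'sfc.tests.functest.sfc_symmetric_chain', so run() returns the summary.
    summary, creators = test_run.run()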
diff --git a/sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py b/sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py
index d7eb2994..56a434f1 100644
--- a/sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py
+++ b/sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py
@@ -1,4 +1,4 @@
-#!/bin/python
+#!/usr/bin/env python
#
# Copyright (c) 2015 All rights reserved
# This program and the accompanying materials
@@ -8,302 +8,104 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
-import os
-import sys
import threading
import logging
-
-import sfc.lib.openstack_utils as os_sfc_utils
import sfc.lib.odl_utils as odl_utils
-import opnfv.utils.ovs_logger as ovs_log
-
import sfc.lib.config as sfc_config
-import sfc.lib.test_utils as test_utils
-from sfc.lib.results import Results
-from opnfv.deployment.factory import Factory as DeploymentFactory
-import sfc.lib.topology_shuffler as topo_shuffler
-
+from sfc.tests.functest import sfc_parent_function
+""" logging configuration """
logger = logging.getLogger(__name__)
-CLIENT = "client"
-SERVER = "server"
-COMMON_CONFIG = sfc_config.CommonConfig()
-TESTCASE_CONFIG = sfc_config.TestcaseConfig('sfc_two_chains_SSH_and_HTTP')
-
-
-def main():
- deploymentHandler = DeploymentFactory.get_handler(
- COMMON_CONFIG.installer_type,
- COMMON_CONFIG.installer_ip,
- COMMON_CONFIG.installer_user,
- COMMON_CONFIG.installer_password,
- COMMON_CONFIG.installer_key_file)
-
- installer_type = os.environ.get("INSTALLER_TYPE")
-
- supported_installers = ['fuel', 'apex', 'osa']
-
- if installer_type not in supported_installers:
- logger.error(
- '\033[91mYour installer is not supported yet\033[0m')
- sys.exit(1)
-
- installer_ip = os.environ.get("INSTALLER_IP")
- if not installer_ip:
- logger.error(
- '\033[91minstaller ip is not set\033[0m')
- logger.error(
- '\033[91mexport INSTALLER_IP=<ip>\033[0m')
- sys.exit(1)
-
- cluster = COMMON_CONFIG.installer_cluster
- openstack_nodes = (deploymentHandler.get_nodes({'cluster': cluster})
- if cluster is not None
- else deploymentHandler.get_nodes())
-
- controller_nodes = [node for node in openstack_nodes
- if node.is_controller()]
- compute_nodes = [node for node in openstack_nodes
- if node.is_compute()]
- odl_ip, odl_port = odl_utils.get_odl_ip_port(openstack_nodes)
+class SfcTwoChainsSSHandHTTP(sfc_parent_function.SfcCommonTestCase):
+ """We create one client and one server using nova.
+ Then, two SFs are created using Tacker.
+ Two chains are created, having one SF each.
+ The vxlan tool is used on both SFs. The purpose is to
+ check different HTTP and SSH traffic combinations.
+ """
- for compute in compute_nodes:
- logger.info("This is a compute: %s" % compute.ip)
+ def run(self):
- results = Results(COMMON_CONFIG.line_length)
- results.add_to_summary(0, "=")
- results.add_to_summary(2, "STATUS", "SUBTEST")
- results.add_to_summary(0, "=")
-
- openstack_sfc = os_sfc_utils.OpenStackSFC()
-
- custom_flv = openstack_sfc.create_flavor(
- COMMON_CONFIG.flavor,
- COMMON_CONFIG.ram_size_in_mb,
- COMMON_CONFIG.disk_size_in_gb,
- COMMON_CONFIG.vcpu_count)
- if not custom_flv:
- logger.error("Failed to create custom flavor")
- sys.exit(1)
-
- tacker_client = os_sfc_utils.get_tacker_client()
-
- controller_clients = test_utils.get_ssh_clients(controller_nodes)
- compute_clients = test_utils.get_ssh_clients(compute_nodes)
-
- ovs_logger = ovs_log.OVSLogger(
- os.path.join(COMMON_CONFIG.sfc_test_dir, 'ovs-logs'),
- COMMON_CONFIG.functest_results_dir)
-
- image_creator = openstack_sfc.register_glance_image(
- COMMON_CONFIG.image_name,
- COMMON_CONFIG.image_url,
- COMMON_CONFIG.image_format,
- 'public')
-
- network, router = openstack_sfc.create_network_infrastructure(
- TESTCASE_CONFIG.net_name,
- TESTCASE_CONFIG.subnet_name,
- TESTCASE_CONFIG.subnet_cidr,
- TESTCASE_CONFIG.router_name)
-
- sg = openstack_sfc.create_security_group(TESTCASE_CONFIG.secgroup_name)
-
- vnf_names = ['testVNF1', 'testVNF2']
+ logger.info("The test scenario %s is starting", __name__)
- topo_seed = topo_shuffler.get_seed() # change to None for nova av zone
- testTopology = topo_shuffler.topology(vnf_names, openstack_sfc,
- seed=topo_seed)
+ self.create_custom_vnfd(self.testcase_config.test_vnfd_red,
+ 'test-vnfd1')
+ self.create_custom_vnfd(self.testcase_config.test_vnfd_blue,
+ 'test-vnfd2')
- logger.info('This test is run with the topology {0}'
- .format(testTopology['id']))
- logger.info('Topology description: {0}'
- .format(testTopology['description']))
+ self.create_custom_av(self.vnfs[0], 'test-vnfd1', 'test-vim')
+ self.create_custom_av(self.vnfs[1], 'test-vnfd2', 'test-vim')
- client_instance, client_creator = openstack_sfc.create_instance(
- CLIENT, COMMON_CONFIG.flavor, image_creator, network, sg,
- av_zone=testTopology['client'])
+ self.create_vnffg(self.testcase_config.test_vnffgd_red, 'red',
+ 'red_http')
- server_instance, server_creator = openstack_sfc.create_instance(
- SERVER, COMMON_CONFIG.flavor, image_creator, network, sg,
- av_zone=testTopology['server'])
+ t1 = threading.Thread(target=odl_utils.wait_for_classification_rules,
+ args=(self.ovs_logger, self.compute_nodes,
+ self.odl_ip, self.odl_port,
+ self.client_instance.compute_host,
+ [self.neutron_port],))
- server_ip = server_instance.ports[0].ips[0]['ip_address']
+ try:
+ t1.start()
+ except Exception as e:
+ logger.error("Unable to start the thread that counts time %s" % e)
- os_sfc_utils.register_vim(tacker_client, vim_file=COMMON_CONFIG.vim_file)
+ logger.info("Assigning floating IPs to instances")
+ self.assign_floating_ip_client_server()
- tosca_red = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- TESTCASE_CONFIG.test_vnfd_red)
- os_sfc_utils.create_vnfd(tacker_client,
- tosca_file=tosca_red,
- vnfd_name='test-vnfd1')
+ self.assign_floating_ip_sfs()
- tosca_blue = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- TESTCASE_CONFIG.test_vnfd_blue)
+ self.check_floating_ips()
+ self.start_services_in_vm()
+ self.vxlan_blocking_start(self.fips_sfs[0], "22")
+ self.vxlan_blocking_start(self.fips_sfs[1], "80")
- os_sfc_utils.create_vnfd(tacker_client,
- tosca_file=tosca_blue,
- vnfd_name='test-vnfd2')
+ logger.info("Wait for ODL to update the classification rules in OVS")
+ t1.join()
- default_param_file = os.path.join(
- COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- COMMON_CONFIG.vnfd_default_params_file)
+ results = self.present_results_ssh()
+ results = self.present_results_allowed_http()
- os_sfc_utils.create_vnf_in_av_zone(
- tacker_client, vnf_names[0], 'test-vnfd1', 'test-vim',
- default_param_file, testTopology[vnf_names[0]])
- os_sfc_utils.create_vnf_in_av_zone(
- tacker_client, vnf_names[1], 'test-vnfd2', 'test-vim',
- default_param_file, testTopology[vnf_names[1]])
+ logger.info("Changing the classification")
- vnf1_id = os_sfc_utils.wait_for_vnf(tacker_client, vnf_name=vnf_names[0])
- vnf2_id = os_sfc_utils.wait_for_vnf(tacker_client, vnf_name=vnf_names[1])
- if vnf1_id is None or vnf2_id is None:
- logger.error('ERROR while booting vnfs')
- sys.exit(1)
+ self.remove_vnffg('red_http', 'red')
- tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnffgd_dir,
- TESTCASE_CONFIG.test_vnffgd_red)
+ self.create_vnffg(self.testcase_config.test_vnffgd_blue, 'blue',
+ 'blue_ssh')
- os_sfc_utils.create_vnffgd(tacker_client,
- tosca_file=tosca_file,
- vnffgd_name='red')
+ # Start measuring the time it takes to implement the classification
+ # rules
+ t2 = threading.Thread(target=odl_utils.wait_for_classification_rules,
+ args=(self.ovs_logger, self.compute_nodes,
+ self.odl_ip, self.odl_port,
+ self.client_instance.compute_host,
+ self.neutron_port,))
+ try:
+ t2.start()
+ except Exception as e:
+ logger.error("Unable to start the thread that counts time %s" % e)
- neutron_port = openstack_sfc.get_client_port_id(client_instance)
- os_sfc_utils.create_vnffg_with_param_file(tacker_client, 'red',
- 'red_http',
- default_param_file,
- neutron_port.id)
+ logger.info("Wait for ODL to update the classification rules in OVS")
+ t2.join()
- # Start measuring the time it takes to implement the classification rules
- t1 = threading.Thread(target=odl_utils.wait_for_classification_rules,
- args=(ovs_logger, compute_nodes, odl_ip, odl_port,))
+ results = self.present_results_http()
+ results = self.present_results_allowed_ssh()
- try:
- t1.start()
- except Exception as e:
- logger.error("Unable to start the thread that counts time %s" % e)
+ if __name__ == '__main__':
+ return results.compile_summary(), self.creators
- logger.info("Assigning floating IPs to instances")
- client_floating_ip = openstack_sfc.assign_floating_ip(router,
- client_instance,
- client_creator)
- server_floating_ip = openstack_sfc.assign_floating_ip(router,
- server_instance,
- server_creator)
- fips_sfs = openstack_sfc.assign_floating_ip_vnfs(router)
- sf1_floating_ip = fips_sfs[0]
- sf2_floating_ip = fips_sfs[1]
+ if __name__ == 'sfc.tests.functest.sfc_two_chains_SSH_and_HTTP':
+ return results.compile_summary(), self.creators
- fips = [client_floating_ip, server_floating_ip, sf1_floating_ip,
- sf2_floating_ip]
- for ip in fips:
- logger.info("Checking connectivity towards floating IP [%s]" % ip)
- if not test_utils.ping(ip, retries=50, retry_timeout=3):
- logger.error("Cannot ping floating IP [%s]" % ip)
- os_sfc_utils.get_tacker_items()
- odl_utils.get_odl_items(odl_ip, odl_port)
- sys.exit(1)
- logger.info("Successful ping to floating IP [%s]" % ip)
-
- if not test_utils.check_ssh([sf1_floating_ip, sf2_floating_ip]):
- logger.error("Cannot establish SSH connection to the SFs")
- sys.exit(1)
-
- logger.info("Starting HTTP server on %s" % server_floating_ip)
- if not test_utils.start_http_server(server_floating_ip):
- logger.error('\033[91mFailed to start HTTP server on %s\033[0m'
- % server_floating_ip)
- sys.exit(1)
-
- logger.info("Starting SSH firewall on %s" % sf1_floating_ip)
- test_utils.start_vxlan_tool(sf1_floating_ip, block="22")
- logger.info("Starting HTTP firewall on %s" % sf2_floating_ip)
- test_utils.start_vxlan_tool(sf2_floating_ip, block="80")
-
- logger.info("Wait for ODL to update the classification rules in OVS")
- t1.join()
-
- logger.info("Test SSH")
- if test_utils.is_ssh_blocked(client_floating_ip, server_ip):
- results.add_to_summary(2, "PASS", "SSH Blocked")
- else:
- error = ('\033[91mTEST 1 [FAILED] ==> SSH NOT BLOCKED\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "SSH Blocked")
-
- logger.info("Test HTTP")
- if not test_utils.is_http_blocked(client_floating_ip, server_ip):
- results.add_to_summary(2, "PASS", "HTTP works")
- else:
- error = ('\033[91mTEST 2 [FAILED] ==> HTTP BLOCKED\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP works")
-
- logger.info("Changing the classification")
-
- os_sfc_utils.delete_vnffg(tacker_client, vnffg_name='red_http_works')
-
- os_sfc_utils.delete_vnffgd(tacker_client, vnffgd_name='red')
-
- tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnffgd_dir,
- TESTCASE_CONFIG.test_vnffgd_blue)
-
- os_sfc_utils.create_vnffgd(tacker_client,
- tosca_file=tosca_file,
- vnffgd_name='blue')
-
- os_sfc_utils.create_vnffg_with_param_file(tacker_client, 'blue',
- 'blue_ssh',
- default_param_file,
- neutron_port)
-
- # Start measuring the time it takes to implement the classification rules
- t2 = threading.Thread(target=odl_utils.wait_for_classification_rules,
- args=(ovs_logger, compute_nodes, odl_ip, odl_port,))
- try:
- t2.start()
- except Exception as e:
- logger.error("Unable to start the thread that counts time %s" % e)
-
- logger.info("Wait for ODL to update the classification rules in OVS")
- t2.join()
-
- logger.info("Test HTTP")
- if test_utils.is_http_blocked(client_floating_ip, server_ip):
- results.add_to_summary(2, "PASS", "HTTP Blocked")
- else:
- error = ('\033[91mTEST 3 [FAILED] ==> HTTP WORKS\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP Blocked")
-
- logger.info("Test SSH")
- if not test_utils.is_ssh_blocked(client_floating_ip, server_ip):
- results.add_to_summary(2, "PASS", "SSH works")
- else:
- error = ('\033[91mTEST 4 [FAILED] ==> SSH BLOCKED\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "SSH works")
-
- return results.compile_summary(), openstack_sfc.creators
+if __name__ == '__main__':
+ TESTCASE_CONFIG = sfc_config.TestcaseConfig('sfc_two_chains_SSH_and_HTTP')
+ supported_installers = ['fuel', 'apex', 'osa', 'compass']
+ vnf_names = ['testVNF1', 'testVNF2']
-if __name__ == '__main__':
- logging.config.fileConfig(COMMON_CONFIG.functest_logging_api)
- main()
+ test_run = SfcTwoChainsSSHandHTTP(TESTCASE_CONFIG, supported_installers,
+ vnf_names)
+ test_run.run()
diff --git a/sfc/tests/functest/vnfd-templates/test-one-chain-vnfd1.yaml b/sfc/tests/functest/vnfd-templates/test-one-chain-vnfd1.yaml
index 4042222c..cc5947c6 100644
--- a/sfc/tests/functest/vnfd-templates/test-one-chain-vnfd1.yaml
+++ b/sfc/tests/functest/vnfd-templates/test-one-chain-vnfd1.yaml
@@ -15,10 +15,10 @@ topology_template:
nfv_compute:
properties:
num_cpus: 1
- mem_size: 2 GB
- disk_size: 10 GB
+ mem_size: 500 MB
+ disk_size: 1 GB
properties:
- image: sfc_nsh_euphrates
+ image: sfc_nsh_fraser
availability_zone: {get_input: zone}
mgmt_driver: noop
config: |
diff --git a/sfc/tests/functest/vnfd-templates/test-one-chain-vnfd2.yaml b/sfc/tests/functest/vnfd-templates/test-one-chain-vnfd2.yaml
index 42308c6c..395245a9 100644
--- a/sfc/tests/functest/vnfd-templates/test-one-chain-vnfd2.yaml
+++ b/sfc/tests/functest/vnfd-templates/test-one-chain-vnfd2.yaml
@@ -15,10 +15,10 @@ topology_template:
nfv_compute:
properties:
num_cpus: 1
- mem_size: 2 GB
- disk_size: 10 GB
+ mem_size: 500 MB
+ disk_size: 1 GB
properties:
- image: sfc_nsh_euphrates
+ image: sfc_nsh_fraser
availability_zone: {get_input: zone}
mgmt_driver: noop
config: |
diff --git a/sfc/tests/functest/vnfd-templates/test-symmetric-vnfd.yaml b/sfc/tests/functest/vnfd-templates/test-symmetric-vnfd.yaml
index 1f4c11f6..bf175ef7 100644
--- a/sfc/tests/functest/vnfd-templates/test-symmetric-vnfd.yaml
+++ b/sfc/tests/functest/vnfd-templates/test-symmetric-vnfd.yaml
@@ -15,12 +15,15 @@ topology_template:
nfv_compute:
properties:
num_cpus: 1
- mem_size: 2 GB
- disk_size: 10 GB
+ mem_size: 500 MB
+ disk_size: 1 GB
properties:
- image: sfc_nsh_euphrates
+ image: sfc_nsh_fraser
availability_zone: {get_input: zone}
mgmt_driver: noop
+ config: |
+ param0: key1
+ param1: key2
service_type: firewall
monitoring_policy:
name: ping
@@ -46,6 +49,18 @@ topology_template:
- virtualBinding:
node: VDU1
+ CP2:
+ type: tosca.nodes.nfv.CP.Tacker
+ properties:
+ management: false
+ order: 1
+ anti_spoofing_protection: false
+ requirements:
+ - virtualLink:
+ node: VL1
+ - virtualBinding:
+ node: VDU1
+
VL1:
type: tosca.nodes.nfv.VL
properties:
diff --git a/sfc/tests/functest/vnfd-templates/test-two-chains-vnfd1.yaml b/sfc/tests/functest/vnfd-templates/test-two-chains-vnfd1.yaml
index 4042222c..cc5947c6 100644
--- a/sfc/tests/functest/vnfd-templates/test-two-chains-vnfd1.yaml
+++ b/sfc/tests/functest/vnfd-templates/test-two-chains-vnfd1.yaml
@@ -15,10 +15,10 @@ topology_template:
nfv_compute:
properties:
num_cpus: 1
- mem_size: 2 GB
- disk_size: 10 GB
+ mem_size: 500 MB
+ disk_size: 1 GB
properties:
- image: sfc_nsh_euphrates
+ image: sfc_nsh_fraser
availability_zone: {get_input: zone}
mgmt_driver: noop
config: |
diff --git a/sfc/tests/functest/vnfd-templates/test-two-chains-vnfd2.yaml b/sfc/tests/functest/vnfd-templates/test-two-chains-vnfd2.yaml
index ac4f223d..df719b9e 100644
--- a/sfc/tests/functest/vnfd-templates/test-two-chains-vnfd2.yaml
+++ b/sfc/tests/functest/vnfd-templates/test-two-chains-vnfd2.yaml
@@ -15,10 +15,10 @@ topology_template:
nfv_compute:
properties:
num_cpus: 1
- mem_size: 2 GB
- disk_size: 10 GB
+ mem_size: 500 MB
+ disk_size: 1 GB
properties:
- image: sfc_nsh_euphrates
+ image: sfc_nsh_fraser
availability_zone: {get_input: zone}
mgmt_driver: noop
config: |
diff --git a/sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd-pike.yaml b/sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd-pike.yaml
new file mode 100644
index 00000000..3f10e6b8
--- /dev/null
+++ b/sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd-pike.yaml
@@ -0,0 +1,38 @@
+---
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+description: test-case2_HTTP Test
+
+topology_template:
+ description: topology-template-test2
+ inputs:
+ net_src_port_id:
+ type: string
+
+ node_templates:
+ Forwarding_path1:
+ type: tosca.nodes.nfv.FP.Tacker
+ description: creates path
+ properties:
+ id: 1
+ policy:
+ type: ACL
+ criteria:
+ - network_src_port_id: {get_input: net_src_port_id}
+ - destination_port_range: 80-80
+ - ip_proto: 6
+ path:
+ - forwarder: test-vnfd1
+ capability: CP1
+
+ groups:
+ VNFFG1:
+ type: tosca.groups.nfv.VNFFG
+ description: creates chain
+ properties:
+ vendor: tacker
+ version: 1.0
+ number_of_endpoints: 1
+ dependent_virtual_link: [VL1]
+ connection_point: [CP1]
+ constituent_vnfs: [test-vnfd1]
+ members: [Forwarding_path1]
diff --git a/sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd.yaml b/sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd.yaml
new file mode 100644
index 00000000..28b78ead
--- /dev/null
+++ b/sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd.yaml
@@ -0,0 +1,38 @@
+---
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+description: test-case2_HTTP Test
+
+topology_template:
+ description: topology-template-test2
+ inputs:
+ net_src_port_id:
+ type: string
+
+ node_templates:
+ Forwarding_path1:
+ type: tosca.nodes.nfv.FP.Tacker
+ description: creates path
+ properties:
+ id: 1
+ policy:
+ type: ACL
+ criteria:
+ - network_src_port_id: {get_input: net_src_port_id}
+ destination_port_range: 80-80
+ ip_proto: 6
+ path:
+ - forwarder: test-vnfd1
+ capability: CP1
+
+ groups:
+ VNFFG1:
+ type: tosca.groups.nfv.VNFFG
+ description: creates chain
+ properties:
+ vendor: tacker
+ version: 1.0
+ number_of_endpoints: 1
+ dependent_virtual_link: [VL1]
+ connection_point: [CP1]
+ constituent_vnfs: [test-vnfd1]
+ members: [Forwarding_path1]
diff --git a/sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd-pike.yaml b/sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd-pike.yaml
new file mode 100644
index 00000000..27c7d545
--- /dev/null
+++ b/sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd-pike.yaml
@@ -0,0 +1,40 @@
+---
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+description: test-case1
+
+topology_template:
+ description: topology-template-test1
+ inputs:
+ net_src_port_id:
+ type: string
+
+ node_templates:
+ Forwarding_path1:
+ type: tosca.nodes.nfv.FP.Tacker
+ description: creates path
+ properties:
+ id: 1
+ policy:
+ type: ACL
+ criteria:
+ - network_src_port_id: {get_input: net_src_port_id}
+ - destination_port_range: 80-80
+ - ip_proto: 6
+ path:
+ - forwarder: test-vnfd1
+ capability: CP1
+ - forwarder: test-vnfd2
+ capability: CP1
+
+ groups:
+ VNFFG1:
+ type: tosca.groups.nfv.VNFFG
+ description: creates chain
+ properties:
+ vendor: tacker
+ version: 1.0
+ number_of_endpoints: 2
+ dependent_virtual_link: [VL1, VL1]
+ connection_point: [CP1, CP1]
+ constituent_vnfs: [test-vnfd1, test-vnfd2]
+ members: [Forwarding_path1]
diff --git a/sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd.yaml b/sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd.yaml
index 27c7d545..544d6e8e 100644
--- a/sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd.yaml
+++ b/sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd.yaml
@@ -18,8 +18,8 @@ topology_template:
type: ACL
criteria:
- network_src_port_id: {get_input: net_src_port_id}
- - destination_port_range: 80-80
- - ip_proto: 6
+ destination_port_range: 80-80
+ ip_proto: 6
path:
- forwarder: test-vnfd1
capability: CP1
diff --git a/sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml b/sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml
new file mode 100644
index 00000000..6b14df1b
--- /dev/null
+++ b/sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml
@@ -0,0 +1,46 @@
+---
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+description: test-case-symmetric
+
+topology_template:
+ description: topology-template-test1
+ inputs:
+ net_src_port_id:
+ type: string
+ net_dst_port_id:
+ type: string
+ ip_dst_prefix:
+ type: string
+
+ node_templates:
+ Forwarding_path1:
+ type: tosca.nodes.nfv.FP.Tacker
+ description: creates path
+ properties:
+ id: 1
+ policy:
+ type: ACL
+ criteria:
+ - network_src_port_id: {get_input: net_src_port_id}
+ - network_dst_port_id: {get_input: net_dst_port_id}
+ - ip_dst_prefix: {get_input: ip_dst_prefix}
+ - destination_port_range: 80-80
+ - ip_proto: 6
+ path:
+ - forwarder: test-vnfd1
+ capability: CP1
+ - forwarder: test-vnfd1
+ capability: CP2
+
+ groups:
+ VNFFG1:
+ type: tosca.groups.nfv.VNFFG
+ description: creates chain
+ properties:
+ vendor: tacker
+ version: 1.0
+ number_of_endpoints: 2
+ dependent_virtual_link: [VL1, VL1]
+ connection_point: [CP1, CP2]
+ constituent_vnfs: [test-vnfd1, test-vnfd1]
+ members: [Forwarding_path1]
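The three inputs declared above are bound when sfc_symmetric_chain.py (earlier in this patch) creates the VNFFG from this template. A sketch of that call for reference (an editor's illustration; the mapping of the positional arguments to the template inputs is inferred from the call in the test, and the local variable names are illustrative):

    import sfc.lib.openstack_utils as os_sfc_utils

    os_sfc_utils.create_vnffg_with_param_file(
        tacker_client,        # Tacker client handle
        'test-vnffgd',        # VNFFGD registered from this template
        'test-vnffg',         # name of the VNFFG instance to create
        default_param_file,   # default parameters shipped with the tests
        client_port.id,       # presumably bound to net_src_port_id
        server_port.id,       # presumably bound to net_dst_port_id
        server_ip + '/32')    # presumably bound to ip_dst_prefix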
diff --git a/sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml-queens b/sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml-queens
new file mode 100644
index 00000000..c40c447c
--- /dev/null
+++ b/sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml-queens
@@ -0,0 +1,46 @@
+---
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+description: test-case-symmetric
+
+topology_template:
+ description: topology-template-test1
+ inputs:
+ net_src_port_id:
+ type: string
+ net_dst_port_id:
+ type: string
+ ip_dst_prefix:
+ type: string
+
+ node_templates:
+ Forwarding_path1:
+ type: tosca.nodes.nfv.FP.Tacker
+ description: creates path
+ properties:
+ id: 1
+ policy:
+ type: ACL
+ criteria:
+ - network_src_port_id: {get_input: net_src_port_id}
+ network_dst_port_id: {get_input: net_dst_port_id}
+ ip_dst_prefix: {get_input: ip_dst_prefix}
+ destination_port_range: 80-80
+ ip_proto: 6
+ path:
+ - forwarder: test-vnfd1
+ capability: CP1
+ - forwarder: test-vnfd1
+ capability: CP2
+
+ groups:
+ VNFFG1:
+ type: tosca.groups.nfv.VNFFG
+ description: creates chain
+ properties:
+ vendor: tacker
+ version: 1.0
+ number_of_endpoints: 2
+ dependent_virtual_link: [VL1, VL1]
+ connection_point: [CP1, CP2]
+ constituent_vnfs: [test-vnfd1, test-vnfd1]
+ members: [Forwarding_path1]
diff --git a/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1-pike.yaml b/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1-pike.yaml
new file mode 100644
index 00000000..f0615e4e
--- /dev/null
+++ b/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1-pike.yaml
@@ -0,0 +1,38 @@
+---
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+description: test-case2_HTTP Test
+
+topology_template:
+ description: topology-template-test2
+ inputs:
+ net_src_port_id:
+ type: string
+
+ node_templates:
+ Forwarding_path1:
+ type: tosca.nodes.nfv.FP.Tacker
+ description: creates path
+ properties:
+ id: 1
+ policy:
+ type: ACL
+ criteria:
+ - network_src_port_id: {get_input: net_src_port_id}
+ - destination_port_range: 22-80
+ - ip_proto: 6
+ path:
+ - forwarder: test-vnfd1
+ capability: CP1
+
+ groups:
+ VNFFG1:
+ type: tosca.groups.nfv.VNFFG
+ description: creates chain
+ properties:
+ vendor: tacker
+ version: 1.0
+ number_of_endpoints: 1
+ dependent_virtual_link: [VL1]
+ connection_point: [CP1]
+ constituent_vnfs: [test-vnfd1]
+ members: [Forwarding_path1]
diff --git a/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1.yaml b/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1.yaml
index f0615e4e..ceee363b 100644
--- a/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1.yaml
+++ b/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1.yaml
@@ -1,25 +1,32 @@
---
tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: test-case2_HTTP Test
+description: test-two-chains_HTTP Test
topology_template:
- description: topology-template-test2
+ description: topology-template-test-two-chains
inputs:
net_src_port_id:
type: string
node_templates:
Forwarding_path1:
- type: tosca.nodes.nfv.FP.Tacker
+ type: tosca.nodes.nfv.FP.TackerV2
description: creates path
properties:
id: 1
policy:
type: ACL
criteria:
- - network_src_port_id: {get_input: net_src_port_id}
- - destination_port_range: 22-80
- - ip_proto: 6
+ - name: get_ssh
+ classifier:
+ network_src_port_id: {get_input: net_src_port_id}
+ destination_port_range: 22-22
+ ip_proto: 6
+ - name: get_http
+ classifier:
+ network_src_port_id: {get_input: net_src_port_id}
+ destination_port_range: 80-80
+ ip_proto: 6
path:
- forwarder: test-vnfd1
capability: CP1
diff --git a/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2-pike.yaml b/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2-pike.yaml
new file mode 100644
index 00000000..ec18c9d6
--- /dev/null
+++ b/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2-pike.yaml
@@ -0,0 +1,39 @@
+---
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+description: test-case2_SSH Test
+
+topology_template:
+ description: topology-template-test2
+ inputs:
+ net_src_port_id:
+ type: string
+
+
+ node_templates:
+ Forwarding_path1:
+ type: tosca.nodes.nfv.FP.Tacker
+ description: creates path
+ properties:
+ id: 2
+ policy:
+ type: ACL
+ criteria:
+ - network_src_port_id: {get_input: net_src_port_id}
+ - destination_port_range: 22-80
+ - ip_proto: 6
+ path:
+ - forwarder: test-vnfd2
+ capability: CP1
+
+ groups:
+ VNFFG1:
+ type: tosca.groups.nfv.VNFFG
+ description: creates chain
+ properties:
+ vendor: tacker
+ version: 1.0
+ number_of_endpoints: 1
+ dependent_virtual_link: [VL1]
+ connection_point: [CP1]
+ constituent_vnfs: [test-vnfd2]
+ members: [Forwarding_path1]
diff --git a/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2.yaml b/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2.yaml
index ec18c9d6..15739cc7 100644
--- a/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2.yaml
+++ b/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2.yaml
@@ -1,26 +1,32 @@
---
tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: test-case2_SSH Test
+description: test-two-chains_SSH Test
topology_template:
- description: topology-template-test2
+ description: topology-template-test-two-chains
inputs:
net_src_port_id:
type: string
-
node_templates:
Forwarding_path1:
- type: tosca.nodes.nfv.FP.Tacker
+ type: tosca.nodes.nfv.FP.TackerV2
description: creates path
properties:
id: 2
policy:
type: ACL
criteria:
- - network_src_port_id: {get_input: net_src_port_id}
- - destination_port_range: 22-80
- - ip_proto: 6
+ - name: get_ssh
+ classifier:
+ network_src_port_id: {get_input: net_src_port_id}
+ destination_port_range: 22-22
+ ip_proto: 6
+ - name: get_http
+ classifier:
+ network_src_port_id: {get_input: net_src_port_id}
+ destination_port_range: 80-80
+ ip_proto: 6
path:
- forwarder: test-vnfd2
capability: CP1