-rw-r--r-- INFO.yaml 1
-rw-r--r-- docs/release/configguide/feature.configuration.rst 6
-rw-r--r-- docs/release/release-notes/releasenotes.rst 86
-rw-r--r-- docs/release/scenarios/os-odl-sfc-ha/scenario.description.rst 36
-rw-r--r-- docs/release/scenarios/os-odl-sfc-noha/scenario.description.rst 38
-rw-r--r-- docs/release/scenarios/os-odl-sfc_fdio-ha/index.rst 18
-rw-r--r-- docs/release/scenarios/os-odl-sfc_fdio-ha/scenario.description.rst 44
-rw-r--r-- docs/release/scenarios/os-odl-sfc_fdio-noha/index.rst 18
-rw-r--r-- docs/release/scenarios/os-odl-sfc_fdio-noha/scenario.description.rst 44
-rw-r--r-- docs/release/userguide/feature.userguide.rst 11
-rw-r--r-- requirements.txt 20
-rw-r--r-- scenarios/os-odl-sfc/role/os-odl-sfc/README 11
-rw-r--r-- scenarios/os-odl-sfc/role/os-odl-sfc/files/ansible-role-requirements-pike.yml 212
-rw-r--r-- scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/openstack_user_config.yml 282
-rw-r--r-- scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/openstack_user_config.yml 186
-rw-r--r-- scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/openstack_user_config.yml 188
-rw-r--r-- scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/defaults/repo_packages/opendaylight-pike.yml 22
-rw-r--r-- scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/inventory_odl/env.d/neutron.yml 97
-rw-r--r-- scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/inventory_odl/env.d/nova.yml 115
-rw-r--r-- scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/all_tacker.yml 14
-rw-r--r-- scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/haproxy_config.yml 286
-rw-r--r-- scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/openstack_services_pike.yml 217
-rw-r--r-- scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/os-tacker-install.yml 63
-rw-r--r-- scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/setup-openstack.yml 45
-rw-r--r-- scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/tacker.yml 36
-rw-r--r-- scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/tacker_all.yml 34
-rw-r--r-- scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/user_secrets.yml 163
-rw-r--r-- scenarios/os-odl-sfc/role/os-odl-sfc/tasks/add-sfc-repos-and-inventory-pike.yml 16
-rw-r--r-- scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-config-files.yml (renamed from scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-files-master.yml) 4
-rw-r--r-- scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-files-pike.yml 57
-rw-r--r-- scenarios/os-odl-sfc/role/os-odl-sfc/tasks/main.yml 13
-rw-r--r-- scenarios/os-odl-sfc/role/os-odl-sfc/tasks/post-deployment.yml 17
-rw-r--r-- scenarios/os-odl-sfc/role/os-odl-sfc/templates/ha/user_sfc_scenarios_variables_suse.yml.j2 3
-rw-r--r-- scenarios/os-odl-sfc/role/os-odl-sfc/templates/ha/user_sfc_scenarios_variables_ubuntu.yml.j2 12
-rw-r--r-- scenarios/os-odl-sfc/role/os-odl-sfc/templates/mini/user_sfc_scenarios_variables_suse.yml.j2 3
-rw-r--r-- scenarios/os-odl-sfc/role/os-odl-sfc/templates/mini/user_sfc_scenarios_variables_ubuntu.yml.j2 12
-rw-r--r-- scenarios/os-odl-sfc/role/os-odl-sfc/templates/noha/user_sfc_scenarios_variables_suse.yml.j2 3
-rw-r--r-- scenarios/os-odl-sfc/role/os-odl-sfc/templates/noha/user_sfc_scenarios_variables_ubuntu.yml.j2 12
-rw-r--r-- scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/README 12
-rw-r--r-- scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/defaults/main.yml 22
-rw-r--r-- scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/files/ha/openstack_user_config.yml (renamed from scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/ha/openstack_user_config.yml) 78
-rw-r--r-- scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/files/mini/openstack_user_config.yml (renamed from scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/mini/openstack_user_config.yml) 10
-rw-r--r-- scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/files/noha/openstack_user_config.yml (renamed from scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/noha/openstack_user_config.yml) 10
-rw-r--r-- scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/configure-opnfvhost.yml 74
-rw-r--r-- scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/copy-OSA-config-files.yml 20
-rw-r--r-- scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/install-osm.yml 32
-rw-r--r-- scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/main.yml 12
-rw-r--r-- scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/post-deployment.yml 27
-rw-r--r-- scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/register-vim.yml 30
-rw-r--r-- scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/ha/user_sfc_scenarios_variables_ubuntu.yml.j2 (renamed from scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/user_sfc_scenarios_variables_pike.yml) 34
-rw-r--r-- scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/lxd-bridge.j2 16
-rw-r--r-- scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/mini/user_sfc_scenarios_variables_ubuntu.yml.j2 (renamed from scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/user_sfc_scenarios_variables_pike.yml) 30
-rw-r--r-- scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/noha/user_sfc_scenarios_variables_ubuntu.yml.j2 (renamed from scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/user_sfc_scenarios_variables_pike.yml) 30
-rw-r--r-- scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/vars/main.yml 27
-rw-r--r-- scenarios/os-odl-sfc_osm/xci_overrides 10
-rw-r--r-- setup.cfg 2
-rw-r--r-- sfc/lib/cleanup.py 81
-rw-r--r-- sfc/lib/config.py 76
-rw-r--r-- sfc/lib/odl_utils.py 83
-rw-r--r-- sfc/lib/openstack_utils.py 499
-rw-r--r-- sfc/lib/test_utils.py 16
-rw-r--r-- sfc/tests/NAME_tests.py 11
-rw-r--r-- sfc/tests/functest/README.tests 37
-rw-r--r-- sfc/tests/functest/config.yaml 11
-rw-r--r-- sfc/tests/functest/pod.yaml.sample 58
-rw-r--r-- sfc/tests/functest/run_sfc_tests.py 56
-rw-r--r-- sfc/tests/functest/setup_scripts/__init__.py 0
-rw-r--r-- sfc/tests/functest/setup_scripts/compute_presetup_CI.bash 27
-rw-r--r-- sfc/tests/functest/setup_scripts/delete.sh 8
-rw-r--r-- sfc/tests/functest/setup_scripts/delete_symmetric.sh 9
-rw-r--r-- sfc/tests/functest/setup_scripts/prepare_odl_sfc.py 92
-rw-r--r-- sfc/tests/functest/setup_scripts/server_presetup_CI.bash 13
-rw-r--r-- sfc/tests/functest/sfc_chain_deletion.py 18
-rw-r--r-- sfc/tests/functest/sfc_one_chain_two_service_functions.py 17
-rw-r--r-- sfc/tests/functest/sfc_parent_function.py 432
-rw-r--r-- sfc/tests/functest/sfc_symmetric_chain.py 75
-rw-r--r-- sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py 41
-rw-r--r-- sfc/unit_tests/unit/lib/test_cleanup.py 225
-rw-r--r-- sfc/unit_tests/unit/lib/test_odl_utils.py 817
-rw-r--r-- sfc/unit_tests/unit/lib/test_openstack_utils.py 2504
-rw-r--r-- sfc/unit_tests/unit/lib/test_test_utils.py 71
-rw-r--r-- test-requirements.txt 7
-rw-r--r-- tox.ini 17
83 files changed, 5879 insertions, 2343 deletions
diff --git a/INFO.yaml b/INFO.yaml
index 81a1f9e9..869357a0 100644
--- a/INFO.yaml
+++ b/INFO.yaml
@@ -8,6 +8,7 @@ project_lead: &opnfv_sfc_ptl
email: 'manuelbuil87@gmail.com'
company: 'gmail.com'
timezone: 'Unknown'
+ id: 'mbuil'
primary_contact: *opnfv_sfc_ptl
issue_tracking:
type: 'jira'
diff --git a/docs/release/configguide/feature.configuration.rst b/docs/release/configguide/feature.configuration.rst
index ad9725ed..4cf2718f 100644
--- a/docs/release/configguide/feature.configuration.rst
+++ b/docs/release/configguide/feature.configuration.rst
@@ -12,10 +12,8 @@ SFC feature description
For details of the scenarios and their provided capabilities refer to
the scenario description documents:
-- http://docs.opnfv.org/en/stable-fraser/submodules/sfc/docs/release/scenarios/os-odl-sfc-ha/index.html
-
-- http://docs.opnfv.org/en/stable-fraser/submodules/sfc/docs/release/scenarios/os-odl-sfc-noha/index.html
-
+- :ref:`os-odl-sfc-ha`
+- :ref:`os-odl-sfc-noha`
The SFC feature enables creation of Service Function Chains - an ordered list
of chained network functions (e.g. firewalls, NAT, QoS)
diff --git a/docs/release/release-notes/releasenotes.rst b/docs/release/release-notes/releasenotes.rst
index d6d866ae..22ae740e 100644
--- a/docs/release/release-notes/releasenotes.rst
+++ b/docs/release/release-notes/releasenotes.rst
@@ -5,25 +5,24 @@
Abstract
========
-This document compiles the release notes for the Fraser release of
+This document compiles the release notes for the Hunter release of
OPNFV SFC
Important notes
===============
These notes provide release information for the use of SFC with the
-Apex installer, xci tool and Compass4NFV for the Fraser release of OPNFV.
+Apex installer and xci tool for the Hunter release of OPNFV.
Summary
=======
-The goal of the SFC Fraser release is to integrate the OpenDaylight
-SFC project into an OPNFV environment, with either the Apex installer,
-xci tools or Compass4NFV.
+The goal of the SFC release is to integrate the OpenDaylight SFC project
+into an OPNFV environment, with either the Apex installer or xci tool.
More information about OpenDaylight and SFC can be found here.
-- `OpenDaylight <http://www.opendaylight.org>`_ version "Oxygen SR1"
+- `OpenDaylight <http://www.opendaylight.org>`_ version "Fluorine SR1"
- `Service function chaining <https://wiki.opnfv.org/display/sfc/Service+Function+Chaining+Home>`_
@@ -32,11 +31,9 @@ More information about OpenDaylight and SFC can be found here.
- Overall OPNFV documentation
- - `Design document <http://docs.opnfv.org/en/stable-fraser/submodules/sfc/docs/development/design/index.html>`_
-
- - `User Guide <http://docs.opnfv.org/en/stable-fraser/submodules/sfc/docs/release/userguide/index.html>`_
-
- - `Installation Instructions <http://docs.opnfv.org/en/stable-fraser/submodules/sfc/docs/release/configguide/index.html>`_
+ - :ref:`Design document <sfc-design>`
+ - :ref:`User Guide <sfc-userguide>`
+ - :ref:`Installation Instructions <sfc-configguide>`
- Release Notes (this document)
@@ -48,18 +45,16 @@ Release Data
| **Project** | sfc |
| | |
+--------------------------------------+--------------------------------------+
-| **Repo/tag** | opnfv-6.2.0 |
+| **Repo/tag** | opnfv-8.0.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release designation** | Fraser 6.2 |
+| **Release designation** | Hunter 8.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | 29th June 2018 |
+| **Release date** | May 10th, 2019 |
| | |
+--------------------------------------+--------------------------------------+
-| **Purpose of the delivery** | Move to OpenStack Pike and ODL Oxygen|
-| | Support symmetric testcases |
-| | Support master branch of OpenStack |
+| **Purpose of the delivery** | Project maintenance |
+--------------------------------------+--------------------------------------+
Version change
@@ -69,22 +64,20 @@ Module version changes
~~~~~~~~~~~~~~~~~~~~~~
This release of OPNFV SFC is based on the following upstream versions:
-- OpenStack Pike release
+- OpenStack Rocky release
-- OpenDaylight Oxygen SR1 release
+- OpenDaylight Fluorine SR1 release
-- Open vSwitch 2.6.1 with Yi Yang NSH patch
+- Open vSwitch 2.9.2
Document changes
~~~~~~~~~~~~~~~~
-This is the first tracked version of OPNFV SFC Fraser. It comes with
+This is the first tracked version of OPNFV SFC Hunter. It comes with
the following documentation:
-- `Design document <http://docs.opnfv.org/en/stable-fraser/submodules/sfc/docs/development/design/index.html>`_
-
-- `User Guide <http://docs.opnfv.org/en/stable-fraser/submodules/sfc/docs/release/userguide/index.html>`_
-
-- `Installation Instructions <http://docs.opnfv.org/en/stable-fraser/submodules/sfc/docs/release/configguide/index.html>`_
+- :ref:`Design document <sfc-design>`
+- :ref:`User Guide <sfc-userguide>`
+- :ref:`Installation Instructions <sfc-configguide>`
- Release notes (This document)
@@ -94,10 +87,6 @@ Reason for version
Feature additions
~~~~~~~~~~~~~~~~~
-- `Using SNAPS as base for our tests`
-- `Increase test coverage with two extra test cases: symmetric and deletion`
-- `Reduced the footprint of the image we use for testing to reduce testing time`
-
Bug corrections
~~~~~~~~~~~~~~~
@@ -107,17 +96,14 @@ Deliverables
Software deliverables
~~~~~~~~~~~~~~~~~~~~~
-No specific deliverables are created, as SFC is included with Apex and Compass4NFV.
+No specific deliverables are created, as SFC is included with Apex.
Documentation deliverables
~~~~~~~~~~~~~~~~~~~~~~~~~~
-- `Design document <http://docs.opnfv.org/en/stable-fraser/submodules/sfc/docs/development/design/index.html>`_
-
-- `User Guide <http://docs.opnfv.org/en/stable-fraser/submodules/sfc/docs/release/userguide/index.html>`_
-
-- `Installation Instructions <http://docs.opnfv.org/en/stable-fraser/submodules/sfc/docs/release/configguide/index.html>`_
-
+- :ref:`Design document <sfc-design>`
+- :ref:`User Guide <sfc-userguide>`
+- :ref:`Installation Instructions <sfc-configguide>`
- Release notes (This document)
Known Limitations, Issues and Workarounds
@@ -126,32 +112,18 @@ Known Limitations, Issues and Workarounds
System Limitations
------------------
-The Fraser 1.0 release has a few limitations:
-
-1 - The testcase sfc_two_chains_SSH_and_HTTP is disabled in this release due to
-a missing feature in ODL. We are unable to currently update a chain config
-
Known issues
------------
-1 - When tacker is deployed without Mistral, there is an ERROR in the logs and
-the VIM is always in 'PENDING' state because tacker cannot monitor its health.
-However, everything works and SFs can be created.
-
-2 - When tacker is deployed without barbican, it cannot be in HA mode because
-barbican is the only way to fetch the fernet keys.
-
Workarounds
-----------
Test results
============
-The Fraser release of SFC has undergone QA test runs with Functest tests on the
-Apex and Compass installers and xci utility
References
==========
-For more information on the OPNFV Fraser release, please see:
+For more information on the OPNFV Hunter release, please see:
OPNFV
-----
@@ -160,12 +132,12 @@ OPNFV
2) `OPNFV documentation- and software downloads <https://www.opnfv.org/software/download>`_
-3) `OPNFV Fraser release <https://docs.opnfv.org/en/stable-fraser/index.html>`_
+3) `OPNFV Hunter release <https://docs.opnfv.org/en/latest/index.html>`_
OpenStack
---------
-4) `OpenStack Pike Release artifacts <http://www.openstack.org/software/pike>`_
+4) `OpenStack Rocky Release artifacts <http://www.openstack.org/software/rocky>`_
5) `OpenStack documentation <http://docs.openstack.org>`_
@@ -173,9 +145,3 @@ OpenDaylight
------------
6) `OpenDaylight artifacts <http://www.opendaylight.org/software/downloads>`_
-
-Open vSwitch with NSH
----------------------
-
-7) https://github.com/yyang13/ovs_nsh_patches
-
diff --git a/docs/release/scenarios/os-odl-sfc-ha/scenario.description.rst b/docs/release/scenarios/os-odl-sfc-ha/scenario.description.rst
index 65ba3c7e..b9195466 100644
--- a/docs/release/scenarios/os-odl-sfc-ha/scenario.description.rst
+++ b/docs/release/scenarios/os-odl-sfc-ha/scenario.description.rst
@@ -8,7 +8,7 @@ Introduction
The os-odl-sfc-ha is intended to be used to install the OPNFV SFC project in a standard
OPNFV High Availability mode. The OPNFV SFC project integrates the OpenDaylight SFC project
-into the OPNFV environment. The OPNFV SFC Fraser release uses the OpenDaylight Oxygen SR1 release.
+into the OPNFV environment. The OPNFV SFC Gambia release uses the OpenDaylight Fluorine SR1 release.
Scenario components and composition
===================================
@@ -53,11 +53,14 @@ will automatically be installed.
The VNF Manager
---------------
-In order to create a VM for each Service Function, a VNF Manager is needed. The OPNFV
+In order to create a VM for each Service Function, a VNF Manager is recommended. The OPNFV
SFC project currently uses the Tacker OpenStack project as a VNF Manager. Tacker is
installed on the controller node and manages VNF life cycle, and coordinates VM creation
and SFC configuration with OpenStack and OpenDaylight SFC project.
+It is also possible to run tests without a VNF Manager, steering SFC through the
+networking-sfc project.
+
Scenario usage overview
=======================
.. Provide a brief overview on how to use the scenario and the features available to the
@@ -66,7 +69,7 @@ Scenario usage overview
Once this scenario is installed, it will be possible to create Service Chains and
classification entries to map tenant traffic to individual, pre-defined Service Chains.
-All configuration can be performed using the Tacker CLI.
+All configuration can be performed using the Tacker CLI or the networking-sfc CLI.
Limitations, Issues and Workarounds
===================================
@@ -77,16 +80,21 @@ Limitations, Issues and Workarounds
Specific version of OVS
-----------------------
-SFC needs changes in OVS to include the Network Service Headers (NSH) Service Chaining
-encapsulation. This OVS patch has been ongoing for quite a while (2 years+), and still
-has not been officially merged. Previously, SFC used NSH from a branched version of OVS
-based on 2.3.90, called the "Pritesh Patch". In the OpenDaylight Oxygen SR1 release, SFC was
-changed to use a newer, branched version of OVS based on 2.6.1, called the "Yi Yang
-Patch".
+SFC needs OVS 2.9.2 or higher because it includes the Network Service Headers (NSH)
+Service Chaining encapsulation.
+
+How to deploy the scenario
+==========================
+
+There are three tools which can be used to deploy the scenario:
+
+- Apex - https://opnfv-apex.readthedocs.io/en/latest/release/installation/index.html#apex-installation
+- XCI tool - https://opnfv-releng-xci.readthedocs.io/en/stable/xci-user-guide.html#user-guide
+- Compass - https://opnfv-compass4nfv.readthedocs.io/en/stable-gambia/release/installation/index.html#compass4nfv-installation-instructions
+
+For more information about how to deploy the SFC scenario, check:
-The older version of OVS only supported VXLAN-GPE + NSH encapsulation, but the newer
-version supports both ETH + NSH and VXLAN-GPE + ETH + NSH. Currently SFC is only
-implemented with VXLAN-GPE + ETH + NSH.
+https://wiki.opnfv.org/display/sfc/Deploy+OPNFV+SFC+scenarios
References
==========
@@ -97,6 +105,6 @@ https://wiki.opnfv.org/display/sfc/Service+Function+Chaining+Home
https://wiki.opendaylight.org/view/Service_Function_Chaining:Main
-For more information on the OPNFV Fraser release, please visit:
+For more information on the OPNFV Gambia release, please visit:
-https://docs.opnfv.org/en/stable-fraser/index.html
+https://docs.opnfv.org/en/stable-gambia/index.html
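
Note (illustration only, not part of this change): the scenario above configures
chains through Tacker, so each Service Function is described by a Tacker VNFD.
A minimal sketch of such a VNFD is shown below; the image, flavor and network
names are placeholders that would have to match the actual deployment:

# Hypothetical minimal Tacker VNFD (TOSCA) describing one Service Function VDU.
# All names below (image, flavor, network) are illustrative placeholders.
tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
description: Example Service Function VNFD
topology_template:
  node_templates:
    VDU1:
      type: tosca.nodes.nfv.VDU.Tacker
      properties:
        image: sf-test-image      # assumed pre-uploaded Glance image
        flavor: m1.small
    CP1:
      type: tosca.nodes.nfv.CP.Tacker
      properties:
        management: true
        order: 0
      requirements:
        - virtualLink:
            node: VL1
        - virtualBinding:
            node: VDU1
    VL1:
      type: tosca.nodes.nfv.VL
      properties:
        network_name: net_mgmt    # assumed management network
        vendor: Tacker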
diff --git a/docs/release/scenarios/os-odl-sfc-noha/scenario.description.rst b/docs/release/scenarios/os-odl-sfc-noha/scenario.description.rst
index 65ba3c7e..11f787c5 100644
--- a/docs/release/scenarios/os-odl-sfc-noha/scenario.description.rst
+++ b/docs/release/scenarios/os-odl-sfc-noha/scenario.description.rst
@@ -6,9 +6,9 @@ Introduction
============
.. In this section explain the purpose of the scenario and the types of capabilities provided
-The os-odl-sfc-ha is intended to be used to install the OPNFV SFC project in a standard
+The os-odl-sfc-noha is intended to be used to install the OPNFV SFC project in a standard
OPNFV non-High Availability mode. The OPNFV SFC project integrates the OpenDaylight SFC project
-into the OPNFV environment. The OPNFV SFC Fraser release uses the OpenDaylight Oxygen SR1 release.
+into the OPNFV environment. The OPNFV SFC Gambia release uses the OpenDaylight Fluorine SR1 release.
Scenario components and composition
===================================
@@ -53,11 +53,14 @@ will automatically be installed.
The VNF Manager
---------------
-In order to create a VM for each Service Function, a VNF Manager is needed. The OPNFV
+In order to create a VM for each Service Function, a VNF Manager is recommended. The OPNFV
SFC project currently uses the Tacker OpenStack project as a VNF Manager. Tacker is
installed on the controller node and manages VNF life cycle, and coordinates VM creation
and SFC configuration with OpenStack and OpenDaylight SFC project.
+It is also possible to run tests without a VNF Manager, steering SFC through the
+networking-sfc project.
+
Scenario usage overview
=======================
.. Provide a brief overview on how to use the scenario and the features available to the
@@ -66,7 +69,7 @@ Scenario usage overview
Once this scenario is installed, it will be possible to create Service Chains and
classification entries to map tenant traffic to individual, pre-defined Service Chains.
-All configuration can be performed using the Tacker CLI.
+All configuration can be performed using the Tacker CLI or the networking-sfc CLI.
Limitations, Issues and Workarounds
===================================
@@ -77,16 +80,21 @@ Limitations, Issues and Workarounds
Specific version of OVS
-----------------------
-SFC needs changes in OVS to include the Network Service Headers (NSH) Service Chaining
-encapsulation. This OVS patch has been ongoing for quite a while (2 years+), and still
-has not been officially merged. Previously, SFC used NSH from a branched version of OVS
-based on 2.3.90, called the "Pritesh Patch". In the OpenDaylight Oxygen SR1 release, SFC was
-changed to use a newer, branched version of OVS based on 2.6.1, called the "Yi Yang
-Patch".
+SFC needs OVS 2.9.2 or higher because it includes the Network Service Headers (NSH)
+Service Chaining encapsulation.
+
+How to deploy the scenario
+==========================
+
+There are three tools which can be used to deploy the scenario:
+
+- Apex - https://opnfv-apex.readthedocs.io/en/latest/release/installation/index.html#apex-installation
+- XCI tool - https://opnfv-releng-xci.readthedocs.io/en/stable/xci-user-guide.html#user-guide
+- Compass - https://opnfv-compass4nfv.readthedocs.io/en/stable-gambia/release/installation/index.html#compass4nfv-installation-instructions
+
+For more information about how to deploy the SFC scenario, check:
-The older version of OVS only supported VXLAN-GPE + NSH encapsulation, but the newer
-version supports both ETH + NSH and VXLAN-GPE + ETH + NSH. Currently SFC is only
-implemented with VXLAN-GPE + ETH + NSH.
+https://wiki.opnfv.org/display/sfc/Deploy+OPNFV+SFC+scenarios
References
==========
@@ -97,6 +105,6 @@ https://wiki.opnfv.org/display/sfc/Service+Function+Chaining+Home
https://wiki.opendaylight.org/view/Service_Function_Chaining:Main
-For more information on the OPNFV Fraser release, please visit:
+For more information on the OPNFV Gambia release, please visit:
-https://docs.opnfv.org/en/stable-fraser/index.html
+https://docs.opnfv.org/en/stable-gambia/index.html
diff --git a/docs/release/scenarios/os-odl-sfc_fdio-ha/index.rst b/docs/release/scenarios/os-odl-sfc_fdio-ha/index.rst
deleted file mode 100644
index 28413b2e..00000000
--- a/docs/release/scenarios/os-odl-sfc_fdio-ha/index.rst
+++ /dev/null
@@ -1,18 +0,0 @@
-.. _os-odl-sfc_fdio-ha:
-
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) <optionally add copywriters name>
-
-=========================================
-os-odl-sfc_fdio-ha overview and description
-=========================================
-.. This document will be used to provide a description of the scenario for an end user.
-.. You should explain the purpose of the scenario, the types of capabilities provided and
-.. the unique components that make up the scenario including how they are used.
-
-.. toctree::
- :maxdepth: 3
-
- ./scenario.description.rst
-
diff --git a/docs/release/scenarios/os-odl-sfc_fdio-ha/scenario.description.rst b/docs/release/scenarios/os-odl-sfc_fdio-ha/scenario.description.rst
deleted file mode 100644
index b9d965a3..00000000
--- a/docs/release/scenarios/os-odl-sfc_fdio-ha/scenario.description.rst
+++ /dev/null
@@ -1,44 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) <optionally add copywriters name>
-
-Introduction
-============
-.. In this section explain the purpose of the scenario and the types of capabilities provided
-
-The os-odl-sfc_fdio-ha is intended to be used to install the OPNFV SFC project in a standard
-OPNFV High Availability mode. The OPNFV SFC project integrates the OpenDaylight SFC project
-into the OPNFV environment. The OPNFV SFC Fraser release uses the OpenDaylight Oxygen SR1 release.
-
-Scenario components and composition
-===================================
-.. In this section describe the unique components that make up the scenario,
-.. what each component provides and why it has been included in order
-.. to communicate to the user the capabilities available in this scenario.
-
-OpenDaylight features installed
--------------------------------
-
-Scenario usage overview
-=======================
-.. Provide a brief overview on how to use the scenario and the features available to the
-.. user. This should be an "introduction" to the userguide document, and explicitly link to it,
-.. where the specifics of the features are covered including examples and API's
-
-Once this scenario is installed, it will be possible to create Service Chains and
-classification entries to map tenant traffic to individual, pre-defined Service Chains.
-All configuration can be performed using the Tacker CLI.
-
-Limitations, Issues and Workarounds
-===================================
-.. Explain scenario limitations here, this should be at a design level rather than discussing
-.. faults or bugs. If the system design only provide some expected functionality then provide
-.. some insight at this point.
-
-Specific version of FD.IO
------------------------
-
-TO BE ADDED
-
-References
-==========
diff --git a/docs/release/scenarios/os-odl-sfc_fdio-noha/index.rst b/docs/release/scenarios/os-odl-sfc_fdio-noha/index.rst
deleted file mode 100644
index a77bc4c5..00000000
--- a/docs/release/scenarios/os-odl-sfc_fdio-noha/index.rst
+++ /dev/null
@@ -1,18 +0,0 @@
-.. _os-odl-sfc_fdio-noha:
-
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) <optionally add copywriters name>
-
-===========================================
-os-odl-sfc_fdio-noha overview and description
-===========================================
-.. This document will be used to provide a description of the scenario for an end user.
-.. You should explain the purpose of the scenario, the types of capabilities provided and
-.. the unique components that make up the scenario including how they are used.
-
-.. toctree::
- :maxdepth: 3
-
- ./scenario.description.rst
-
diff --git a/docs/release/scenarios/os-odl-sfc_fdio-noha/scenario.description.rst b/docs/release/scenarios/os-odl-sfc_fdio-noha/scenario.description.rst
deleted file mode 100644
index b9d965a3..00000000
--- a/docs/release/scenarios/os-odl-sfc_fdio-noha/scenario.description.rst
+++ /dev/null
@@ -1,44 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) <optionally add copywriters name>
-
-Introduction
-============
-.. In this section explain the purpose of the scenario and the types of capabilities provided
-
-The os-odl-sfc_fdio-ha is intended to be used to install the OPNFV SFC project in a standard
-OPNFV High Availability mode. The OPNFV SFC project integrates the OpenDaylight SFC project
-into the OPNFV environment. The OPNFV SFC Fraser release uses the OpenDaylight Oxygen SR1 release.
-
-Scenario components and composition
-===================================
-.. In this section describe the unique components that make up the scenario,
-.. what each component provides and why it has been included in order
-.. to communicate to the user the capabilities available in this scenario.
-
-OpenDaylight features installed
--------------------------------
-
-Scenario usage overview
-=======================
-.. Provide a brief overview on how to use the scenario and the features available to the
-.. user. This should be an "introduction" to the userguide document, and explicitly link to it,
-.. where the specifics of the features are covered including examples and API's
-
-Once this scenario is installed, it will be possible to create Service Chains and
-classification entries to map tenant traffic to individual, pre-defined Service Chains.
-All configuration can be performed using the Tacker CLI.
-
-Limitations, Issues and Workarounds
-===================================
-.. Explain scenario limitations here, this should be at a design level rather than discussing
-.. faults or bugs. If the system design only provide some expected functionality then provide
-.. some insight at this point.
-
-Specific version of FD.IO
------------------------
-
-TO BE ADDED
-
-References
-==========
diff --git a/docs/release/userguide/feature.userguide.rst b/docs/release/userguide/feature.userguide.rst
index 0e9ce2cf..050a0c86 100644
--- a/docs/release/userguide/feature.userguide.rst
+++ b/docs/release/userguide/feature.userguide.rst
@@ -36,6 +36,17 @@ SFC capabilities and usage
The OPNFV SFC feature can be deployed with either the "os-odl-sfc-ha" or the
"os-odl-sfc-noha" scenario. SFC usage for both of these scenarios is the same.
+Once the deployment has been completed, the SFC test cases retrieve environment
+information (e.g. the installer IP, the controller IP, etc.) from the installer
+and use it to execute properly. This is the default behavior.
+In case there is no installer in place and the server for the SFC test execution
+has been prepared manually, installing all necessary components (e.g. OpenStack,
+OpenDaylight, etc.) by hand, the user should update the "pod.yaml" file with all
+the necessary details for each node which participates in the scenario.
+In case the Dovetail project triggers the SFC test scenarios, the "pod.yaml" file
+is prepared by the Dovetail project automatically.
+
As previously mentioned, Tacker is used as a VNF Manager and SFC Orchestrator. All
the configuration necessary to create working service chains and classifiers can
be performed using the Tacker command line. Refer to the `Tacker walkthrough <https://github.com/trozet/sfc-random/blob/master/tacker_sfc_apex_walkthrough.txt>`_
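
Note (illustration only, not part of this change): the userguide change above
requires a hand-written "pod.yaml" when no installer is present; the repository
ships sfc/tests/functest/pod.yaml.sample as a starting point. A hedged sketch of
the kind of per-node detail such a file carries follows; the exact key names
should be checked against the sample:

# Illustrative pod.yaml sketch for a manually prepared environment.
# Verify the exact keys against sfc/tests/functest/pod.yaml.sample.
nodes:
  - name: controller00
    role: Controller                  # role of the node in the scenario
    ip: 172.29.236.11                 # address reachable from the test host
    user: root
    key_filename: /root/.ssh/id_rsa   # or "password: <password>"
  - name: compute00
    role: Compute
    ip: 172.29.236.12
    user: root
    key_filename: /root/.ssh/id_rsa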
diff --git a/requirements.txt b/requirements.txt
index 3442097f..4d464973 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,15 +1,17 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-pbr!=2.1.0,>=2.0.0 # Apache-2.0
-paramiko>=2.0.0 # LGPLv2.1+
-python-glanceclient>=2.8.0 # Apache-2.0
-requests>=2.14.2 # Apache-2.0
-xmltodict>=0.10.1 # MIT
-python-keystoneclient>=3.8.0 # Apache-2.0
-python-novaclient>=9.1.0 # Apache-2.0
-python-tackerclient>=0.8.0 # Apache-2.0
-PyYAML>=3.12 # MIT
+pbr!=2.1.0 # Apache-2.0
+paramiko # LGPLv2.1+
+python-glanceclient # Apache-2.0
+requests!=2.20.0 # Apache-2.0
+xmltodict # MIT
+python-keystoneclient!=2.1.0 # Apache-2.0
+python-novaclient # Apache-2.0
+python-tackerclient # Apache-2.0
+python-neutronclient # Apache-2.0
+networking-sfc>=7.0.0
+PyYAML # MIT
opnfv
snaps
xtesting # Apache-2.0
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/README b/scenarios/os-odl-sfc/role/os-odl-sfc/README
deleted file mode 100644
index a7461ec8..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/README
+++ /dev/null
@@ -1,11 +0,0 @@
-This is the role which deploys the os-odl-sfc scenarios in xci.
-
-This role currently works with:
-
-- OpenStack stable/queens
-- ODL Oxygen
-- OVS+NSH patch
-- OpenSUSE 42.3 or Ubuntu 16.04
-
-Follow this link:
-https://wiki.opnfv.org/display/sfc/Deploy+OPNFV+SFC+scenarios
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ansible-role-requirements-pike.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/ansible-role-requirements-pike.yml
deleted file mode 100644
index a954dccc..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ansible-role-requirements-pike.yml
+++ /dev/null
@@ -1,212 +0,0 @@
-- name: ansible-hardening
- scm: git
- src: https://git.openstack.org/openstack/ansible-hardening
- version: cee2e0b5b432c50614b908d9bf50ed2cc32d8daa
-- name: apt_package_pinning
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-apt_package_pinning
- version: 956e06cf66bd878b132c58bdd97304749c0da189
-- name: pip_install
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-pip_install
- version: e52f829e7386e43ca8a85ab820901740590dc6ea
-- name: galera_client
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-galera_client
- version: 79b4bd9980b75355ec729bba37a440f4c88df106
-- name: galera_server
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-galera_server
- version: 67628375be0e3996b0f5cbddf8e1b15d6ca85333
-- name: ceph_client
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-ceph_client
- version: de60aa5d96cd6803674e8b398828205909aa54a6
-- name: haproxy_server
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-haproxy_server
- version: aacaaed2e36945baa7fb9e8bd6ceb8393dad3730
-- name: keepalived
- scm: git
- src: https://github.com/evrardjp/ansible-keepalived
- version: 3.0.3
-- name: lxc_container_create
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-lxc_container_create
- version: 8a3b201bbaa82a38bd162315efccec1ec244e481
-- name: lxc_hosts
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-lxc_hosts
- version: a6f5052063e7ac4157da36c4d105fdb855abd366
-- name: memcached_server
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-memcached_server
- version: 2c8cd40e18902b9aa6fab87e9fd299d437ed0a78
-- name: openstack_hosts
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-openstack_hosts
- version: deb8d59ff40d9f828172933df6028202e6c9fb04
-- name: os_keystone
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_keystone
- version: 782ec3eae6d43e00699b831859401b347273f7fd
-- name: openstack_openrc
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-openstack_openrc
- version: 452a227ea5cf50611832cd289e163d577791407a
-- name: os_aodh
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_aodh
- version: 0321e0bae7050b97fa8e3d66033fe177c31e2d63
-- name: os_barbican
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_barbican
- version: 06ef75aa9aa9bd126bd17ab86a7c38f44d901203
-- name: os_ceilometer
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_ceilometer
- version: 4b3e0589a0188de885659614ef4e076018af54f7
-- name: os_cinder
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_cinder
- version: 9173876f6bfc5e2955d74628b32f5cff2e9e39c3
-- name: os_designate
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_designate
- version: dd13917240c7dc9fff9df7e042ba32fb002838ce
-- name: os_glance
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_glance
- version: 91e544aadae016c0e190d52a89ce0a1659a84641
-- name: os_gnocchi
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_gnocchi
- version: 5f8950f61ed6b61d1cc06ab73b3b02466bee0db1
-- name: os_heat
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_heat
- version: 4d1efae631026631fb2af4f43a9fe8ca210d643e
-- name: os_horizon
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_horizon
- version: af4abbad26b4ab9ce3c50266bc212199e3e6aea8
-- name: os_ironic
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_ironic
- version: 91abf2ec56b9b4337e5e98d9ba6f2c04155331a1
-- name: os_magnum
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_magnum
- version: 0fdeea886ef4227e02d793f6dbfd54ccd9e6e088
-- name: os_molteniron
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_molteniron
- version: 0506e3c3f511518cbd5e7499e2a675b25d4ac967
-- name: os_neutron
- scm: git
- src: https://github.com/manuelbuil/openstack-ansible-os_neutron
- version: pike-SFC-support-Feb
-- name: os_nova
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_nova
- version: 312959bea6d4d577c6a4146ae81fa4044ac26d14
-- name: os_octavia
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_octavia
- version: 5fd1fbae703c17f928cfc00f60aeeed0500c6f2b
-- name: os_rally
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_rally
- version: c91eb6cc61a1f4c2136084e0df758eed117f1bbb
-- name: os_sahara
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_sahara
- version: c2ad98dcda096c34e9b63d4e44c9a231ed093fb4
-- name: os_swift
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_swift
- version: 8f5f4be2c1040220e40b8bddbdf5e3b1a1d35baa
-- name: os_tempest
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_tempest
- version: bcdfa619fe46629bdf5aa8cde5d1e843e7a7b576
-- name: os_trove
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_trove
- version: 6596f6b28c88a88c89e293ea8f5f8551eb491fd1
-- name: plugins
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-plugins
- version: 3601c1e006890899f3c794cb8654bfaca6c32d58
-- name: rabbitmq_server
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-rabbitmq_server
- version: 5add96f3d72fb07998da715c52c46eceb54d9c4a
-- name: repo_build
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-repo_build
- version: 377c4376aef1c67c8f0cb3d3bca741bc102bf740
-- name: repo_server
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-repo_server
- version: 63c6537fcb7fa688e1e82074ea958b9349f58cc7
-- name: rsyslog_client
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_client
- version: 693255ee40a2908707fcc962d620d68008647a57
-- name: rsyslog_server
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_server
- version: fa201bbadeb959f363ecba7046f052b2ee16e474
-- name: sshd
- scm: git
- src: https://github.com/willshersystems/ansible-sshd
- version: 0.5.1
-- name: bird
- scm: git
- src: https://github.com/logan2211/ansible-bird
- version: '1.4'
-- name: etcd
- scm: git
- src: https://github.com/logan2211/ansible-etcd
- version: '1.3'
-- name: unbound
- scm: git
- src: https://github.com/logan2211/ansible-unbound
- version: '1.5'
-- name: resolvconf
- scm: git
- src: https://github.com/logan2211/ansible-resolvconf
- version: '1.3'
-- name: ceph-defaults
- scm: git
- src: https://github.com/ceph/ansible-ceph-defaults
- version: v3.0.8
-- name: ceph-common
- scm: git
- src: https://github.com/ceph/ansible-ceph-common
- version: v3.0.8
-- name: ceph-config
- scm: git
- src: https://github.com/ceph/ansible-ceph-config
- version: v3.0.8
-- name: ceph-mon
- scm: git
- src: https://github.com/ceph/ansible-ceph-mon
- version: v3.0.8
-- name: ceph-mgr
- scm: git
- src: https://github.com/ceph/ansible-ceph-mgr
- version: v3.0.8
-- name: ceph-osd
- scm: git
- src: https://github.com/ceph/ansible-ceph-osd
- version: v3.0.8
-- name: os_tacker
- scm: git
- src: https://github.com/manuelbuil/openstack-ansible-os_tacker
- version: pike-SFC-support-Feb
-- name: opendaylight
- scm: git
- src: https://git.opendaylight.org/gerrit/p/integration/packaging/ansible-opendaylight.git
- version: 4aabce0605ef0f51eef4d6564cc7d779630706c5
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/openstack_user_config.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/openstack_user_config.yml
new file mode 100644
index 00000000..899785dc
--- /dev/null
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/openstack_user_config.yml
@@ -0,0 +1,282 @@
+---
+cidr_networks:
+ container: 172.29.236.0/22
+ tunnel: 172.29.240.0/22
+ storage: 172.29.244.0/22
+
+used_ips:
+ - "172.29.236.1,172.29.236.50"
+ - "172.29.240.1,172.29.240.50"
+ - "172.29.244.1,172.29.244.50"
+ - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+ internal_lb_vip_address: 172.29.236.222
+ external_lb_vip_address: 192.168.122.220
+ barbican_keys_backend: true
+ tunnel_bridge: "br-vxlan"
+ management_bridge: "br-mgmt"
+ provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ type: "vlan"
+ range: "1:1"
+ net_name: "vlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.14
+ controller02:
+ ip: 172.29.236.15
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.14
+ controller02:
+ ip: 172.29.236.15
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.14
+ controller02:
+ ip: 172.29.236.15
+
+# rsyslog server
+# log_hosts:
+# log1:
+# ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.14
+ controller02:
+ ip: 172.29.236.15
+
+# barbican
+key-manager_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.14
+ controller02:
+ ip: 172.29.236.15
+
+# cinder api services
+storage-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.14
+ controller02:
+ ip: 172.29.236.15
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.12"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+ controller01:
+ ip: 172.29.236.14
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.12"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+ controller02:
+ ip: 172.29.236.15
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.12"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.14
+ controller02:
+ ip: 172.29.236.15
+
+# heat
+orchestration_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.14
+ controller02:
+ ip: 172.29.236.15
+
+# horizon
+dashboard_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.14
+ controller02:
+ ip: 172.29.236.15
+
+# tacker
+mano_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.14
+ controller02:
+ ip: 172.29.236.15
+
+# ceilometer
+metering-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.14
+ controller02:
+ ip: 172.29.236.15
+
+# neutron server, agents (L3, etc)
+network_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.14
+ controller02:
+ ip: 172.29.236.15
+
+# nova hypervisors
+compute_hosts:
+ compute00:
+ ip: 172.29.236.12
+ compute01:
+ ip: 172.29.236.13
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.12"
+ share: "/volumes"
+ controller01:
+ ip: 172.29.236.14
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.12"
+ share: "/volumes"
+ controller02:
+ ip: 172.29.236.15
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.12"
+ share: "/volumes"
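
Note (illustration only, not part of this change): the comments inside the file
above state that the per-controller glance and cinder settings could instead be
applied once as global settings. A sketch of the equivalent global form for the
glance case, assuming the usual OpenStack-Ansible user_variables.yml placement:

# Hypothetical global equivalent in user_variables.yml, replacing the
# glance_nfs_client block repeated under each controller above.
glance_nfs_client:
  - server: "172.29.244.12"
    remote_path: "/images"
    local_path: "/var/lib/glance/images"
    type: "nfs"
    options: "_netdev,auto"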
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/openstack_user_config.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/openstack_user_config.yml
new file mode 100644
index 00000000..4ae8a83f
--- /dev/null
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/openstack_user_config.yml
@@ -0,0 +1,186 @@
+---
+cidr_networks:
+ container: 172.29.236.0/22
+ tunnel: 172.29.240.0/22
+ storage: 172.29.244.0/22
+
+used_ips:
+ - "172.29.236.1,172.29.236.50"
+ - "172.29.240.1,172.29.240.50"
+ - "172.29.244.1,172.29.244.50"
+ - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+ internal_lb_vip_address: 172.29.236.11
+ external_lb_vip_address: 192.168.122.3
+ barbican_keys_backend: true
+ tunnel_bridge: "br-vxlan"
+ management_bridge: "br-mgmt"
+ provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ type: "vlan"
+ range: "1:1"
+ net_name: "vlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# rsyslog server
+# log_hosts:
+# log1:
+# ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# barbican
+key-manager_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# cinder api services
+storage-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.12"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# heat
+orchestration_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# horizon
+dashboard_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# tacker
+mano_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# ceilometer
+metering-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# neutron server, agents (L3, etc)
+network_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# nova hypervisors
+compute_hosts:
+ compute00:
+ ip: 172.29.236.12
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.12"
+ share: "/volumes"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/openstack_user_config.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/openstack_user_config.yml
new file mode 100644
index 00000000..ed8ff8f5
--- /dev/null
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/openstack_user_config.yml
@@ -0,0 +1,188 @@
+---
+cidr_networks:
+ container: 172.29.236.0/22
+ tunnel: 172.29.240.0/22
+ storage: 172.29.244.0/22
+
+used_ips:
+ - "172.29.236.1,172.29.236.50"
+ - "172.29.240.1,172.29.240.50"
+ - "172.29.244.1,172.29.244.50"
+ - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+ internal_lb_vip_address: 172.29.236.11
+ external_lb_vip_address: 192.168.122.3
+ barbican_keys_backend: true
+ tunnel_bridge: "br-vxlan"
+ management_bridge: "br-mgmt"
+ provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ type: "vlan"
+ range: "1:1"
+ net_name: "vlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# rsyslog server
+# log_hosts:
+# log1:
+# ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# barbican
+key-manager_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# cinder api services
+storage-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.12"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# heat
+orchestration_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# horizon
+dashboard_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# tacker
+mano_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# ceilometer
+metering-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# neutron server, agents (L3, etc)
+network_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# nova hypervisors
+compute_hosts:
+ compute00:
+ ip: 172.29.236.12
+ compute01:
+ ip: 172.29.236.13
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.12"
+ share: "/volumes"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/defaults/repo_packages/opendaylight-pike.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/defaults/repo_packages/opendaylight-pike.yml
deleted file mode 100644
index d4a0b931..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/defaults/repo_packages/opendaylight-pike.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-# Copyright 2017, Ericsson AB
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-networking_odl_git_repo: https://git.openstack.org/openstack/networking-odl
-networking_odl_git_install_branch: stable/pike
-networking_odl_project_group: neutron_all
-
-networking_sfc_git_repo: https://git.openstack.org/openstack/networking-sfc
-networking_sfc_git_install_branch: stable/pike
-networking_sfc_project_group: neutron_all
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/inventory_odl/env.d/neutron.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/inventory_odl/env.d/neutron.yml
deleted file mode 100644
index fd74d8ac..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/inventory_odl/env.d/neutron.yml
+++ /dev/null
@@ -1,97 +0,0 @@
----
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-component_skel:
- neutron_agent:
- belongs_to:
- - neutron_all
- neutron_dhcp_agent:
- belongs_to:
- - neutron_all
- neutron_linuxbridge_agent:
- belongs_to:
- - neutron_all
- neutron_openvswitch_agent:
- belongs_to:
- - neutron_all
- neutron_metering_agent:
- belongs_to:
- - neutron_all
- neutron_l3_agent:
- belongs_to:
- - neutron_all
- neutron_lbaas_agent:
- belongs_to:
- - neutron_all
- neutron_bgp_dragent:
- belongs_to:
- - neutron_all
- neutron_metadata_agent:
- belongs_to:
- - neutron_all
- neutron_sriov_nic_agent:
- belongs_to:
- - neutron_all
- neutron_server:
- belongs_to:
- - neutron_all
- opendaylight:
- belongs_to:
- - neutron_all
- openvswitch_nsh:
- belongs_to:
- - neutron_all
-
-container_skel:
- neutron_agents_container:
- belongs_to:
- - network_containers
- contains:
- - neutron_agent
- - neutron_metadata_agent
- - neutron_metering_agent
- - neutron_linuxbridge_agent
- - neutron_openvswitch_agent
- - openvswitch_nsh
- - neutron_l3_agent
- - neutron_dhcp_agent
- - neutron_lbaas_agent
- - neutron_bgp_dragent
- properties:
- service_name: neutron
- neutron_server_container:
- belongs_to:
- - network_containers
- contains:
- - neutron_server
- - opendaylight
- properties:
- service_name: neutron
- neutron_networking_container:
- belongs_to:
- - network_containers
- contains:
- - openvswitch_nsh
- properties:
- is_metal: true
-
-
-physical_skel:
- network_containers:
- belongs_to:
- - all_containers
- network_hosts:
- belongs_to:
- - hosts
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/inventory_odl/env.d/nova.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/inventory_odl/env.d/nova.yml
deleted file mode 100644
index 1aee092e..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/inventory_odl/env.d/nova.yml
+++ /dev/null
@@ -1,115 +0,0 @@
----
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-component_skel:
- nova_api_metadata:
- belongs_to:
- - nova_all
- nova_api_os_compute:
- belongs_to:
- - nova_all
- nova_api_placement:
- belongs_to:
- - nova_all
- nova_compute:
- belongs_to:
- - nova_all
- nova_conductor:
- belongs_to:
- - nova_all
- nova_scheduler:
- belongs_to:
- - nova_all
- nova_console:
- belongs_to:
- - nova_all
-
-
-container_skel:
- nova_api_metadata_container:
- belongs_to:
- - compute-infra_containers
- - os-infra_containers
- contains:
- - nova_api_metadata
- properties:
- service_name: nova
- nova_api_os_compute_container:
- belongs_to:
- - compute-infra_containers
- - os-infra_containers
- contains:
- - nova_api_os_compute
- properties:
- service_name: nova
- nova_api_placement_container:
- belongs_to:
- - compute-infra_containers
- - os-infra_containers
- contains:
- - nova_api_placement
- properties:
- service_name: nova
- nova_compute_container:
- belongs_to:
- - compute_containers
- contains:
- - neutron_linuxbridge_agent
- - neutron_openvswitch_agent
- - openvswitch_nsh
- - neutron_sriov_nic_agent
- - nova_compute
- properties:
- is_metal: true
- service_name: nova
- nova_conductor_container:
- belongs_to:
- - compute-infra_containers
- - os-infra_containers
- contains:
- - nova_conductor
- properties:
- service_name: nova
- nova_scheduler_container:
- belongs_to:
- - compute-infra_containers
- - os-infra_containers
- contains:
- - nova_scheduler
- properties:
- service_name: nova
- nova_console_container:
- belongs_to:
- - compute-infra_containers
- - os-infra_containers
- contains:
- - nova_console
- properties:
- service_name: nova
-
-
-physical_skel:
- compute-infra_containers:
- belongs_to:
- - all_containers
- compute-infra_hosts:
- belongs_to:
- - hosts
- compute_containers:
- belongs_to:
- - all_containers
- compute_hosts:
- belongs_to:
- - hosts
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/all_tacker.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/all_tacker.yml
deleted file mode 100644
index 0d6b15ec..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/all_tacker.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-# Tacker
-tacker_service_user_name: tacker
-tacker_service_tenant_name: service
-
-tacker_rabbitmq_userid: tacker
-tacker_rabbitmq_vhost: /tacker
-tacker_rabbitmq_port: "{{ rabbitmq_port }}"
-tacker_rabbitmq_use_ssl: "{{ rabbitmq_use_ssl }}"
-tacker_rabbitmq_servers: "{{ rabbitmq_servers }}"
-tacker_rabbitmq_host_group: "{{ rabbitmq_host_group }}"
-
-tacker_service_publicuri: "{{ openstack_service_publicuri_proto|default(tacker_service_proto) }}://{{ external_lb_vip_address }}:{{ tacker_service_port }}"
-tacker_service_adminurl: "{{ tacker_service_adminuri }}/"
-
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/haproxy_config.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/haproxy_config.yml
deleted file mode 100644
index 49b58360..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/haproxy_config.yml
+++ /dev/null
@@ -1,286 +0,0 @@
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-haproxy_default_services:
- - service:
- haproxy_service_name: galera
- haproxy_backend_nodes: "{{ [groups['galera_all'][0]] | default([]) }}" # list expected
- haproxy_backup_nodes: "{{ groups['galera_all'][1:] | default([]) }}"
- haproxy_bind: "{{ [internal_lb_vip_address] }}"
- haproxy_port: 3306
- haproxy_balance_type: tcp
- haproxy_timeout_client: 5000s
- haproxy_timeout_server: 5000s
- haproxy_backend_options:
- - "mysql-check user {{ galera_monitoring_user }}"
- haproxy_whitelist_networks: "{{ haproxy_galera_whitelist_networks }}"
- - service:
- haproxy_service_name: repo_git
- haproxy_backend_nodes: "{{ groups['repo_all'] | default([]) }}"
- haproxy_bind: "{{ [internal_lb_vip_address] }}"
- haproxy_port: 9418
- haproxy_balance_type: tcp
- haproxy_backend_options:
- - tcp-check
- haproxy_whitelist_networks: "{{ haproxy_repo_git_whitelist_networks }}"
- - service:
- haproxy_service_name: repo_all
- haproxy_backend_nodes: "{{ groups['repo_all'] | default([]) }}"
- haproxy_bind: "{{ [internal_lb_vip_address] }}"
- haproxy_port: 8181
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk HEAD /"
- - service:
- haproxy_service_name: repo_cache
- haproxy_backend_nodes: "{{ [groups['repo_all'][0]] | default([]) }}" # list expected
- haproxy_backup_nodes: "{{ groups['repo_all'][1:] | default([]) }}"
- haproxy_bind: "{{ [internal_lb_vip_address] }}"
- haproxy_port: "{{ repo_pkg_cache_port }}"
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk HEAD /acng-report.html"
- haproxy_whitelist_networks: "{{ haproxy_repo_cache_whitelist_networks }}"
- - service:
- haproxy_service_name: glance_api
- haproxy_backend_nodes: "{{ groups['glance_api'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: 9292
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk /healthcheck"
- - service:
- haproxy_service_name: glance_registry
- haproxy_backend_nodes: "{{ groups['glance_registry'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: 9191
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk /healthcheck"
- haproxy_whitelist_networks: "{{ haproxy_glance_registry_whitelist_networks }}"
- - service:
- haproxy_service_name: gnocchi
- haproxy_backend_nodes: "{{ groups['gnocchi_all'] | default([]) }}"
- haproxy_port: 8041
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk /healthcheck"
- - service:
- haproxy_service_name: heat_api_cfn
- haproxy_backend_nodes: "{{ groups['heat_api_cfn'] | default([]) }}"
- haproxy_port: 8000
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk HEAD /"
- - service:
- haproxy_service_name: heat_api_cloudwatch
- haproxy_backend_nodes: "{{ groups['heat_api_cloudwatch'] | default([]) }}"
- haproxy_port: 8003
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk HEAD /"
- - service:
- haproxy_service_name: heat_api
- haproxy_backend_nodes: "{{ groups['heat_api'] | default([]) }}"
- haproxy_port: 8004
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk HEAD /"
- - service:
- haproxy_service_name: keystone_service
- haproxy_backend_nodes: "{{ groups['keystone_all'] | default([]) }}"
- haproxy_port: 5000
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_balance_type: "http"
- haproxy_backend_options:
- - "httpchk HEAD /"
- - service:
- haproxy_service_name: keystone_admin
- haproxy_backend_nodes: "{{ groups['keystone_all'] | default([]) }}"
- haproxy_port: 35357
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_balance_type: "http"
- haproxy_backend_options:
- - "httpchk HEAD /"
- haproxy_whitelist_networks: "{{ haproxy_keystone_admin_whitelist_networks }}"
- - service:
- haproxy_service_name: neutron_server
- haproxy_backend_nodes: "{{ groups['neutron_server'] | default([]) }}"
- haproxy_port: 9696
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk GET /"
- - service:
- haproxy_service_name: nova_api_metadata
- haproxy_backend_nodes: "{{ groups['nova_api_metadata'] | default([]) }}"
- haproxy_port: 8775
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk HEAD /"
- haproxy_whitelist_networks: "{{ haproxy_nova_metadata_whitelist_networks }}"
- - service:
- haproxy_service_name: nova_api_os_compute
- haproxy_backend_nodes: "{{ groups['nova_api_os_compute'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: 8774
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk HEAD /"
- - service:
- haproxy_service_name: nova_api_placement
- haproxy_backend_nodes: "{{ groups['nova_api_placement'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: 8780
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk HEAD /"
- haproxy_backend_httpcheck_options:
- - "expect status 401"
- - service:
- haproxy_service_name: nova_console
- haproxy_backend_nodes: "{{ groups['nova_console'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: "{{ nova_console_port }}"
- haproxy_balance_type: http
- haproxy_timeout_client: 60m
- haproxy_timeout_server: 60m
- haproxy_balance_alg: source
- haproxy_backend_options:
- - "httpchk HEAD /"
- haproxy_backend_httpcheck_options:
- - "expect status 404"
- - service:
- haproxy_service_name: cinder_api
- haproxy_backend_nodes: "{{ groups['cinder_api'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: 8776
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk HEAD /"
- - service:
- haproxy_service_name: horizon
- haproxy_backend_nodes: "{{ groups['horizon_all'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_ssl_all_vips: true
- haproxy_port: "{{ haproxy_ssl | ternary(443,80) }}"
- haproxy_backend_port: 80
- haproxy_redirect_http_port: 80
- haproxy_balance_type: http
- haproxy_balance_alg: source
- haproxy_backend_options:
- - "httpchk HEAD /"
- - service:
- haproxy_service_name: sahara_api
- haproxy_backend_nodes: "{{ groups['sahara_api'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_balance_alg: source
- haproxy_port: 8386
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk /healthcheck"
- - service:
- haproxy_service_name: swift_proxy
- haproxy_backend_nodes: "{{ groups['swift_proxy'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_balance_alg: source
- haproxy_port: 8080
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk /healthcheck"
- - service:
- haproxy_service_name: aodh_api
- haproxy_backend_nodes: "{{ groups['aodh_api'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: 8042
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk HEAD /"
- haproxy_backend_httpcheck_options:
- - "expect status 401"
- - service:
- haproxy_service_name: ironic_api
- haproxy_backend_nodes: "{{ groups['ironic_api'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: 6385
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk GET /"
- - service:
- haproxy_service_name: rabbitmq_mgmt
- haproxy_backend_nodes: "{{ groups['rabbitmq'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: 15672
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk HEAD /"
- haproxy_whitelist_networks: "{{ haproxy_rabbitmq_management_whitelist_networks }}"
- - service:
- haproxy_service_name: magnum
- haproxy_backend_nodes: "{{ groups['magnum_all'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: 9511
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk GET /"
- - service:
- haproxy_service_name: trove
- haproxy_backend_nodes: "{{ groups['trove_api'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: 8779
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk HEAD /"
- - service:
- haproxy_service_name: barbican
- haproxy_backend_nodes: "{{ groups['barbican_api'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: 9311
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk GET /"
- - service:
- haproxy_service_name: designate_api
- haproxy_backend_nodes: "{{ groups['designate_api'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: 9001
- haproxy_balance_type: http
- haproxy_backend_options:
- - "forwardfor"
- - "httpchk /versions"
- - "httplog"
- - service:
- haproxy_service_name: octavia
- haproxy_backend_nodes: "{{ groups['octavia_all'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: 9876
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk GET /"
- haproxy_whitelist_networks: "{{ haproxy_octavia_whitelist_networks }}"
- - service:
- haproxy_service_name: tacker
- haproxy_backend_nodes: "{{ groups['tacker_all'] | default([]) }}"
- haproxy_port: 9890
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_balance_type: http
- haproxy_backend_options:
- - "forwardfor"
- - "httpchk"
- - "httplog"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/openstack_services_pike.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/openstack_services_pike.yml
deleted file mode 100644
index 59182abb..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/openstack_services_pike.yml
+++ /dev/null
@@ -1,217 +0,0 @@
----
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-## NOTICE on items in this file:
-## * If you use anything in the *._git_install_branch field that is not a TAG
-## make sure to leave an in-line comment as to "why".
-
-## For the sake of anyone else editing this file:
-## * If you add services to this file please do so in alphabetical order.
-## * Every entry should be name spaced with the name of the client followed by an "_"
-## * All items with this file should be separated by `name_` note that the name of the
-## package should be one long name with no additional `_` separating it.
-
-
-### Before this is shipped all of these services should have a tag set as the branch,
-### or have a comment / reason attached to them as to why a tag can not work.
-
-
-## Global Requirements
-requirements_git_repo: https://git.openstack.org/openstack/requirements
-requirements_git_install_branch: 6b102588bd6c817d1957236fc24779b8912b3353 # HEAD of "stable/pike" as of 16.02.2018
-
-
-## Aodh service
-aodh_git_repo: https://git.openstack.org/openstack/aodh
-aodh_git_install_branch: f0ec11eef92d39bd3f07e026a404e2e7aa7fa6bc # HEAD of "stable/pike" as of 16.02.2018
-aodh_git_project_group: aodh_all
-
-
-## Barbican service
-barbican_git_repo: https://git.openstack.org/openstack/barbican
-barbican_git_install_branch: 1d20692a6fe77eacdafdd307498bb11da285c437 # HEAD of "stable/pike" as of 16.02.2018
-barbican_git_project_group: barbican_all
-
-
-## Ceilometer service
-ceilometer_git_repo: https://git.openstack.org/openstack/ceilometer
-ceilometer_git_install_branch: 35a33b6daacfb9792382d4d5da4880737ab78fed # HEAD of "stable/pike" as of 16.02.2018
-ceilometer_git_project_group: ceilometer_all
-
-
-## Cinder service
-cinder_git_repo: https://git.openstack.org/openstack/cinder
-cinder_git_install_branch: 4fb3a702ba8c3de24c41a6f706597bfa81e60435 # HEAD of "stable/pike" as of 16.02.2018
-cinder_git_project_group: cinder_all
-
-
-## Designate service
-designate_git_repo: https://git.openstack.org/openstack/designate
-designate_git_install_branch: 12fbb38799d6731862f07a9e299d5476a5c02f90 # HEAD of "stable/pike" as of 16.02.2018
-designate_git_project_group: designate_all
-
-
-## Horizon Designate dashboard plugin
-designate_dashboard_git_repo: https://git.openstack.org/openstack/designate-dashboard
-designate_dashboard_git_install_branch: bc128a7c29a427933fc4ca94a7510ef8c97e5206 # HEAD of "stable/pike" as of 16.02.2018
-designate_dashboard_git_project_group: horizon_all
-
-
-## Dragonflow service
-dragonflow_git_repo: https://git.openstack.org/openstack/dragonflow
-dragonflow_git_install_branch: 84f1a26ff8e976b753593dc445e09a4c1a675a21 # Frozen HEAD of "master" as of 14.10.2017 (no stable/pike branch)
-dragonflow_git_project_group: neutron_all
-
-
-## Glance service
-glance_git_repo: https://git.openstack.org/openstack/glance
-glance_git_install_branch: 5c1f76d91012c9cc7b12f76e917af9e9a9bb7667 # HEAD of "stable/pike" as of 16.02.2018
-glance_git_project_group: glance_all
-
-
-## Heat service
-heat_git_repo: https://git.openstack.org/openstack/heat
-heat_git_install_branch: cda1cf15f4a20355c58bd26e2f4ce00007a15266 # HEAD of "stable/pike" as of 16.02.2018
-heat_git_project_group: heat_all
-
-
-## Horizon service
-horizon_git_repo: https://git.openstack.org/openstack/horizon
-horizon_git_install_branch: 5d77b95586fd5491b3dee5d5c199c34f53680370 # HEAD of "stable/pike" as of 16.02.2018
-horizon_git_project_group: horizon_all
-
-## Horizon Ironic dashboard plugin
-ironic_dashboard_git_repo: https://git.openstack.org/openstack/ironic-ui
-ironic_dashboard_git_install_branch: ca9f9f1e9c2baba5415bee2e7961221a3daa6da6 # HEAD of "stable/pike" as of 16.02.2018
-ironic_dashboard_git_project_group: horizon_all
-
-## Horizon Magnum dashboard plugin
-magnum_dashboard_git_repo: https://git.openstack.org/openstack/magnum-ui
-magnum_dashboard_git_install_branch: 0b9fc50aada1a3e214acaad1204b48c96a549e5f # HEAD of "stable/pike" as of 16.02.2018
-magnum_dashboard_git_project_group: horizon_all
-
-## Horizon LBaaS dashboard plugin
-neutron_lbaas_dashboard_git_repo: https://git.openstack.org/openstack/neutron-lbaas-dashboard
-neutron_lbaas_dashboard_git_install_branch: f51341588490baca3795b6f068347cd2260d2e3b # HEAD of "stable/pike" as of 16.02.2018
-neutron_lbaas_dashboard_git_project_group: horizon_all
-
-## Horizon Sahara dashboard plugin
-sahara_dashboard_git_repo: https://git.openstack.org/openstack/sahara-dashboard
-sahara_dashboard_git_install_branch: d56477dba6f4073ab4df6126bb489743779a3270 # HEAD of "stable/pike" as of 16.02.2018
-sahara_dashboard_git_project_group: horizon_all
-
-
-## Keystone service
-keystone_git_repo: https://git.openstack.org/openstack/keystone
-keystone_git_install_branch: e8953d03926b2a5594bbc3d5d8af6854b97cddb7 # HEAD of "stable/pike" as of 16.02.2018
-keystone_git_project_group: keystone_all
-
-
-## Neutron service
-neutron_git_repo: https://git.openstack.org/openstack/neutron
-neutron_git_install_branch: f9fcf5e34ac68ec44c4b61bd76117d2d9213792b # HEAD of "stable/pike" as of 16.02.2018
-neutron_git_project_group: neutron_all
-
-neutron_lbaas_git_repo: https://git.openstack.org/openstack/neutron-lbaas
-neutron_lbaas_git_install_branch: 49448db1a69d53ad0d137216b1805690a7daef45 # HEAD of "stable/pike" as of 16.02.2018
-neutron_lbaas_git_project_group: neutron_all
-
-neutron_vpnaas_git_repo: https://git.openstack.org/openstack/neutron-vpnaas
-neutron_vpnaas_git_install_branch: 60e4e7113b5fbbf28e97ebce2f40b7f1675200e6 # HEAD of "stable/pike" as of 16.02.2018
-neutron_vpnaas_git_project_group: neutron_all
-
-neutron_fwaas_git_repo: https://git.openstack.org/openstack/neutron-fwaas
-neutron_fwaas_git_install_branch: ea4e95913d843d72c8a3079203171813ba69895d # HEAD of "stable/pike" as of 16.02.2018
-neutron_fwaas_git_project_group: neutron_all
-
-neutron_dynamic_routing_git_repo: https://git.openstack.org/openstack/neutron-dynamic-routing
-neutron_dynamic_routing_git_install_branch: 9098d4447581117e857d2f86fb4a0508b5ffbb6a # HEAD of "stable/pike" as of 16.02.2018
-neutron_dynamic_routing_git_project_group: neutron_all
-
-networking_calico_git_repo: https://git.openstack.org/openstack/networking-calico
-networking_calico_git_install_branch: cc3628125775f2f1b3c57c95db3d6b50278dc92b # HEAD of "master" as of 16.02.2018
-networking_calico_git_project_group: neutron_all
-
-## Nova service
-nova_git_repo: https://git.openstack.org/openstack/nova
-nova_git_install_branch: 806eda3da84d6f9b47c036ff138415458b837536 # HEAD of "stable/pike" as of 16.02.2018
-nova_git_project_group: nova_all
-
-
-## PowerVM Virt Driver
-nova_powervm_git_repo: https://git.openstack.org/openstack/nova-powervm
-nova_powervm_git_install_branch: e0b516ca36fa5dfd38ae6f7ea97afd9a52f313ed # HEAD of "stable/pike" as of 16.02.2018
-nova_powervm_git_project_group: nova_all
-
-
-## LXD Virt Driver
-nova_lxd_git_repo: https://git.openstack.org/openstack/nova-lxd
-nova_lxd_git_install_branch: 9747c274138d9ef40512d5015e9e581f6bbec5d9 # HEAD of "stable/pike" as of 16.02.2018
-nova_lxd_git_project_group: nova_all
-
-
-## Sahara service
-sahara_git_repo: https://git.openstack.org/openstack/sahara
-sahara_git_install_branch: a39c690aeca04dcde56384448ce577fd50eb8bf1 # HEAD of "stable/pike" as of 16.02.2018
-sahara_git_project_group: sahara_all
-
-
-## Swift service
-swift_git_repo: https://git.openstack.org/openstack/swift
-swift_git_install_branch: 0ff2d5e3b85a42914a89eac64ed9a87172334a2c # HEAD of "stable/pike" as of 16.02.2018
-swift_git_project_group: swift_all
-
-
-## Swift3 middleware
-swift_swift3_git_repo: https://git.openstack.org/openstack/swift3
-swift_swift3_git_install_branch: 1c117c96dda8113c3398c16e68b61efef397de74 # HEAD of "master" as of 16.02.2018
-swift_swift3_git_project_group: swift_all
-
-
-## Ironic service
-ironic_git_repo: https://git.openstack.org/openstack/ironic
-ironic_git_install_branch: f6f55a74526e906d061f9abcd9a1ad704f6dcfe5 # HEAD of "stable/pike" as of 16.02.2018
-ironic_git_project_group: ironic_all
-
-## Magnum service
-magnum_git_repo: https://git.openstack.org/openstack/magnum
-magnum_git_install_branch: 6d9914de87a67cd5dd33f9f37b49a28486c52cc6 # HEAD of "stable/pike" as of 16.02.2018
-magnum_git_project_group: magnum_all
-
-## Trove service
-trove_git_repo: https://git.openstack.org/openstack/trove
-trove_git_install_branch: 2a39699f8612a8f82dfce68949b714d19a102499 # HEAD of "stable/pike" as of 16.02.2018
-trove_git_project_group: trove_all
-
-## Horizon Trove dashboard plugin
-trove_dashboard_git_repo: https://git.openstack.org/openstack/trove-dashboard
-trove_dashboard_git_install_branch: bffd0776d139f38f96ce8ded07ccde4b5a41bc7a # HEAD of "stable/pike" as of 16.02.2018
-trove_dashboard_git_project_group: horizon_all
-
-## Octavia service
-octavia_git_repo: https://git.openstack.org/openstack/octavia
-octavia_git_install_branch: a39cf133518716dc1a60069f5aa46afa384db3a8 # HEAD of "stable/pike" as of 16.02.2018
-octavia_git_project_group: octavia_all
-
-## Molteniron service
-molteniron_git_repo: https://git.openstack.org/openstack/molteniron
-molteniron_git_install_branch: 094276cda77d814d07ad885e7d63de8d1243750a # HEAD of "master" as of 16.02.2018
-molteniron_git_project_group: molteniron_all
-
-## Tacker service
-tacker_git_repo: https://github.com/manuelbuil/tacker
-tacker_git_install_branch: pike-SFC-support-Feb
-tacker_git_project_group: tacker_all
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/os-tacker-install.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/os-tacker-install.yml
deleted file mode 100644
index dd965951..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/os-tacker-install.yml
+++ /dev/null
@@ -1,63 +0,0 @@
----
-# Copyright 2017, SUSE LINUX GmbH.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-- name: Install the tacker components
- hosts: tacker_all
- gather_facts: "{{ gather_facts | default(True) }}"
- max_fail_percentage: 20
- user: root
- pre_tasks:
- - include: common-tasks/os-lxc-container-setup.yml
- - include: common-tasks/rabbitmq-vhost-user.yml
- static: no
- vars:
- user: "{{ tacker_rabbitmq_userid }}"
- password: "{{ tacker_rabbitmq_password }}"
- vhost: "{{ tacker_rabbitmq_vhost }}"
- _rabbitmq_host_group: "{{ tacker_rabbitmq_host_group }}"
- when:
- - inventory_hostname == groups['tacker_all'][0]
- - groups[tacker_rabbitmq_host_group] | length > 0
- - include: common-tasks/os-log-dir-setup.yml
- vars:
- log_dirs:
- - src: "/openstack/log/{{ inventory_hostname }}-tacker"
- dest: "/var/log/tacker"
- - include: common-tasks/mysql-db-user.yml
- static: no
- vars:
- user_name: "{{ tacker_galera_user }}"
- password: "{{ tacker_container_mysql_password }}"
- login_host: "{{ tacker_galera_address }}"
- db_name: "{{ tacker_galera_database }}"
- when: inventory_hostname == groups['tacker_all'][0]
- - include: common-tasks/package-cache-proxy.yml
- roles:
- - role: "os_tacker"
- - role: "openstack_openrc"
- tags:
- - openrc
- - role: "rsyslog_client"
- rsyslog_client_log_rotate_file: tacker_log_rotate
- rsyslog_client_log_dir: "/var/log/tacker"
- rsyslog_client_config_name: "99-tacker-rsyslog-client.conf"
- tags:
- - rsyslog
- vars:
- is_metal: "{{ properties.is_metal|default(false) }}"
- tacker_galera_address: "{{ internal_lb_vip_address }}"
- environment: "{{ deployment_environment_variables | default({}) }}"
- tags:
- - tacker
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/setup-openstack.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/setup-openstack.yml
deleted file mode 100644
index 94bb5291..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/setup-openstack.yml
+++ /dev/null
@@ -1,45 +0,0 @@
----
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-- include: os-keystone-install.yml
-- include: os-barbican-install.yml
-- include: os-glance-install.yml
-- include: os-cinder-install.yml
-- include: os-nova-install.yml
-- include: os-neutron-install.yml
-- include: os-heat-install.yml
-- include: os-horizon-install.yml
-- include: os-ceilometer-install.yml
-- include: os-aodh-install.yml
-- include: os-designate-install.yml
-#NOTE(stevelle) Ensure Gnocchi identities exist before Swift
-- include: os-gnocchi-install.yml
- when:
- - gnocchi_storage_driver is defined
- - gnocchi_storage_driver == 'swift'
- vars:
- gnocchi_identity_only: True
-- include: os-swift-install.yml
-- include: os-gnocchi-install.yml
-- include: os-tacker-install.yml
-- include: os-ironic-install.yml
-- include: os-magnum-install.yml
-- include: os-trove-install.yml
-- include: os-sahara-install.yml
-- include: os-molteniron-install.yml
-- include: os-octavia-install.yml
-- include: os-tempest-install.yml
- when: (tempest_install | default(False)) | bool or (tempest_run | default(False)) | bool
-
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/tacker.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/tacker.yml
deleted file mode 100644
index 9ceabbc2..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/tacker.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-# Copyright 2017, SUSE Linux GmbH
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-component_skel:
- tacker_server:
- belongs_to:
- - tacker_all
-
-
-container_skel:
- tacker_container:
- belongs_to:
- - mano_containers
- contains:
- - tacker_server
-
-
-physical_skel:
- mano_containers:
- belongs_to:
- - all_containers
- mano_hosts:
- belongs_to:
- - hosts
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/tacker_all.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/tacker_all.yml
deleted file mode 100644
index 2a01a160..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/tacker_all.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-# Copyright 2017, SUSE LINUX GmbH
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-tacker_service_region: "{{ service_region }}"
-tacker_service_in_ldap: "{{ service_ldap_backend_enabled }}"
-
-tacker_aodh_enabled: "{{ groups['aodh_all'] is defined and groups['aodh_all'] | length > 0 }}"
-tacker_gnocchi_enabled: "{{ groups['gnocchi_all'] is defined and groups['gnocchi_all'] | length > 0 }}"
-
-# NOTE: these and their swift_all.yml counterpart should be moved back to all.yml once swift with tacker gets proper SSL support
-# swift_rabbitmq_telemetry_port: "{{ rabbitmq_port }}"
-# swift_rabbitmq_telemetry_use_ssl: "{{ rabbitmq_use_ssl }}"
-
-# Ensure that the package state matches the global setting
-tacker_package_state: "{{ package_state }}"
-
-# venv fetch configuration
-tacker_venv_tag: "{{ venv_tag }}"
-tacker_venv_download_url: "{{ venv_base_download_url }}/tacker-{{ openstack_release }}-{{ ansible_architecture | lower }}.tgz"
-
-# locations for fetching the default files from the git source
-tacker_git_config_lookup_location: "{{ openstack_repo_url }}/openstackgit/tacker"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/user_secrets.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/user_secrets.yml
deleted file mode 100644
index 50c7c0e8..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/user_secrets.yml
+++ /dev/null
@@ -1,163 +0,0 @@
----
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-############################# WARNING ########################################
-# The playbooks do not currently manage changing passwords in an existing
-# environment. Changing passwords and re-running the playbooks will fail
-# and may break your OpenStack environment.
-############################# WARNING ########################################
-
-
-## Rabbitmq Options
-rabbitmq_cookie_token:
-rabbitmq_monitoring_password:
-
-## Tokens
-memcached_encryption_key:
-
-## Galera Options
-galera_root_password:
-
-## Keystone Options
-keystone_container_mysql_password:
-keystone_auth_admin_password:
-keystone_service_password:
-keystone_rabbitmq_password:
-
-## Ceilometer Options:
-ceilometer_container_db_password:
-ceilometer_service_password:
-ceilometer_telemetry_secret:
-ceilometer_rabbitmq_password:
-
-## Aodh Options:
-aodh_container_db_password:
-aodh_service_password:
-aodh_rabbitmq_password:
-
-## Cinder Options
-cinder_container_mysql_password:
-cinder_service_password:
-cinder_profiler_hmac_key:
-cinder_rabbitmq_password:
-
-## Ceph/rbd: a UUID to be used by libvirt to refer to the client.cinder user
-cinder_ceph_client_uuid:
-
-## Glance Options
-glance_container_mysql_password:
-glance_service_password:
-glance_profiler_hmac_key:
-glance_rabbitmq_password:
-
-## Gnocchi Options:
-gnocchi_container_mysql_password:
-gnocchi_service_password:
-
-## Heat Options
-heat_stack_domain_admin_password:
-heat_container_mysql_password:
-### THE HEAT AUTH KEY NEEDS TO BE 32 CHARACTERS LONG ##
-heat_auth_encryption_key:
-### THE HEAT AUTH KEY NEEDS TO BE 32 CHARACTERS LONG ##
-heat_service_password:
-heat_rabbitmq_password:
-
-## Ironic options
-ironic_rabbitmq_password:
-ironic_container_mysql_password:
-ironic_service_password:
-ironic_swift_temp_url_secret_key:
-
-## Horizon Options
-horizon_container_mysql_password:
-horizon_secret_key:
-
-## Neutron Options
-neutron_container_mysql_password:
-neutron_service_password:
-neutron_rabbitmq_password:
-neutron_ha_vrrp_auth_password:
-
-## Nova Options
-nova_container_mysql_password:
-nova_api_container_mysql_password:
-nova_metadata_proxy_secret:
-nova_service_password:
-nova_rabbitmq_password:
-nova_placement_service_password:
-nova_placement_container_mysql_password:
-
-# LXD Options for nova compute
-lxd_trust_password:
-
-## Octavia Options
-octavia_container_mysql_password:
-octavia_service_password:
-octavia_health_hmac_key:
-octavia_rabbitmq_password:
-
-## Sahara Options
-sahara_container_mysql_password:
-sahara_rabbitmq_password:
-sahara_service_password:
-
-## Swift Options:
-swift_service_password:
-swift_dispersion_password:
-### Once the swift cluster has been setup DO NOT change these hash values!
-swift_hash_path_suffix:
-swift_hash_path_prefix:
-# Swift needs a telemetry password when using ceilometer
-swift_rabbitmq_telemetry_password:
-
-## haproxy stats password
-haproxy_stats_password:
-haproxy_keepalived_authentication_password:
-
-## Magnum Options
-magnum_service_password:
-magnum_galera_password:
-magnum_rabbitmq_password:
-magnum_trustee_password:
-
-## Rally Options:
-rally_galera_password:
-
-## Trove Options
-trove_galera_password:
-trove_rabbitmq_password:
-trove_service_password:
-trove_admin_user_password:
-trove_taskmanager_rpc_encr_key:
-trove_inst_rpc_key_encr_key:
-
-## Barbican Options
-barbican_galera_password:
-barbican_rabbitmq_password:
-barbican_service_password:
-
-## Designate Options
-designate_galera_password:
-designate_rabbitmq_password:
-designate_service_password:
-
-## Molteniron Options:
-molteniron_container_mysql_password:
-
-## Tacker options
-tacker_rabbitmq_password:
-tacker_service_password:
-tacker_container_mysql_password:
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/add-sfc-repos-and-inventory-pike.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/add-sfc-repos-and-inventory-pike.yml
deleted file mode 100644
index 3c80fa5e..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/add-sfc-repos-and-inventory-pike.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-
-- name: Add networking-odl and networking-sfc repos
- copy:
- src: openstack-ansible/playbooks/defaults/repo_packages/opendaylight-pike.yml
- dest: "{{openstack_osa_path}}/playbooks/defaults/repo_packages/opendaylight.yml"
-
-- name: Provide nova inventory which adds OVS-NSH hosts
- copy:
- src: openstack-ansible/playbooks/inventory_odl/env.d/nova.yml
- dest: "{{openstack_osa_path}}/playbooks/inventory/env.d/nova.yml"
-
-- name: Provide neutron inventory which adds ODL hosts
- copy:
- src: openstack-ansible/playbooks/inventory_odl/env.d/neutron.yml
- dest: "{{openstack_osa_path}}/playbooks/inventory/env.d/neutron.yml"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-files-master.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-config-files.yml
index fbaa7301..5d677d1c 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-files-master.yml
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-config-files.yml
@@ -13,8 +13,8 @@
dest: "{{openstack_osa_etc_path}}/user_sfc_scenarios_variables.yml"
when: ansible_pkg_mgr == 'apt'
-# To get the mano_host variable (can only be defined here for the inventory)
+# To get the mano_hosts & metering-infra_hosts variables (can only be defined here for the inventory)
- name: copy openstack_user_config.yml
copy:
- src: "tacker_files/{{xci_flavor}}/openstack_user_config.yml"
+ src: "{{xci_flavor}}/openstack_user_config.yml"
dest: "{{openstack_osa_etc_path}}/openstack_user_config.yml"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-files-pike.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-files-pike.yml
deleted file mode 100644
index a9d197da..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-files-pike.yml
+++ /dev/null
@@ -1,57 +0,0 @@
----
-#
-
-- name: copy user_sfc_scenarios_variables.yml
- copy:
- src: "{{xci_flavor}}/user_sfc_scenarios_variables_pike.yml"
- dest: "{{openstack_osa_etc_path}}/user_sfc_scenarios_variables.yml"
-
-- name: copy OPNFV role requirements
- copy:
- src: "ansible-role-requirements-pike.yml"
- dest: "{{openstack_osa_path}}/ansible-role-requirements.yml"
-
-- name: copy openstack_user_config.yml
- copy:
- src: "tacker_files/{{xci_flavor}}/openstack_user_config.yml"
- dest: "{{openstack_osa_etc_path}}/openstack_user_config.yml"
-
-- name: copy tacker inventory file
- copy:
- src: "tacker_files/tacker.yml"
- dest: "{{openstack_osa_etc_path}}/env.d/tacker.yml"
-
-- name: copy user_secrets.yml for tacker
- copy:
- src: "tacker_files/user_secrets.yml"
- dest: "{{openstack_osa_etc_path}}/user_secrets.yml"
-
-- name: copy haproxy_config.yml for tacker
- copy:
- src: "tacker_files/haproxy_config.yml"
- dest: "{{openstack_osa_path}}/group_vars/all/haproxy_config.yml"
-
-- name: copy openstack_services.yml with tacker
- copy:
- src: "tacker_files/openstack_services_pike.yml"
- dest: "{{openstack_osa_path}}/playbooks/defaults/repo_packages/openstack_services.yml"
-
-- name: copy all/tacker.yml
- copy:
- src: "tacker_files/all_tacker.yml"
- dest: "{{openstack_osa_path}}/group_vars/all/tacker.yml"
-
-- name: copy tacker_all.yml
- copy:
- src: "tacker_files/tacker_all.yml"
- dest: "{{openstack_osa_path}}/group_vars/tacker_all.yml"
-
-- name: copy setup-openstack.yml
- copy:
- src: "tacker_files/setup-openstack.yml"
- dest: "{{openstack_osa_path}}/playbooks/setup-openstack.yml"
-
-- name: copy os-tacker-install.yml
- copy:
- src: "tacker_files/os-tacker-install.yml"
- dest: "{{openstack_osa_path}}/playbooks/os-tacker-install.yml"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/main.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/main.yml
index 628d8af8..f3b4e736 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/main.yml
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/main.yml
@@ -8,14 +8,5 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-- name: Add SFC repos and inventory for Pike
- include: add-sfc-repos-and-inventory-pike.yml
- when: openstack_osa_version == "stable/pike"
-
-- name: Copy the OSA not-yet-upstreamed files for Pike
- include: copy-OSA-files-pike.yml
- when: openstack_osa_version == "stable/pike"
-
-- name: Copy the OSA not-yet-upstreamed files for master
- include: copy-OSA-files-master.yml
- when: openstack_osa_version != "stable/pike"
+- name: Copy the OSA config files
+ include: copy-OSA-config-files.yml
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/post-deployment.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/post-deployment.yml
new file mode 100644
index 00000000..837a8ee3
--- /dev/null
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/post-deployment.yml
@@ -0,0 +1,17 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE Linux GmbH
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: "Fetch the ip of the neutron server container"
+ shell: 'grep controller00_neutron_server_container -n1 /etc/openstack_deploy/openstack_inventory.json | grep ansible_host | cut -d":" -f2 | cut -d "\"" -f2'
+ register: ip
+ changed_when: False
+
+- name: Fetch the ml2_conf.ini to process ODL variables
+ command: "scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no {{ ip.stdout }}:/etc/neutron/plugins/ml2/ml2_conf.ini /tmp/ml2_conf.ini"
+ changed_when: False
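(A possible next step once the file is on the deployment host — a sketch, not part of this change; it assumes networking-odl's usual [ml2_odl] section and Ansible's ini lookup plugin.)

    - name: Extract the ODL northbound URL from the fetched ml2_conf.ini
      set_fact:
        odl_url: "{{ lookup('ini', 'url section=ml2_odl file=/tmp/ml2_conf.ini') }}"
      # odl_url can then be fed to the SFC/functest configuration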
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/templates/ha/user_sfc_scenarios_variables_suse.yml.j2 b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/ha/user_sfc_scenarios_variables_suse.yml.j2
index 435ec9df..8cec75c3 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/templates/ha/user_sfc_scenarios_variables_suse.yml.j2
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/ha/user_sfc_scenarios_variables_suse.yml.j2
@@ -116,6 +116,9 @@ openstack_host_specific_kernel_modules:
- name: openvswitch
openstack_host_metal_distro_packages: "{{ ovs_nsh_required_metal_packages + _openstack_host_metal_distro_packages }}"
+
+# Avoid functest failures: without forced metadata the cirros VM gets stuck trying to reach the metadata server
+neutron_dnsmasq_force_metadata: True
{% endraw %}
{% if odl_repo_version is defined %}
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/templates/ha/user_sfc_scenarios_variables_ubuntu.yml.j2 b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/ha/user_sfc_scenarios_variables_ubuntu.yml.j2
index 9cc27279..c5b1f19b 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/templates/ha/user_sfc_scenarios_variables_ubuntu.yml.j2
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/ha/user_sfc_scenarios_variables_ubuntu.yml.j2
@@ -103,17 +103,27 @@ provider_networks:
- cinder_volume
- nova_compute
+# This repo provides OVS 2.9.2
user_external_repos_list:
- - repo: 'ppa:mardim/mardim-ppa'
+ - repo: 'deb http://ppa.launchpad.net/mardim/mardim-ppa/ubuntu xenial main'
+
+user_external_repo_keys_list:
+ - id: 6E2EEDF1A3925D9D727EB1176FAD8BA42AAAEB9F
+ keyserver: keyserver.ubuntu.com
openstack_host_specific_kernel_modules:
- name: openvswitch
ovs_nsh_required_metal_packages:
+ - python-six
+ - python3-six
- linux-headers-{{ ansible_kernel }}
- openvswitch-datapath-dkms
openstack_host_metal_distro_packages: "{{ ovs_nsh_required_metal_packages + _openstack_host_metal_distro_packages }}"
+
+# Avoid functest failures: without forced metadata the cirros VM gets stuck trying to reach the metadata server
+neutron_dnsmasq_force_metadata: True
{% endraw %}
{% if odl_repo_version is defined %}
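(For reference, the repo/key pair introduced above is the declarative equivalent of the manual steps below — a sketch of what the openstack_hosts role effectively performs; the exact module calls may differ.)

    # trust the PPA signing key
    apt-key adv --keyserver keyserver.ubuntu.com \
      --recv-keys 6E2EEDF1A3925D9D727EB1176FAD8BA42AAAEB9F
    # add the OVS 2.9.2 package source
    echo 'deb http://ppa.launchpad.net/mardim/mardim-ppa/ubuntu xenial main' \
      > /etc/apt/sources.list.d/mardim-ppa.list
    apt-get update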
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/templates/mini/user_sfc_scenarios_variables_suse.yml.j2 b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/mini/user_sfc_scenarios_variables_suse.yml.j2
index 32c73c24..6c46b963 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/templates/mini/user_sfc_scenarios_variables_suse.yml.j2
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/mini/user_sfc_scenarios_variables_suse.yml.j2
@@ -115,6 +115,9 @@ openstack_host_specific_kernel_modules:
- name: openvswitch
openstack_host_metal_distro_packages: "{{ ovs_nsh_required_metal_packages + _openstack_host_metal_distro_packages }}"
+
+# Avoid functest failures: without forced metadata the cirros VM gets stuck trying to reach the metadata server
+neutron_dnsmasq_force_metadata: True
{% endraw %}
{% if odl_repo_version is defined %}
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/templates/mini/user_sfc_scenarios_variables_ubuntu.yml.j2 b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/mini/user_sfc_scenarios_variables_ubuntu.yml.j2
index bc554090..0194456e 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/templates/mini/user_sfc_scenarios_variables_ubuntu.yml.j2
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/mini/user_sfc_scenarios_variables_ubuntu.yml.j2
@@ -102,17 +102,27 @@ provider_networks:
- cinder_volume
- nova_compute
+# This repo provides OVS 2.9.2
user_external_repos_list:
- - repo: 'ppa:mardim/mardim-ppa'
+ - repo: 'deb http://ppa.launchpad.net/mardim/mardim-ppa/ubuntu xenial main'
+
+user_external_repo_keys_list:
+ - id: 6E2EEDF1A3925D9D727EB1176FAD8BA42AAAEB9F
+ keyserver: keyserver.ubuntu.com
openstack_host_specific_kernel_modules:
- name: openvswitch
ovs_nsh_required_metal_packages:
+ - python-six
+ - python3-six
- linux-headers-{{ ansible_kernel }}
- openvswitch-datapath-dkms
openstack_host_metal_distro_packages: "{{ ovs_nsh_required_metal_packages + _openstack_host_metal_distro_packages }}"
+
+# Avoid functest failures: without forced metadata the cirros VM gets stuck trying to reach the metadata server
+neutron_dnsmasq_force_metadata: True
{% endraw %}
{% if odl_repo_version is defined %}
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/templates/noha/user_sfc_scenarios_variables_suse.yml.j2 b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/noha/user_sfc_scenarios_variables_suse.yml.j2
index 32c73c24..6c46b963 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/templates/noha/user_sfc_scenarios_variables_suse.yml.j2
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/noha/user_sfc_scenarios_variables_suse.yml.j2
@@ -115,6 +115,9 @@ openstack_host_specific_kernel_modules:
- name: openvswitch
openstack_host_metal_distro_packages: "{{ ovs_nsh_required_metal_packages + _openstack_host_metal_distro_packages }}"
+
+# Avoid functest failures: without forced metadata the cirros VM gets stuck trying to reach the metadata server
+neutron_dnsmasq_force_metadata: True
{% endraw %}
{% if odl_repo_version is defined %}
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/templates/noha/user_sfc_scenarios_variables_ubuntu.yml.j2 b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/noha/user_sfc_scenarios_variables_ubuntu.yml.j2
index bc554090..1ec821d5 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/templates/noha/user_sfc_scenarios_variables_ubuntu.yml.j2
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/noha/user_sfc_scenarios_variables_ubuntu.yml.j2
@@ -102,17 +102,27 @@ provider_networks:
- cinder_volume
- nova_compute
+# This repo provides OVS 2.9.2
user_external_repos_list:
- - repo: 'ppa:mardim/mardim-ppa'
+ - repo: 'deb http://ppa.launchpad.net/mardim/mardim-ppa/ubuntu xenial main'
+
+user_external_repo_keys_list:
+ - id: 6E2EEDF1A3925D9D727EB1176FAD8BA42AAAEB9F
+ keyserver: keyserver.ubuntu.com
openstack_host_specific_kernel_modules:
- name: openvswitch
ovs_nsh_required_metal_packages:
+ - python-six
+ - python3-six
- linux-headers-{{ ansible_kernel }}
- openvswitch-datapath-dkms
openstack_host_metal_distro_packages: "{{ ovs_nsh_required_metal_packages + _openstack_host_metal_distro_packages }}"
+
+# Avoid functest failures: without forced metadata the cirros VM gets stuck trying to reach the metadata server
+neutron_dnsmasq_force_metadata: True
{% endraw %}
{% if odl_repo_version is defined %}
diff --git a/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/README b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/README
new file mode 100644
index 00000000..b65c1d52
--- /dev/null
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/README
@@ -0,0 +1,12 @@
+This is the role which deploys the os-odl-sfc_osm scenario in xci.
+
+This role currently works with:
+
+- OpenStack stable/rocky
+- ODL Fluorine
+- OVS 2.9.2
+- OSM master
+- Ubuntu 16.04
+
+For deployment instructions, see:
+https://wiki.opnfv.org/display/sfc/Deploy+OPNFV+SFC+scenarios
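(A typical invocation, as a sketch — variable and script names assumed from common releng-xci usage; the wiki page above has the authoritative steps.)

    export XCI_FLAVOR=noha            # or mini/ha
    export DEPLOY_SCENARIO=os-odl-sfc_osm
    ./xci-deploy.sh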
diff --git a/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/defaults/main.yml b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/defaults/main.yml
new file mode 100644
index 00000000..3e9829cc
--- /dev/null
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/defaults/main.yml
@@ -0,0 +1,22 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+lxd_bridge: "lxdbr0"
+lxd_domain: "lxd"
+lxd_ipv4_addr: "10.0.8.1"
+lxd_ipv4_netmask: "255.255.255.0"
+lxd_ipv4_network: "10.0.8.1/24"
+lxd_ipv4_dhcp_range: "10.0.8.2,10.0.8.254"
+lxd_ipv4_dhcp_max: "250"
+lxd_ipv4_nat: "true"
+lxd_ipv6_addr: ""
+lxd_ipv6_mask: ""
+lxd_ipv6_network: ""
+lxd_ipv6_nat: "false"
+lxd_ipv6_proxy: "false"
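(These defaults feed the lxd-bridge.j2 template used later in configure-opnfvhost.yml; assuming that template follows the stock /etc/default/lxd-bridge layout, the rendered file would look roughly like the sketch below.)

    USE_LXD_BRIDGE="true"
    LXD_BRIDGE="lxdbr0"
    LXD_DOMAIN="lxd"
    LXD_IPV4_ADDR="10.0.8.1"
    LXD_IPV4_NETMASK="255.255.255.0"
    LXD_IPV4_NETWORK="10.0.8.1/24"
    LXD_IPV4_DHCP_RANGE="10.0.8.2,10.0.8.254"
    LXD_IPV4_DHCP_MAX="250"
    LXD_IPV4_NAT="true"
    LXD_IPV6_PROXY="false"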
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/ha/openstack_user_config.yml b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/files/ha/openstack_user_config.yml
index 6d2b490a..f36f6502 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/ha/openstack_user_config.yml
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/files/ha/openstack_user_config.yml
@@ -13,6 +13,7 @@ used_ips:
global_overrides:
internal_lb_vip_address: 172.29.236.222
external_lb_vip_address: 192.168.122.220
+ barbican_keys_backend: true
tunnel_bridge: "br-vxlan"
management_bridge: "br-mgmt"
provider_networks:
@@ -76,18 +77,18 @@ shared-infra_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# repository (apt cache, python packages, etc)
repo-infra_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# load balancer
# Ideally the load balancer should not use the Infrastructure hosts.
@@ -96,9 +97,9 @@ haproxy_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# rsyslog server
# log_hosts:
@@ -114,18 +115,27 @@ identity_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
+
+# barbican
+key-manager_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.14
+ controller02:
+ ip: 172.29.236.15
# cinder api services
storage-infra_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# glance
# The settings here are repeated for each infra host.
@@ -138,27 +148,27 @@ image_hosts:
container_vars:
limit_container_types: glance
glance_nfs_client:
- - server: "172.29.244.14"
+ - server: "172.29.244.12"
remote_path: "/images"
local_path: "/var/lib/glance/images"
type: "nfs"
options: "_netdev,auto"
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
container_vars:
limit_container_types: glance
glance_nfs_client:
- - server: "172.29.244.14"
+ - server: "172.29.244.12"
remote_path: "/images"
local_path: "/var/lib/glance/images"
type: "nfs"
options: "_netdev,auto"
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
container_vars:
limit_container_types: glance
glance_nfs_client:
- - server: "172.29.244.14"
+ - server: "172.29.244.12"
remote_path: "/images"
local_path: "/var/lib/glance/images"
type: "nfs"
@@ -169,52 +179,52 @@ compute-infra_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# heat
orchestration_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# horizon
dashboard_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
-# tacker
-mano_hosts:
+# ceilometer
+metering-infra_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# neutron server, agents (L3, etc)
network_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# nova hypervisors
compute_hosts:
compute00:
- ip: 172.29.236.14
+ ip: 172.29.236.12
compute01:
- ip: 172.29.236.15
+ ip: 172.29.236.13
# cinder volume hosts (NFS-backed)
# The settings here are repeated for each infra host.
@@ -233,10 +243,10 @@ storage_hosts:
nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
nfs_shares_config: /etc/cinder/nfs_shares
shares:
- - ip: "172.29.244.14"
+ - ip: "172.29.244.12"
share: "/volumes"
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
container_vars:
cinder_backends:
limit_container_types: cinder_volume
@@ -246,10 +256,10 @@ storage_hosts:
nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
nfs_shares_config: /etc/cinder/nfs_shares
shares:
- - ip: "172.29.244.14"
+ - ip: "172.29.244.12"
share: "/volumes"
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
container_vars:
cinder_backends:
limit_container_types: cinder_volume
@@ -259,5 +269,5 @@ storage_hosts:
nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
nfs_shares_config: /etc/cinder/nfs_shares
shares:
- - ip: "172.29.244.14"
+ - ip: "172.29.244.12"
share: "/volumes"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/mini/openstack_user_config.yml b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/files/mini/openstack_user_config.yml
index ac17d89d..09d6aa37 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/mini/openstack_user_config.yml
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/files/mini/openstack_user_config.yml
@@ -13,6 +13,7 @@ used_ips:
global_overrides:
internal_lb_vip_address: 172.29.236.11
external_lb_vip_address: 192.168.122.3
+ barbican_keys_backend: true
tunnel_bridge: "br-vxlan"
management_bridge: "br-mgmt"
provider_networks:
@@ -102,6 +103,11 @@ identity_hosts:
controller00:
ip: 172.29.236.11
+# barbican
+key-manager_hosts:
+ controller00:
+ ip: 172.29.236.11
+
# cinder api services
storage-infra_hosts:
controller00:
@@ -139,8 +145,8 @@ dashboard_hosts:
controller00:
ip: 172.29.236.11
-# tacker
-mano_hosts:
+# ceilometer
+metering-infra_hosts:
controller00:
ip: 172.29.236.11
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/noha/openstack_user_config.yml b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/files/noha/openstack_user_config.yml
index ee8889d2..d914991e 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/noha/openstack_user_config.yml
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/files/noha/openstack_user_config.yml
@@ -13,6 +13,7 @@ used_ips:
global_overrides:
internal_lb_vip_address: 172.29.236.11
external_lb_vip_address: 192.168.122.3
+ barbican_keys_backend: true
tunnel_bridge: "br-vxlan"
management_bridge: "br-mgmt"
provider_networks:
@@ -102,6 +103,11 @@ identity_hosts:
controller00:
ip: 172.29.236.11
+# barbican
+key-manager_hosts:
+ controller00:
+ ip: 172.29.236.11
+
# cinder api services
storage-infra_hosts:
controller00:
@@ -139,8 +145,8 @@ dashboard_hosts:
controller00:
ip: 172.29.236.11
-# tacker
-mano_hosts:
+# ceilometer
+metering-infra_hosts:
controller00:
ip: 172.29.236.11
diff --git a/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/configure-opnfvhost.yml b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/configure-opnfvhost.yml
new file mode 100644
index 00000000..3a0226b0
--- /dev/null
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/configure-opnfvhost.yml
@@ -0,0 +1,74 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: install OSM required packages
+ package:
+ name: "{{ osm_required_packages[ansible_pkg_mgr] }}"
+ state: present
+
+- name: initialize LXD
+ command: "{{ item }}"
+ with_items:
+ - lxd init --auto
+ - lxd waitready
+ changed_when: False
+- name: stop lxd-bridge service
+ systemd:
+ name: lxd-bridge
+ state: stopped
+ daemon_reload: yes
+- name: create lxd-bridge configuration
+ template:
+ src: lxd-bridge.j2
+ dest: /etc/default/lxd-bridge
+ mode: 0755
+
+- name: ensure dnsmasq service is stopped before attempting to start lxd-bridge
+ service:
+ name: dnsmasq
+ state: stopped
+
+- name: ensure dnsmasq uses interface br-vlan for lxd-bridge
+ lineinfile:
+ path: /etc/dnsmasq.conf
+ regexp: '^interface='
+ line: 'interface=br-vlan'
+
+- name: ensure docker and lxd-bridge services are started and enabled
+ service:
+ name: "{{ item }}"
+ state: started
+ enabled: yes
+ with_items:
+ - docker
+ - lxd-bridge
+
+- name: get default interface
+ shell: route -n | awk '$1~/^0.0.0.0/ {print $8}'
+ register: default_interface
+ ignore_errors: False
+ changed_when: False
+
+- name: get mtu of the default interface {{ default_interface.stdout }}
+ shell: ip addr show {{ default_interface.stdout }} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}'
+ register: default_interface_mtu
+ ignore_errors: False
+ changed_when: False
+
+- name: set lxdbr0 mtu to {{ default_interface_mtu.stdout }}
+ command: ifconfig lxdbr0 mtu {{ default_interface_mtu.stdout }}
+ ignore_errors: False
+ changed_when: False
+
+- name: add devuser to lxd and docker groups
+ user:
+ name: devuser
+ groups: lxd, docker
+ append: yes
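
The three MTU tasks above boil down to this shell sequence, shown here as a sketch of what effectively runs on the host:

    DEFAULT_IF=$(route -n | awk '$1~/^0.0.0.0/ {print $8}')
    MTU=$(ip addr show "$DEFAULT_IF" | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
    ifconfig lxdbr0 mtu "$MTU"    # keep lxdbr0 aligned with the uplink MTU
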
diff --git a/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/copy-OSA-config-files.yml b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/copy-OSA-config-files.yml
new file mode 100644
index 00000000..96592051
--- /dev/null
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/copy-OSA-config-files.yml
@@ -0,0 +1,20 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Venkata Harshavardhan Reddy Allu and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: copy user_sfc_scenarios_variables.yml (Ubuntu)
+ template:
+ src: "{{xci_flavor}}/user_sfc_scenarios_variables_ubuntu.yml.j2"
+ dest: "{{openstack_osa_etc_path}}/user_sfc_scenarios_variables.yml"
+ when: ansible_pkg_mgr == 'apt'
+
+- name: copy openstack_user_config.yml
+ copy:
+ src: "{{xci_flavor}}/openstack_user_config.yml"
+ dest: "{{openstack_osa_etc_path}}/openstack_user_config.yml"
diff --git a/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/install-osm.yml b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/install-osm.yml
new file mode 100644
index 00000000..5c12e333
--- /dev/null
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/install-osm.yml
@@ -0,0 +1,32 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: clone OSM devops repo and checkout version {{ osm_devops_version }}
+ become_user: "{{ osm_install_user }}"
+ become: yes
+ git:
+ repo: "{{ osm_devops_git_url }}"
+ dest: "{{ osm_devops_clone_location }}"
+ version: "{{ osm_devops_version }}"
+
+- name: install OSM
+ become_user: "{{ osm_install_user }}"
+ become: yes
+ command: "/bin/bash ./full_install_osm.sh --test -b {{ osm_devops_version }} --nolxd -y"
+ args:
+ chdir: "{{ osm_devops_clone_location }}/installers"
+ creates: "/usr/bin/osm"
+
+- name: create osmrc file
+ copy:
+ dest: "{{ osmrc_file_dest }}"
+ content: |
+ export OSM_HOSTNAME=127.0.0.1
+ export OSM_OL005=True
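
Once full_install_osm.sh has finished, the osmrc file written above gives a quick smoke test of the client (a sketch; it assumes the osm binary landed in /usr/bin/osm, as the creates: guard expects):

    . /root/osmrc
    osm ns-list    # an empty table means the client can reach OSM
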
diff --git a/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/main.yml b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/main.yml
new file mode 100644
index 00000000..e8a3ea7f
--- /dev/null
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 SUSE Linux GmbH and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: Copy the OSA config files
+ include: copy-OSA-config-files.yml
diff --git a/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/post-deployment.yml b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/post-deployment.yml
new file mode 100644
index 00000000..a181ce77
--- /dev/null
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/post-deployment.yml
@@ -0,0 +1,27 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE Linux GmbH and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: Configure opnfv host
+ include: configure-opnfvhost.yml
+
+- name: Install OSM
+ include: install-osm.yml
+
+- name: Register OpenStack as VIM
+ include: register-vim.yml
+
+# fetch ODL variables for functest
+- name: Fetch the ip of the neutron server container
+ shell: 'grep controller00_neutron_server_container -n1 /etc/openstack_deploy/openstack_inventory.json | grep ansible_host | cut -d":" -f2 | cut -d "\"" -f2'
+ register: ip
+ changed_when: False
+- name: Fetch the ml2_conf.ini to process ODL variables
+ command: "scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no {{ ip.stdout }}:/etc/neutron/plugins/ml2/ml2_conf.ini /tmp/ml2_conf.ini"
+ changed_when: False
diff --git a/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/register-vim.yml b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/register-vim.yml
new file mode 100644
index 00000000..07e044bf
--- /dev/null
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/register-vim.yml
@@ -0,0 +1,30 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Venkata Harshavardhan Reddy Allu and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# This is a simple fix to wait for the OSM services in
+# the docker containers to start functioning as expected.
+# TODO: Once healthchecks are added to the OSM
+# container stack, use them to identify the status
+# of the containers and modify this task.
+- name: Wait until the OSM services are ready
+ wait_for: timeout=120
+ delegate_to: localhost
+
+- name: Register OpenStack as VIM
+ shell: ". {{ osmrc_file_dest }} ;
+ osm vim-create \
+ --name openstack-site \
+ --user admin \
+ --password {{ openrc_os_password }} \
+ --tenant admin \
+ --account_type openstack \
+ --auth_url {{ openrc_os_auth_url }} \
+ --config='{insecure: true}'"
+ changed_when: False
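
The registration can then be checked from the same environment (a sketch):

    . /root/osmrc
    osm vim-list    # should now list 'openstack-site'
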
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/user_sfc_scenarios_variables_pike.yml b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/ha/user_sfc_scenarios_variables_ubuntu.yml.j2
index 002db2b1..c5b1f19b 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/user_sfc_scenarios_variables_pike.yml
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/ha/user_sfc_scenarios_variables_ubuntu.yml.j2
@@ -18,11 +18,12 @@
# ## the defaults for each role to find additional override options.
# ##
+{% raw %}
# # Debug and Verbose options.
debug: false
-haproxy_keepalived_external_vip_cidr: "192.168.122.220/32"
-haproxy_keepalived_internal_vip_cidr: "172.29.236.222/32"
+haproxy_keepalived_external_vip_cidr: "192.168.122.3/32"
+haproxy_keepalived_internal_vip_cidr: "172.29.236.11/32"
haproxy_keepalived_external_interface: br-vlan
haproxy_keepalived_internal_interface: br-mgmt
gnocchi_db_sync_options: ""
@@ -43,7 +44,7 @@ neutron_opendaylight_conf_ini_overrides:
username: "admin"
password: "admin"
port_binding_controller: "pseudo-agentdb-binding"
- url: "http://{{ internal_lb_vip_address }}:8080/controller/nb/v2/neutron"
+ url: "http://{{ hostvars[groups['neutron_server'][0]]['ansible_eth1']['ipv4']['address'] }}:8180/controller/nb/v2/neutron"
neutron_plugin_base:
- odl-router_v2
- metering
@@ -101,3 +102,30 @@ provider_networks:
- cinder_api
- cinder_volume
- nova_compute
+
+# This repo is used for OVS 2.9.2
+user_external_repos_list:
+ - repo: 'deb http://ppa.launchpad.net/mardim/mardim-ppa/ubuntu xenial main'
+
+user_external_repo_keys_list:
+ - id: 6E2EEDF1A3925D9D727EB1176FAD8BA42AAAEB9F
+ keyserver: keyserver.ubuntu.com
+
+openstack_host_specific_kernel_modules:
+ - name: openvswitch
+
+ovs_nsh_required_metal_packages:
+ - python-six
+ - python3-six
+ - linux-headers-{{ ansible_kernel }}
+ - openvswitch-datapath-dkms
+
+openstack_host_metal_distro_packages: "{{ ovs_nsh_required_metal_packages + _openstack_host_metal_distro_packages }}"
+
+# Avoid functest failures caused by the cirros VM getting stuck trying to reach the metadata server
+neutron_dnsmasq_force_metadata: True
+{% endraw %}
+
+{% if odl_repo_version is defined %}
+odl_version: "{{ odl_repo_version }}"
+{% endif %}
diff --git a/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/lxd-bridge.j2 b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/lxd-bridge.j2
new file mode 100644
index 00000000..707cc465
--- /dev/null
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/lxd-bridge.j2
@@ -0,0 +1,16 @@
+USE_LXD_BRIDGE="true"
+LXD_BRIDGE="{{ lxd_bridge }}"
+UPDATE_PROFILE="true"
+LXD_CONFILE=""
+LXD_DOMAIN="{{ lxd_domain }}"
+LXD_IPV4_ADDR="{{ lxd_ipv4_addr }}"
+LXD_IPV4_NETMASK="{{ lxd_ipv4_netmask }}"
+LXD_IPV4_NETWORK="{{ lxd_ipv4_network }}"
+LXD_IPV4_DHCP_RANGE="{{ lxd_ipv4_dhcp_range }}"
+LXD_IPV4_DHCP_MAX="{{ lxd_ipv4_dhcp_max }}"
+LXD_IPV4_NAT="{{ lxd_ipv4_nat }}"
+LXD_IPV6_ADDR="{{ lxd_ipv6_addr }}"
+LXD_IPV6_MASK="{{ lxd_ipv6_mask }}"
+LXD_IPV6_NETWORK="{{ lxd_ipv6_network }}"
+LXD_IPV6_NAT="{{ lxd_ipv6_nat }}"
+LXD_IPV6_PROXY="{{ lxd_ipv6_proxy }}"
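
The template above renders /etc/default/lxd-bridge entirely from role variables; with hypothetical values (the actual ones live in this role's defaults/main.yml, which this hunk does not show) the IPv4 portion would render along these lines:

    USE_LXD_BRIDGE="true"
    LXD_BRIDGE="lxdbr0"
    LXD_DOMAIN="lxd"
    LXD_IPV4_ADDR="10.20.30.1"
    LXD_IPV4_NETMASK="255.255.255.0"
    LXD_IPV4_NETWORK="10.20.30.0/24"
    LXD_IPV4_DHCP_RANGE="10.20.30.2,10.20.30.254"
    LXD_IPV4_NAT="true"
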
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/user_sfc_scenarios_variables_pike.yml b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/mini/user_sfc_scenarios_variables_ubuntu.yml.j2
index 4ee48807..0194456e 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/user_sfc_scenarios_variables_pike.yml
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/mini/user_sfc_scenarios_variables_ubuntu.yml.j2
@@ -18,6 +18,7 @@
# ## the defaults for each role to find additional override options.
# ##
+{% raw %}
# # Debug and Verbose options.
debug: false
@@ -42,7 +43,7 @@ neutron_opendaylight_conf_ini_overrides:
username: "admin"
password: "admin"
port_binding_controller: "pseudo-agentdb-binding"
- url: "http://{{ hostvars[groups['neutron_server'][0]]['ansible_eth1']['ipv4']['address'] }}:8080/controller/nb/v2/neutron"
+ url: "http://{{ hostvars[groups['neutron_server'][0]]['ansible_eth1']['ipv4']['address'] }}:8180/controller/nb/v2/neutron"
neutron_plugin_base:
- odl-router_v2
- metering
@@ -100,3 +101,30 @@ provider_networks:
- cinder_api
- cinder_volume
- nova_compute
+
+# This repo is used for OVS 2.9.2
+user_external_repos_list:
+ - repo: 'deb http://ppa.launchpad.net/mardim/mardim-ppa/ubuntu xenial main'
+
+user_external_repo_keys_list:
+ - id: 6E2EEDF1A3925D9D727EB1176FAD8BA42AAAEB9F
+ keyserver: keyserver.ubuntu.com
+
+openstack_host_specific_kernel_modules:
+ - name: openvswitch
+
+ovs_nsh_required_metal_packages:
+ - python-six
+ - python3-six
+ - linux-headers-{{ ansible_kernel }}
+ - openvswitch-datapath-dkms
+
+openstack_host_metal_distro_packages: "{{ ovs_nsh_required_metal_packages + _openstack_host_metal_distro_packages }}"
+
+# Avoid functest failures caused by the cirros VM getting stuck trying to reach the metadata server
+neutron_dnsmasq_force_metadata: True
+{% endraw %}
+
+{% if odl_repo_version is defined %}
+odl_version: "{{ odl_repo_version }}"
+{% endif %}
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/user_sfc_scenarios_variables_pike.yml b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/noha/user_sfc_scenarios_variables_ubuntu.yml.j2
index 4ee48807..1ec821d5 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/user_sfc_scenarios_variables_pike.yml
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/noha/user_sfc_scenarios_variables_ubuntu.yml.j2
@@ -18,6 +18,7 @@
# ## the defaults for each role to find additional override options.
# ##
+{% raw %}
# # Debug and Verbose options.
debug: false
@@ -42,7 +43,7 @@ neutron_opendaylight_conf_ini_overrides:
username: "admin"
password: "admin"
port_binding_controller: "pseudo-agentdb-binding"
- url: "http://{{ hostvars[groups['neutron_server'][0]]['ansible_eth1']['ipv4']['address'] }}:8080/controller/nb/v2/neutron"
+ url: "http://{{ hostvars[groups['neutron_server'][0]]['ansible_eth1']['ipv4']['address'] }}:8180/controller/nb/v2/neutron"
neutron_plugin_base:
- odl-router_v2
- metering
@@ -100,3 +101,30 @@ provider_networks:
- cinder_api
- cinder_volume
- nova_compute
+
+# This repo is used for OVS 2.9.2
+user_external_repos_list:
+ - repo: 'deb http://ppa.launchpad.net/mardim/mardim-ppa/ubuntu xenial main'
+
+user_external_repo_keys_list:
+ - id: 6E2EEDF1A3925D9D727EB1176FAD8BA42AAAEB9F
+ keyserver: keyserver.ubuntu.com
+
+openstack_host_specific_kernel_modules:
+ - name: openvswitch
+
+ovs_nsh_required_metal_packages:
+ - python-six
+ - python3-six
+ - linux-headers-{{ ansible_kernel }}
+ - openvswitch-datapath-dkms
+
+openstack_host_metal_distro_packages: "{{ ovs_nsh_required_metal_packages + _openstack_host_metal_distro_packages }}"
+
+# Avoid functest failures caused by the cirros VM getting stuck trying to reach the metadata server
+neutron_dnsmasq_force_metadata: True
+{% endraw %}
+
+{% if odl_repo_version is defined %}
+odl_version: "{{ odl_repo_version }}"
+{% endif %}
diff --git a/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/vars/main.yml b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/vars/main.yml
new file mode 100644
index 00000000..41051830
--- /dev/null
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/vars/main.yml
@@ -0,0 +1,27 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+odl_repo_version: "{{ lookup('env','ODL_VERSION') }}"
+
+osm_required_packages:
+ apt:
+ - apt-transport-https
+ - ca-certificates
+ - software-properties-common
+ - docker.io
+ - snapd
+ - lxd
+
+osm_devops_version: "master"
+osm_devops_git_url: "https://osm.etsi.org/gerrit/osm/devops.git"
+osm_devops_clone_location: "/home/{{ osm_install_user }}/osm-devops"
+osm_install_user: "devuser"
+
+osmrc_file_dest: "/root/osmrc"
diff --git a/scenarios/os-odl-sfc_osm/xci_overrides b/scenarios/os-odl-sfc_osm/xci_overrides
new file mode 100644
index 00000000..ecbff0ee
--- /dev/null
+++ b/scenarios/os-odl-sfc_osm/xci_overrides
@@ -0,0 +1,10 @@
+if [[ $XCI_FLAVOR == "ha" ]]; then
+ export VM_MEMORY_SIZE=20480
+else
+ export VM_MEMORY_SIZE=16384
+fi
+
+# Until this feature is developed, ODL_VERSION must be initialized:
+# https://github.com/ansible/ansible/issues/17329
+# otherwise the lookup in vars/main returns an empty string when not defined
+export ODL_VERSION=${ODL_VERSION:-latest_release}
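
The export above works around lookup('env', ...) returning an empty string rather than leaving the variable undefined; an equivalent in-role guard, sketched with Jinja's default filter (the second argument makes empty strings count as undefined):

    odl_repo_version: "{{ lookup('env', 'ODL_VERSION') | default('latest_release', true) }}"
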
diff --git a/setup.cfg b/setup.cfg
index ed4d2104..d1d320cc 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -8,3 +8,5 @@ packages = sfc
[entry_points]
console_scripts =
run_sfc_tests = sfc.tests.functest.run_sfc_tests:main
+xtesting.testcase =
+ functest-odl-sfc = sfc.tests.functest.run_sfc_tests:SfcFunctest
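
The new xtesting.testcase entry point lets xtesting discover the test case by name; a sketch of how such an entry point resolves, using setuptools' pkg_resources:

    import pkg_resources

    ep = next(pkg_resources.iter_entry_points('xtesting.testcase',
                                              'functest-odl-sfc'))
    cls = ep.load()    # -> sfc.tests.functest.run_sfc_tests.SfcFunctest
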
diff --git a/sfc/lib/cleanup.py b/sfc/lib/cleanup.py
index e614867d..e97034ad 100644
--- a/sfc/lib/cleanup.py
+++ b/sfc/lib/cleanup.py
@@ -3,6 +3,7 @@ import sys
import time
import sfc.lib.openstack_utils as os_sfc_utils
import sfc.lib.odl_utils as odl_utils
+from openstack import connection
logger = logging.getLogger(__name__)
@@ -73,15 +74,6 @@ def delete_vims():
os_sfc_utils.delete_vim(t, vim_id=vim)
-# Creators is a list full of SNAPs objects
-def delete_openstack_objects(creators):
- for creator in reversed(creators):
- try:
- creator.clean()
- except Exception as e:
- logger.error('Unexpected error cleaning - %s', e)
-
-
# Networking-odl generates a new security group when creating a router
# which is not tracked by SNAPs
def delete_untracked_security_groups():
@@ -91,32 +83,79 @@ def delete_untracked_security_groups():
def cleanup_odl(odl_ip, odl_port):
delete_odl_resources(odl_ip, odl_port, 'service-function-forwarder')
- delete_odl_resources(odl_ip, odl_port, 'service-function-chain')
- delete_odl_resources(odl_ip, odl_port, 'service-function-path')
- delete_odl_resources(odl_ip, odl_port, 'service-function')
+ # delete_odl_resources(odl_ip, odl_port, 'service-function-chain')
+ # delete_odl_resources(odl_ip, odl_port, 'service-function-path')
+ # delete_odl_resources(odl_ip, odl_port, 'service-function')
delete_odl_ietf_access_lists(odl_ip, odl_port)
-def cleanup(creators, odl_ip=None, odl_port=None):
+def cleanup_nsfc_objects():
+ '''
+ Cleanup the networking-sfc objects created for the test
+ '''
+ # TODO Add n-sfc to snaps so that it can be removed through
+ # delete_openstack_objects
+ openstack_sfc = os_sfc_utils.OpenStackSFC()
+ openstack_sfc.delete_chain()
+ openstack_sfc.delete_port_groups()
+
+
+def cleanup_tacker_objects():
+ '''
+ cleanup the tacker objects created for the test
+ '''
delete_vnffgs()
delete_vnffgds()
delete_vnfs()
time.sleep(20)
delete_vnfds()
delete_vims()
- delete_openstack_objects(creators)
+
+
+def cleanup_mano_objects(mano):
+ '''
+ Cleanup the mano objects (chains, classifiers, etc)
+ '''
+ if mano == 'tacker':
+ cleanup_tacker_objects()
+ elif mano == 'no-mano':
+ cleanup_nsfc_objects()
+
+
+def delete_openstack_objects(testcase_config, creators):
+ conn = connection.from_config(verify=False)
+ for creator in creators:
+ if creator.name == testcase_config.subnet_name:
+ subnet_obj = creator
+
+ for creator in reversed(creators):
+ try:
+ logger.info("Deleting " + creator.name)
+ if creator.name == testcase_config.router_name:
+ logger.info("Removing subnet from router")
+ conn.network.remove_interface_from_router(
+ creator.id, subnet_obj.id)
+ time.sleep(2)
+ logger.info("Deleting router")
+ conn.network.delete_router(creator)
+ else:
+ creator.delete(conn.session)
+ time.sleep(2)
+ creators.remove(creator)
+ except Exception as e:
+ logger.error('Unexpected error cleaning - %s', e)
+
+
+def cleanup(testcase_config, creators, mano, odl_ip=None, odl_port=None):
+ cleanup_mano_objects(mano)
+ delete_openstack_objects(testcase_config, creators)
delete_untracked_security_groups()
if odl_ip is not None and odl_port is not None:
cleanup_odl(odl_ip, odl_port)
-def cleanup_from_bash(odl_ip=None, odl_port=None):
- delete_vnffgs()
- delete_vnffgds()
- delete_vnfs()
- time.sleep(20)
- delete_vnfds()
- delete_vims()
+def cleanup_from_bash(odl_ip=None, odl_port=None, mano='no-mano'):
+ cleanup_mano_objects(mano=mano)
if odl_ip is not None and odl_port is not None:
cleanup_odl(odl_ip, odl_port)
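
With the mano parameter threaded through, callers now pick the cleanup path explicitly; a usage sketch (the ODL address is a placeholder):

    import sfc.lib.cleanup as sfc_cleanup

    # tacker deployment: remove VNFFGs, VNFs and VIMs, then ODL leftovers
    sfc_cleanup.cleanup_from_bash(odl_ip='10.0.0.1', odl_port='8181',
                                  mano='tacker')
    # networking-sfc deployment: remove port chains and port pair groups
    sfc_cleanup.cleanup_from_bash(mano='no-mano')
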
diff --git a/sfc/lib/config.py b/sfc/lib/config.py
index 541f6847..bf9864a5 100644
--- a/sfc/lib/config.py
+++ b/sfc/lib/config.py
@@ -46,33 +46,55 @@ class CommonConfig(object):
self.config_file = os.path.join(self.sfc_test_dir, "config.yaml")
self.vim_file = os.path.join(self.sfc_test_dir, "register-vim.json")
- self.installer_type = env.get('INSTALLER_TYPE')
-
- self.installer_fields = test_utils.fill_installer_dict(
- self.installer_type)
-
- self.installer_ip = env.get('INSTALLER_IP')
-
- self.installer_user = ft_utils.get_parameter_from_yaml(
- self.installer_fields['user'], self.config_file)
-
- try:
- self.installer_password = ft_utils.get_parameter_from_yaml(
- self.installer_fields['password'], self.config_file)
- except Exception:
- self.installer_password = None
-
- try:
- self.installer_key_file = ft_utils.get_parameter_from_yaml(
- self.installer_fields['pkey_file'], self.config_file)
- except Exception:
- self.installer_key_file = None
-
- try:
- self.installer_cluster = ft_utils.get_parameter_from_yaml(
- self.installer_fields['cluster'], self.config_file)
- except Exception:
+ pod_yaml_exists = os.path.isfile(
+     os.path.join(self.sfc_test_dir, "pod.yaml"))
+
+ if pod_yaml_exists:
+ self.pod_file = os.path.join(self.sfc_test_dir, "pod.yaml")
+ self.nodes_pod = ft_utils.get_parameter_from_yaml(
+ "nodes", self.pod_file)
+ self.host_ip = self.nodes_pod[0]['ip']
+ self.host_user = self.nodes_pod[0]['user']
+
+ self.installer_type = 'configByUser'
+ self.installer_ip = self.host_ip
+ self.installer_user = self.host_user
self.installer_cluster = None
+ try:
+ self.installer_password = self.nodes_pod[0]['password']
+ except Exception:
+ self.installer_password = None
+
+ try:
+ self.installer_key_file = self.nodes_pod[0]['key_filename']
+ except Exception:
+ self.installer_key_file = None
+ else:
+ self.nodes_pod = None
+ self.host_ip = None
+ self.installer_type = env.get('INSTALLER_TYPE')
+ self.installer_fields = test_utils.fill_installer_dict(
+ self.installer_type)
+ self.installer_ip = env.get('INSTALLER_IP')
+ self.installer_user = ft_utils.get_parameter_from_yaml(
+ self.installer_fields['user'], self.config_file)
+
+ try:
+ self.installer_password = ft_utils.get_parameter_from_yaml(
+ self.installer_fields['password'], self.config_file)
+ except Exception:
+ self.installer_password = None
+
+ try:
+ self.installer_key_file = ft_utils.get_parameter_from_yaml(
+ self.installer_fields['pkey_file'], self.config_file)
+ except Exception:
+ self.installer_key_file = None
+
+ try:
+ self.installer_cluster = ft_utils.get_parameter_from_yaml(
+ self.installer_fields['cluster'], self.config_file)
+ except Exception:
+ self.installer_cluster = None
self.flavor = ft_utils.get_parameter_from_yaml(
"defaults.flavor", self.config_file)
@@ -88,6 +110,8 @@ class CommonConfig(object):
"defaults.image_format", self.config_file)
self.image_url = ft_utils.get_parameter_from_yaml(
"defaults.image_url", self.config_file)
+ self.mano_component = ft_utils.get_parameter_from_yaml(
+ "defaults.mano_component", self.config_file)
try:
self.vnf_image_name = ft_utils.get_parameter_from_yaml(
"defaults.vnf_image_name", self.config_file)
diff --git a/sfc/lib/odl_utils.py b/sfc/lib/odl_utils.py
index 7879eab9..2c657a13 100644
--- a/sfc/lib/odl_utils.py
+++ b/sfc/lib/odl_utils.py
@@ -10,7 +10,8 @@ import time
import sfc.lib.openstack_utils as os_sfc_utils
logger = logging.getLogger(__name__)
-
+odl_username = 'admin'
+odl_password = 'admin'
ODL_MODULE_EXCEPTIONS = {
"service-function-path-state": "service-function-path"
@@ -24,15 +25,16 @@ ODL_PLURAL_EXCEPTIONS = {
def actual_rsps_in_compute(ovs_logger, compute_ssh):
'''
Example flows that match the regex (line wrapped because of flake8)
- table=101, n_packets=7, n_bytes=595, priority=500,tcp,in_port=2,tp_dst=80
- actions=push_nsh,load:0x1->NXM_NX_NSH_MDTYPE[],load:0x3->NXM_NX_NSH_NP[],
- load:0x27->NXM_NX_NSP[0..23],load:0xff->NXM_NX_NSI[],
- load:0xffffff->NXM_NX_NSH_C1[],load:0->NXM_NX_NSH_C2[],resubmit(,17)
+ cookie=0xf005ba1100000002, duration=5.843s, table=101, n_packets=0,
+ n_bytes=0, priority=500,tcp,in_port=48,tp_dst=80
+ actions=load:0x169->NXM_NX_REG2[8..31],load:0xff->NXM_NX_REG2[0..7],
+ resubmit(,17)', u' cookie=0xf005ba1100000002, duration=5.825s, table=101,
+ n_packets=2, n_bytes=684, priority=10 actions=resubmit(,17)
'''
match_rsp = re.compile(r'.+'
r'(tp_(?:src|dst)=[0-9]+)'
r'.+'
- r'load:(0x[0-9a-f]+)->NXM_NX_NSP\[0\.\.23\]'
+ r'actions=load:(0x[0-9a-f]+)->NXM_NX_REG2'
r'.+')
# First line is OFPST_FLOW reply (OF1.3) (xid=0x2):
# This is not a flow so ignore
@@ -64,7 +66,7 @@ def get_active_rsps_on_ports(odl_ip, odl_port, neutron_ports):
# We get the first ace. ODL creates a new ACL
# with one ace for each classifier
ace = acl['access-list-entries']['ace'][0]
- except:
+ except Exception:
logger.warn('ACL {0} does not have an ACE'.format(
acl['acl-name']))
continue
@@ -200,6 +202,11 @@ def wait_for_classification_rules(ovs_logger, compute_nodes, odl_ip, odl_port,
time.sleep(3)
while timeout > 0:
+ # When swapping classifiers, promised_rsps takes some time to get
+ # updated, so refresh it on every iteration
+ # TODO: Need to optimise this code
+ promised_rsps = promised_rsps_in_compute(odl_ip, odl_port,
+ neutron_ports)
logger.info("RSPs in ODL Operational DataStore"
"for compute '{}':".format(compute_name))
logger.info("{0}".format(promised_rsps))
@@ -245,6 +252,36 @@ def get_odl_ip_port(nodes):
return ip, port
+def get_odl_ip_port_no_installer(nodes_pod):
+ node_index = 0
+ for n in nodes_pod:
+ if n['role'] == 'Controller':
+ break
+ node_index += 1
+ remote_ml2_conf_etc = '/etc/neutron/plugins/ml2/ml2_conf.ini'
+ os.system('scp {0}@{1}:{2} .'.
+ format(nodes_pod[node_index]['user'],
+ nodes_pod[node_index]['ip'],
+ remote_ml2_conf_etc))
+ with open('ml2_conf.ini', 'r') as f:
+     string = re.findall(r'[0-9]+(?:\.[0-9]+){3}:[0-9]+', f.read())
+ ip = string[0].split(':')[0]
+ port = string[0].split(':')[1]
+ return ip, port
+
+
+def get_odl_username_password():
+ local_ml2_conf_file = os.path.join(os.getcwd(), 'ml2_conf.ini')
+ con_par = ConfigParser.RawConfigParser()
+ con_par.read(local_ml2_conf_file)
+ global odl_username
+ odl_username = con_par.get('ml2_odl', 'username')
+ global odl_password
+ odl_password = con_par.get('ml2_odl', 'password')
+ return odl_username, odl_password
+
+
def pluralize(resource):
plural = ODL_PLURAL_EXCEPTIONS.get(resource, None)
if not plural:
@@ -260,11 +297,11 @@ def get_module(resource):
def format_odl_resource_list_url(odl_ip, odl_port, resource,
- datastore='config', odl_user='admin',
- odl_pwd='admin'):
+ datastore='config', odl_user=odl_username,
+ odl_pwd=odl_password):
return ('http://{usr}:{pwd}@{ip}:{port}/restconf/{ds}/{rsrc}:{rsrcs}'
- .format(usr=odl_user, pwd=odl_pwd, ip=odl_ip, port=odl_port,
- ds=datastore, rsrc=get_module(resource),
+ .format(usr=odl_username, pwd=odl_password, ip=odl_ip,
+ port=odl_port, ds=datastore, rsrc=get_module(resource),
rsrcs=pluralize(resource)))
@@ -314,10 +351,10 @@ def odl_acl_types_names(acl_json):
def format_odl_acl_list_url(odl_ip, odl_port,
- odl_user='admin', odl_pwd='admin'):
+ odl_user=odl_username, odl_pwd=odl_password):
acl_list_url = ('http://{usr}:{pwd}@{ip}:{port}/restconf/config/'
'ietf-access-control-list:access-lists'
- .format(usr=odl_user, pwd=odl_pwd,
+ .format(usr=odl_username, pwd=odl_password,
ip=odl_ip, port=odl_port))
return acl_list_url
@@ -410,7 +447,8 @@ def check_vnffg_deletion(odl_ip, odl_port, ovs_logger, neutron_ports,
try:
compute = find_compute(compute_client_name, compute_nodes)
except Exception as e:
- logger.debug("There was an error getting the compute: e" % e)
+ logger.debug("There was an error getting the compute: %s" % e)
+ return False
retries_counter = retries
@@ -427,20 +465,3 @@ def check_vnffg_deletion(odl_ip, odl_port, ovs_logger, neutron_ports,
return False
return True
-
-
-def create_chain(tacker_client, default_param_file, neutron_port,
- COMMON_CONFIG, TESTCASE_CONFIG):
-
- tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnffgd_dir,
- TESTCASE_CONFIG.test_vnffgd_red)
-
- os_sfc_utils.create_vnffgd(tacker_client,
- tosca_file=tosca_file,
- vnffgd_name='red')
-
- os_sfc_utils.create_vnffg_with_param_file(tacker_client, 'red',
- 'red_http',
- default_param_file,
- neutron_port.id)
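
The updated regex in actual_rsps_in_compute matches the NXM_NX_REG2 action that newer ODL/OVS flows carry instead of NXM_NX_NSP; checking it against the flow quoted in the docstring (a sketch):

    import re

    match_rsp = re.compile(r'.+'
                           r'(tp_(?:src|dst)=[0-9]+)'
                           r'.+'
                           r'actions=load:(0x[0-9a-f]+)->NXM_NX_REG2'
                           r'.+')
    flow = ('cookie=0xf005ba1100000002, duration=5.843s, table=101, '
            'n_packets=0, n_bytes=0, priority=500,tcp,in_port=48,tp_dst=80 '
            'actions=load:0x169->NXM_NX_REG2[8..31],'
            'load:0xff->NXM_NX_REG2[0..7],resubmit(,17)')
    print(match_rsp.match(flow).groups())    # ('tp_dst=80', '0x169')
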
diff --git a/sfc/lib/openstack_utils.py b/sfc/lib/openstack_utils.py
index 09b93f37..c46ff123 100644
--- a/sfc/lib/openstack_utils.py
+++ b/sfc/lib/openstack_utils.py
@@ -3,35 +3,29 @@ import time
import json
import logging
import yaml
+import urllib2
from tackerclient.tacker import client as tackerclient
from functest.utils import constants
from functest.utils import env
from snaps.openstack.tests import openstack_tests
-from snaps.openstack.create_image import OpenStackImage
-from snaps.config.image import ImageConfig
-from snaps.config.flavor import FlavorConfig
-from snaps.openstack.create_flavor import OpenStackFlavor
-from snaps.config.network import NetworkConfig, SubnetConfig, PortConfig
-from snaps.openstack.create_network import OpenStackNetwork
-from snaps.config.router import RouterConfig
-from snaps.openstack.create_router import OpenStackRouter
-from snaps.config.security_group import (
- Protocol, SecurityGroupRuleConfig, Direction, SecurityGroupConfig)
-from snaps.openstack.create_security_group import OpenStackSecurityGroup
+from snaps.config.vm_inst import FloatingIpConfig
import snaps.openstack.create_instance as cr_inst
-from snaps.config.vm_inst import VmInstanceConfig, FloatingIpConfig
from snaps.openstack.utils import (
nova_utils, neutron_utils, heat_utils, keystone_utils)
+from openstack import connection
+from neutronclient.neutron import client as neutronclient
logger = logging.getLogger(__name__)
DEFAULT_TACKER_API_VERSION = '1.0'
+DEFAULT_API_VERSION = '2'
class OpenStackSFC:
def __init__(self):
+ self.conn = self.get_os_connection()
self.os_creds = openstack_tests.get_credentials(
os_env_file=constants.ENV_FILE)
self.creators = []
@@ -39,129 +33,191 @@ class OpenStackSFC:
self.neutron = neutron_utils.neutron_client(self.os_creds)
self.heat = heat_utils.heat_client(self.os_creds)
self.keystone = keystone_utils.keystone_client(self.os_creds)
+ self.neutron_client = neutronclient.\
+ Client(self.get_neutron_client_version(),
+ session=self.conn.session)
+
+ def get_os_connection(self):
+ return connection.from_config(verify=False)
+
+ def get_neutron_client_version(self):
+ api_version = os.getenv('OS_NETWORK_API_VERSION')
+ if api_version is not None:
+ logger.info("OS_NETWORK_API_VERSION is %s" % api_version)
+ return api_version
+ return DEFAULT_API_VERSION
def register_glance_image(self, name, url, img_format, public):
logger.info("Registering the image...")
- # Check whether the image is local or not
- if 'http' in url:
- image_settings = ImageConfig(name=name,
- img_format=img_format,
- url=url,
- public=public,
- image_user='admin')
+ image = self.conn.image.find_image(name)
+ if image:
+ logger.info("Image %s already exists." % image.name)
else:
- image_settings = ImageConfig(name=name,
- img_format=img_format,
- image_file=url,
- public=public,
- image_user='admin')
+ if 'http' in url:
+ logger.info("Downloading image")
+ response = urllib2.urlopen(url)
+ image_data = response.read()
+ else:
+ with open(url) as f:
+ image_data = f.read()
- # TODO Remove this when tacker is part of SNAPS
- self.image_settings = image_settings
+ image_settings = {'name': name,
+ 'disk_format': img_format,
+ 'data': image_data,
+ 'is_public': public,
+ 'container_format': 'bare'}
+ image = self.conn.image.upload_image(**image_settings)
+ self.creators.append(image)
+ logger.info("Image created")
- image_creator = OpenStackImage(self.os_creds, image_settings)
- image_creator.create()
+ self.image_settings = image_settings
- self.creators.append(image_creator)
- return image_creator
+ return image
def create_flavor(self, name, ram, disk, vcpus):
- logger.info("Creating the flavor...")
- flavor_settings = FlavorConfig(name=name, ram=ram, disk=disk,
- vcpus=vcpus)
- flavor_creator = OpenStackFlavor(self.os_creds, flavor_settings)
- flavor = flavor_creator.create()
+ logger.info("Creating flavor...")
+ flavor_settings = {"name": name, "ram": ram, "disk": disk,
+ "vcpus": vcpus}
+
+ flavor = self.conn.compute.create_flavor(**flavor_settings)
- self.creators.append(flavor_creator)
+ self.creators.append(flavor)
return flavor
def create_network_infrastructure(self, net_name, subnet_name, subnet_cidr,
router_name):
- logger.info("Creating networks...")
+ logger.info("Creating Networks...")
# Network and subnet
- subnet_settings = SubnetConfig(name=subnet_name, cidr=subnet_cidr)
- network_settings = NetworkConfig(name=net_name,
- subnet_settings=[subnet_settings])
- network_creator = OpenStackNetwork(self.os_creds, network_settings)
- network = network_creator.create()
+ network = self.conn.network.create_network(name=net_name)
+ self.creators.append(network)
- self.creators.append(network_creator)
+ subnet_settings = {"name": subnet_name, "cidr": subnet_cidr,
+ "network_id": network.id, 'ip_version': '4'}
+ subnet = self.conn.network.create_subnet(**subnet_settings)
+ self.creators.append(subnet)
# Router
- logger.info("Creating the router...")
ext_network_name = env.get('EXTERNAL_NETWORK')
+ ext_net = self.conn.network.find_network(ext_network_name)
+ router_dict = {'network_id': ext_net.id}
- router_settings = RouterConfig(name=router_name,
- external_gateway=ext_network_name,
- internal_subnets=[subnet_name])
+ logger.info("Creating Router...")
+ router = self.conn.network.create_router(name=router_name)
- router_creator = OpenStackRouter(self.os_creds, router_settings)
- router = router_creator.create()
+ self.conn.network.add_interface_to_router(router.id,
+ subnet_id=subnet.id)
- self.creators.append(router_creator)
+ self.conn.network.update_router(router.id,
+ external_gateway_info=router_dict)
+ router_obj = self.conn.network.get_router(router.id)
+ self.creators.append(router_obj)
- return network, router
+ return network, router_obj
def create_security_group(self, sec_grp_name):
logger.info("Creating the security groups...")
- rule_ping = SecurityGroupRuleConfig(sec_grp_name=sec_grp_name,
- direction=Direction.ingress,
- protocol=Protocol.icmp)
-
- rule_ssh = SecurityGroupRuleConfig(sec_grp_name=sec_grp_name,
- direction=Direction.ingress,
- protocol=Protocol.tcp,
- port_range_min=22,
- port_range_max=22)
-
- rule_http = SecurityGroupRuleConfig(sec_grp_name=sec_grp_name,
- direction=Direction.ingress,
- protocol=Protocol.tcp,
- port_range_min=80,
- port_range_max=80)
+ sec_group = self.conn.network.create_security_group(name=sec_grp_name)
- rules = [rule_ping, rule_ssh, rule_http]
+ rule_ping = {"security_group_id": sec_group.id,
+ "direction": "ingress",
+ "protocol": "icmp"}
+
+ rule_ssh = {"security_group_id": sec_group.id,
+ "direction": "ingress",
+ "protocol": "tcp",
+ "port_range_min": 22,
+ "port_range_max": 22}
- secgroup_settings = SecurityGroupConfig(name=sec_grp_name,
- rule_settings=rules)
+ rule_http = {"security_group_id": sec_group.id,
+ "direction": "ingress",
+ "protocol": "tcp",
+ "port_range_min": 80,
+ "port_range_max": 80}
- sec_group_creator = OpenStackSecurityGroup(self.os_creds,
- secgroup_settings)
- sec_group = sec_group_creator.create()
+ rules = [rule_ping, rule_ssh, rule_http]
- self.creators.append(sec_group_creator)
+ for rule in rules:
+ self.conn.network.create_security_group_rule(**rule)
+
+ self.creators.append(sec_group)
return sec_group
- def create_instance(self, vm_name, flavor_name, image_creator, network,
- secgrp, av_zone):
+ def create_instance(self, vm_name, flavor, image, network,
+ sec_group, av_zone, ports, port_security=True):
+ logger.info("Creating Key Pair {}...".format(vm_name))
+
+ keypair = self.conn.compute.\
+ create_keypair(name="{}_keypair".format(vm_name))
+ self.creators.append(keypair)
+ flavor_obj = self.conn.compute.find_flavor(flavor)
+
+ logger.info("Creating Port {}...".format(ports))
+ port_list = []
+ for port in ports:
+ if port_security:
+ port_obj = self.conn.network.create_port(
+ name=port, is_port_security_enabled=port_security,
+ network_id=network.id, security_group_ids=[sec_group.id])
+ else:
+ port_obj = self.conn.network.create_port(
+ name=port, is_port_security_enabled=port_security,
+ network_id=network.id)
+ port_list.append(port_obj)
+ self.creators.append(port_obj)
logger.info("Creating the instance {}...".format(vm_name))
- port_settings = PortConfig(name=vm_name + '-port',
- network_name=network.name)
- instance_settings = VmInstanceConfig(
- name=vm_name, flavor=flavor_name,
- security_group_names=str(secgrp.name),
- port_settings=[port_settings],
- availability_zone=av_zone)
+ if len(port_list) > 1:
+ network_list = [{"port": port_list[0].id},
+ {"port": port_list[1].id}]
+ else:
+ network_list = [{"port": port_obj.id}]
+
+ instance = self.conn.compute.create_server(name=vm_name,
+ image_id=image.id,
+ flavor_id=flavor_obj.id,
+ networks=network_list,
+ key_name=keypair.name,
+ availability_zone=av_zone)
+
+ logger.info("Waiting for {} to become Active".format(instance.name))
+ self.conn.compute.wait_for_server(instance)
+ logger.info("{} is active".format(instance.name))
- instance_creator = cr_inst.OpenStackVmInstance(
- self.os_creds,
- instance_settings,
- image_creator.image_settings)
+ self.creators.append(instance)
- instance = instance_creator.create()
+ return instance, port_list
- self.creators.append(instance_creator)
- return instance, instance_creator
+ def get_instance(self, instance_id):
+ """
+ Return a dictionary of metadata for a server instance
+ """
+ return self.conn.compute.get_server_metadata(instance_id)
def get_av_zones(self):
'''
Return the availability zone each host belongs to
'''
- hosts = nova_utils.get_hypervisor_hosts(self.nova)
+ hosts = self.get_hypervisor_hosts()
return ['nova::{0}'.format(host) for host in hosts]
+ def get_hypervisor_hosts(self):
+ """
+ Returns the host names of all nova nodes with active hypervisors
+ :return: a list of hypervisor host names
+ """
+ try:
+ nodes = []
+ hypervisors = self.conn.compute.hypervisors()
+ for hypervisor in hypervisors:
+ if hypervisor.state == "up":
+ nodes.append(hypervisor.name)
+ return nodes
+ except Exception as e:
+ logger.error("Error [get_hypervisors(compute)]: %s" % e)
+ return None
+
def get_compute_client(self):
'''
Return the compute where the client sits
@@ -187,18 +243,37 @@ class OpenStackSFC:
raise Exception("There is no VM with name '{}'!!".format(vm_name))
- def assign_floating_ip(self, router, vm, vm_creator):
+ def get_port_by_ip(self, ip_address):
+ """
+ Return a dictionary of metadata for a port instance
+ by its ip_address
+ """
+
+ ports = self.conn.network.ports()
+ for port in ports:
+ if port.fixed_ips[0]['ip_address'] == ip_address:
+ return self.conn.network.get_port(port.id)
+
+ def assign_floating_ip(self, vm, vm_port):
'''
Assign floating ips to all the VMs
'''
- name = vm.name + "-float"
- port_name = vm.ports[0].name
- float_ip = FloatingIpConfig(name=name,
- port_name=port_name,
- router_name=router.name)
- ip = vm_creator.add_floating_ip(float_ip)
+ logger.info(" Creating floating ips ")
+
+ ext_network_name = env.get('EXTERNAL_NETWORK')
+ ext_net = self.conn.network.find_network(ext_network_name)
+
+ fip = self.conn.network.create_ip(floating_network_id=ext_net.id,
+ port_id=vm_port.id)
+ logger.info(
+ " FLoating IP address {} created".format(fip.floating_ip_address))
- return ip.ip
+ logger.info(" Adding Floating IPs to instances ")
+ self.conn.compute.add_floating_ip_to_server(
+ vm.id, fip.floating_ip_address)
+
+ self.creators.append(fip)
+ return fip.floating_ip_address
# We need this function because tacker VMs cannot be created through SNAPs
def assign_floating_ip_vnfs(self, router, ips=None):
@@ -213,7 +288,7 @@ class OpenStackSFC:
for stack in stacks:
servers = heat_utils.get_stack_servers(self.heat,
self.nova,
- self.neutron,
+ self.neutron_client,
self.keystone,
stack,
project_name)
@@ -233,10 +308,11 @@ class OpenStackSFC:
break
if port_name is None:
- err_msg = "The VNF {} does not have any suitable port {} " \
- "for floating IP assignment".format(
- name,
- 'with ip any of ' + str(ips) if ips else '')
+ err_msg = ("The VNF {} does not have any suitable port {} "
+ "for floating IP assignment"
+ .format(name,
+ 'with ip any of ' +
+ str(ips) if ips else ''))
logger.error(err_msg)
raise Exception(err_msg)
@@ -249,11 +325,12 @@ class OpenStackSFC:
return fips
- def get_client_port(self, vm, vm_creator):
+ def get_instance_port(self, vm, vm_creator, port_name=None):
'''
Get the neutron port id of the client
'''
- port_name = vm.name + "-port"
+ if not port_name:
+ port_name = vm.name + "-port"
port = vm_creator.get_port_by_name(port_name)
if port is not None:
return port
@@ -265,13 +342,209 @@ class OpenStackSFC:
def delete_all_security_groups(self):
'''
Deletes all the available security groups
-
Needed until this bug is fixed:
https://bugs.launchpad.net/networking-odl/+bug/1763705
'''
- sec_groups = neutron_utils.list_security_groups(self.neutron)
+ logger.info("Deleting remaining security groups...")
+ sec_groups = self.conn.network.security_groups()
for sg in sec_groups:
- neutron_utils.delete_security_group(self.neutron, sg)
+ self.conn.network.delete_security_group(sg)
+
+ def wait_for_vnf(self, vnf_creator):
+ '''
+ Waits for VNF to become active
+ '''
+ return vnf_creator.vm_active(block=True, poll_interval=5)
+
+ def create_port_groups(self, vnf_ports, vm_instance):
+ '''
+ Creates a networking-sfc port pair and group
+ '''
+ logger.info("Creating the port pairs...")
+ port_pair = dict()
+ port_pair['name'] = vm_instance.name + '-connection-points'
+ port_pair['description'] = 'port pair for ' + vm_instance.name
+
+ # In the symmetric testcase ingress != egress (VNF has 2 interfaces)
+ if len(vnf_ports) == 1:
+ port_pair['ingress'] = vnf_ports[0].id
+ port_pair['egress'] = vnf_ports[0].id
+ elif len(vnf_ports) == 2:
+ port_pair['ingress'] = vnf_ports[0].id
+ port_pair['egress'] = vnf_ports[1].id
+ else:
+ logger.error("Only SFs with one or two ports are supported")
+ raise Exception("Failed to create port pairs")
+ port_pair_info = \
+ self.neutron_client.create_sfc_port_pair({'port_pair': port_pair})
+ if not port_pair_info:
+ logger.warning("Chain creation failed due to port pair "
+ "creation failed for vnf %(vnf)s",
+ {'vnf': vm_instance.name})
+ return None
+
+ # Avoid race conditions by checking the port pair is already committed
+ iterations = 5
+ found_it = False
+ for _ in range(iterations):
+ pp_list = self.neutron_client.list_sfc_port_pairs()['port_pairs']
+ for pp in pp_list:
+ if pp['id'] == port_pair_info['port_pair']['id']:
+ found_it = True
+ break
+ if found_it:
+ break
+ else:
+ time.sleep(3)
+
+ if not found_it:
+ raise Exception("Port pair was not committed in openstack")
+
+ logger.info("Creating the port pair groups for %s" % vm_instance.name)
+
+ port_pair_group = {}
+ port_pair_group['name'] = vm_instance.name + '-port-pair-group'
+ port_pair_group['description'] = \
+ 'port pair group for ' + vm_instance.name
+ port_pair_group['port_pairs'] = []
+ port_pair_group['port_pairs'].append(port_pair_info['port_pair']['id'])
+ ppg_config = {'port_pair_group': port_pair_group}
+ port_pair_group_info = \
+ self.neutron_client.create_sfc_port_pair_group(ppg_config)
+ if not port_pair_group_info:
+ logger.warning("Chain creation failed due to port pair group "
+ "creation failed for vnf "
+ "{}".format(vm_instance.name))
+ return None
+
+ return port_pair_group_info['port_pair_group']['id']
+
+ def create_classifier(self, neutron_port, port, protocol, fc_name,
+ symmetrical, server_port=None, server_ip=None):
+ '''
+ Create the classifier
+ '''
+ logger.info("Creating the classifier...")
+
+ if symmetrical:
+ sfc_classifier_params = {'name': fc_name,
+ 'destination_ip_prefix': server_ip,
+ 'logical_source_port': neutron_port,
+ 'logical_destination_port': server_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'protocol': protocol}
+ else:
+ sfc_classifier_params = {'name': fc_name,
+ 'logical_source_port': neutron_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'protocol': protocol}
+
+ fc_config = {'flow_classifier': sfc_classifier_params}
+ self.neutron_client.create_sfc_flow_classifier(fc_config)
+
+ def create_chain(self, port_groups, neutron_port, port, protocol,
+ vnffg_name, symmetrical, server_port=None,
+ server_ip=None):
+ '''
+ Create the flow classifier and the port chain
+ '''
+ logger.info("Creating the classifier...")
+
+ if symmetrical:
+ sfc_classifier_params = {'name': vnffg_name + '-classifier',
+ 'destination_ip_prefix': server_ip,
+ 'logical_source_port': neutron_port,
+ 'logical_destination_port': server_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'protocol': protocol}
+ else:
+ sfc_classifier_params = {'name': vnffg_name + '-classifier',
+ 'logical_source_port': neutron_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'protocol': protocol}
+
+ fc_config = {'flow_classifier': sfc_classifier_params}
+ fc_info = \
+ self.neutron_client.create_sfc_flow_classifier(fc_config)
+
+ logger.info("Creating the chain...")
+ port_chain = {}
+ port_chain['name'] = vnffg_name + '-port-chain'
+ port_chain['description'] = 'port-chain for SFC'
+ port_chain['port_pair_groups'] = port_groups
+ port_chain['flow_classifiers'] = []
+ port_chain['flow_classifiers'].append(fc_info['flow_classifier']['id'])
+ if symmetrical:
+ port_chain['chain_parameters'] = {}
+ port_chain['chain_parameters']['symmetric'] = True
+ chain_config = {'port_chain': port_chain}
+ return self.neutron_client.create_sfc_port_chain(chain_config)
+
+ def update_chain(self, vnffg_name, fc_name, symmetrical):
+ '''
+ Update the new Flow Classifier ID
+ '''
+ fc_id = self.neutron_client.find_resource('flow_classifier',
+ fc_name)['id']
+ logger.info("Update the chain...")
+ port_chain = {}
+ port_chain['name'] = vnffg_name + '-port-chain'
+ port_chain['flow_classifiers'] = []
+ port_chain['flow_classifiers'].append(fc_id)
+ if symmetrical:
+ port_chain['chain_parameters'] = {}
+ port_chain['chain_parameters']['symmetric'] = True
+ chain_config = {'port_chain': port_chain}
+ pc_id = self.neutron_client.find_resource('port_chain',
+ port_chain['name'])['id']
+ return self.neutron_client.update_sfc_port_chain(pc_id, chain_config)
+
+ def swap_classifiers(self, vnffg_1_name, vnffg_2_name, symmetric=False):
+
+ '''
+ Swap Classifiers
+ '''
+ logger.info("Swap classifiers...")
+
+ self.update_chain(vnffg_1_name, 'dummy', symmetric)
+ vnffg_1_classifier_name = vnffg_1_name + '-classifier'
+ self.update_chain(vnffg_2_name, vnffg_1_classifier_name, symmetric)
+ vnffg_2_classifier_name = vnffg_2_name + '-classifier'
+ self.update_chain(vnffg_1_name, vnffg_2_classifier_name, symmetric)
+
+ def delete_port_groups(self):
+ '''
+ Delete all port groups and port pairs
+ '''
+ logger.info("Deleting the port groups...")
+ ppg_list = self.neutron_client.\
+ list_sfc_port_pair_groups()['port_pair_groups']
+ for ppg in ppg_list:
+ self.neutron_client.delete_sfc_port_pair_group(ppg['id'])
+
+ logger.info("Deleting the port pairs...")
+ pp_list = self.neutron_client.list_sfc_port_pairs()['port_pairs']
+ for pp in pp_list:
+ self.neutron_client.delete_sfc_port_pair(pp['id'])
+
+ def delete_chain(self):
+ '''
+ Delete the classifiers and the chains
+ '''
+ logger.info("Deleting the chain...")
+ pc_list = self.neutron_client.list_sfc_port_chains()['port_chains']
+ for pc in pc_list:
+ self.neutron_client.delete_sfc_port_chain(pc['id'])
+
+ logger.info("Deleting the classifiers...")
+ fc_list = self.neutron_client.\
+ list_sfc_flow_classifiers()['flow_classifiers']
+ for fc in fc_list:
+ self.neutron_client.delete_sfc_flow_classifier(fc['id'])
# TACKER SECTION #
@@ -456,7 +729,6 @@ def get_vnf_ip(tacker_client, vnf_id=None, vnf_name=None):
"""
Get the management ip of the first VNF component as obtained from the
tacker REST API:
-
{
"vnf": {
...
@@ -473,7 +745,7 @@ def wait_for_vnf(tacker_client, vnf_id=None, vnf_name=None, timeout=100):
vnf = get_vnf(tacker_client, vnf_id, vnf_name)
if vnf is None:
raise Exception("Could not retrieve VNF - id='%s', name='%s'"
- % vnf_id, vnf_name)
+ % (vnf_id, vnf_name))
logger.info('Waiting for vnf {0}'.format(str(vnf)))
while vnf['status'] != 'ACTIVE' and timeout >= 0:
if vnf['status'] == 'ERROR':
@@ -673,8 +945,7 @@ def register_vim(tacker_client, vim_file=None):
create_vim(tacker_client, vim_file=tmp_file)
-def create_vnf_in_av_zone(
- tacker_client,
+def create_vnf_in_av_zone(tacker_client,
vnf_name,
vnfd_name,
vim_name,
@@ -686,9 +957,7 @@ def create_vnf_in_av_zone(
param_file = os.path.join(
'/tmp',
'param_{0}.json'.format(av_zone.replace('::', '_')))
- data = {
- 'zone': av_zone
- }
+ data = {'zone': av_zone}
with open(param_file, 'w+') as f:
json.dump(data, f)
create_vnf(tacker_client,
diff --git a/sfc/lib/test_utils.py b/sfc/lib/test_utils.py
index 36b52755..ed50c390 100644
--- a/sfc/lib/test_utils.py
+++ b/sfc/lib/test_utils.py
@@ -230,11 +230,11 @@ def check_ssh(ips, retries=100):
def fill_installer_dict(installer_type):
- default_string = "defaults.installer.{}.".format(installer_type)
- installer_yaml_fields = {
- "user": default_string+"user",
- "password": default_string+"password",
- "cluster": default_string+"cluster",
- "pkey_file": default_string+"pkey_file"
- }
- return installer_yaml_fields
+ default_string = "defaults.installer.{}.".format(installer_type)
+ installer_yaml_fields = {
+ "user": default_string+"user",
+ "password": default_string+"password",
+ "cluster": default_string+"cluster",
+ "pkey_file": default_string+"pkey_file"
+ }
+ return installer_yaml_fields
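
fill_installer_dict only builds the YAML key paths that CommonConfig later resolves against config.yaml, for example:

    fill_installer_dict('osa')
    # {'user': 'defaults.installer.osa.user',
    #  'password': 'defaults.installer.osa.password',
    #  'cluster': 'defaults.installer.osa.cluster',
    #  'pkey_file': 'defaults.installer.osa.pkey_file'}
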
diff --git a/sfc/tests/NAME_tests.py b/sfc/tests/NAME_tests.py
deleted file mode 100644
index e95004bc..00000000
--- a/sfc/tests/NAME_tests.py
+++ /dev/null
@@ -1,11 +0,0 @@
-
-def setup():
- print "SETUP!"
-
-
-def teardown():
- print "TEAR DOWN!"
-
-
-def test_basic():
- print "I RAN!"
diff --git a/sfc/tests/functest/README.tests b/sfc/tests/functest/README.tests
index d4e3df3e..f39d8888 100644
--- a/sfc/tests/functest/README.tests
+++ b/sfc/tests/functest/README.tests
@@ -34,15 +34,36 @@ will be running a firewall that blocks the traffic in a specific port (e.g.
33333). A symmetric service chain routing the traffic through this SF will be
created as well.
-1st check: The client is able to reach the server using a source port different
-from the one that the firewall blocks (e.g 22222), and the response gets back
-to the client.
+1st check: The client is able to reach the server and the response gets back
+to the client. Here the firewall is running without blocking any port.
-2nd check: The client is able to reach the server using the source port that
-the firewall blocks, but responses back from the server are blocked, as the
-symmetric service chain makes them go through the firewall that blocks on the
-destination port initially used as source port by the client (e.g. 33333).
+2nd check: The client is not able to reach the server as the firewall
+is configured to block traffic on port 80, and the request from the client
+is blocked, as the symmetric service chain makes it go through the firewall.
-If the client is able to receive the response, it would be a symptom of the
+If the client is able to reach the server, it would be a symptom of the
+symmetric chain not working, as traffic would be flowing from client to server
+directly without traversing the SF.
+
+3rd check: The client is able to reach the server, as the firewall
+is configured to block traffic on port 22222, and the response from the server
+is blocked.
+
+If the server is able to reach the client, it would be a symptom of the
symmetric chain not working, as traffic would be flowing from server to client
directly without traversing the SF.
+
+4th check: The client is able to reach the server and the response gets back
+to the client. Like the 1st check, this verifies that the test ends with the
+same configuration as at the beginning.
+
+
+## TEST DELETION - sfc_chain_deletion ##
+
+One client and one server are created using nova. Then an SF is created using tacker.
+A service chain which routes the traffic through this SF is created as well.
+After that, the chain is deleted and re-created.
+
+vxlan_tool is started in the SF and HTTP traffic is sent from the client to the server.
+If it works, the vxlan_tool is modified to block HTTP traffic.
+The request is tried again and should fail because the packets are dropped.
\ No newline at end of file
diff --git a/sfc/tests/functest/config.yaml b/sfc/tests/functest/config.yaml
index 9c743553..021b4c39 100644
--- a/sfc/tests/functest/config.yaml
+++ b/sfc/tests/functest/config.yaml
@@ -25,6 +25,10 @@ defaults:
vnfd-dir: "vnfd-templates"
vnfd-default-params-file: "test-vnfd-default-params.yaml"
+ # mano_component can be [tacker, no-mano]. When set to no-mano,
+ # networking-sfc is used
+ mano_component: "no-mano"
+
# [OPTIONAL] Only when deploying VNFs without the default image (vxlan_tool)
# vnf_image_name: xxx
# vnf_image_format: yyy
@@ -56,7 +60,7 @@ testcases:
sfc_two_chains_SSH_and_HTTP:
class_name: "SfcTwoChainsSSHandHTTP"
- enabled: false
+ enabled: true
order: 1
description: "ODL-SFC tests with two chains and one SF per chain"
net_name: example-net
@@ -80,7 +84,7 @@ testcases:
sfc_symmetric_chain:
class_name: "SfcSymmetricChain"
- enabled: false
+ enabled: true
order: 2
description: "Verify the behavior of a symmetric service chain"
net_name: example-net
@@ -102,7 +106,7 @@ testcases:
sfc_chain_deletion:
class_name: "SfcChainDeletion"
- enabled: false
+ enabled: true
order: 3
description: "Verify if chains work correctly after deleting one"
net_name: example-net
@@ -113,7 +117,6 @@ testcases:
secgroup_descr: "Example Security group"
vnf_names:
- 'testVNF1'
- - 'testVNF2'
supported_installers:
- 'fuel'
- 'apex'
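The new mano_component switch drives a simple two-way dispatch throughout the test code; the pattern is visible in sfc_parent_function.py further down. A hedged sketch of that dispatch, with a stand-in config object:

class CommonConfig(object):
    # mirrors the new config.yaml default above
    mano_component = 'no-mano'

COMMON_CONFIG = CommonConfig()

def create_service_chain():
    if COMMON_CONFIG.mano_component == 'tacker':
        # tacker path: register VNFFGD templates, then instantiate a VNFFG
        print('creating vnffgd/vnffg via the tacker client')
    elif COMMON_CONFIG.mano_component == 'no-mano':
        # networking-sfc path: build port groups and a port chain directly
        print('creating port groups and a port chain via networking-sfc')

create_service_chain()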
diff --git a/sfc/tests/functest/pod.yaml.sample b/sfc/tests/functest/pod.yaml.sample
new file mode 100644
index 00000000..aa5fddad
--- /dev/null
+++ b/sfc/tests/functest/pod.yaml.sample
@@ -0,0 +1,58 @@
+# A sample config file describing the POD information is located in the dovetail project:
+# https://github.com/opnfv/dovetail/blob/master/etc/userconfig/pod.yaml.sample
+# On top of that template, node0 is used here with the role Host.
+# After it, the proper number of controller nodes should be defined, and
+# at the end the respective compute nodes.
+
+nodes:
+-
+ # This cannot be changed and must be node0.
+ name: node0
+
+ # This must be Host.
+ role: Host
+
+ # This is the instance IP of the node which acts as the Host.
+ ip: xx.xx.xx.xx
+
+ # User name of the user on this node. This user **must** have sudo privileges.
+ user: root
+
+ # keyfile of the user.
+ key_filename: /root/.ssh/id_rsa
+
+-
+ # This cannot be changed and must be node1.
+ name: node1
+
+ # This must be Controller.
+ role: Controller
+
+ # This is the instance IP of a controller node
+ ip: xx.xx.xx.xx
+
+ # User name of the user on this node. This user **must** have sudo privileges.
+ user: root
+
+ # keyfile of the user.
+ key_filename: /root/.ssh/id_rsa
+
+-
+ # This cannot be changed and must be node2.
+ name: node2
+
+ # This must be Compute.
+ role: Compute
+
+ # This is the instance IP of a compute node
+ ip: xx.xx.xx.xx
+
+ # User name of the user on this node. This user **must** have sudo privileges.
+ user: root
+
+ # keyfile of the user.
+ key_filename: /root/.ssh/id_rsa
+
+ # Private ssh key for accessing the controller nodes. If there is no
+ # keyfile for that user, the password of the user could be used instead.
+ # password: root
\ No newline at end of file
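For the configByUser installer type, a file like this sample is all the test framework has to go on. A short sketch (assuming PyYAML and a file named pod.yaml) of how the nodes can be loaded and split by role, mirroring the filtering done in run_sfc_tests.py below:

import yaml

with open('pod.yaml') as f:
    pod = yaml.safe_load(f)

controllers = [n for n in pod['nodes'] if n['role'] == 'Controller']
computes = [n for n in pod['nodes'] if n['role'] == 'Compute']

for node in controllers + computes:
    # each node dict carries name, role, ip, user and key_filename
    print('%s (%s) at %s' % (node['name'], node['role'], node['ip']))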
diff --git a/sfc/tests/functest/run_sfc_tests.py b/sfc/tests/functest/run_sfc_tests.py
index 6fe211bf..7f0eaa8a 100644
--- a/sfc/tests/functest/run_sfc_tests.py
+++ b/sfc/tests/functest/run_sfc_tests.py
@@ -104,35 +104,49 @@ class SfcFunctest(testcase.TestCase):
time.sleep(10)
def __disable_heat_resource_finder_cache(self, nodes, installer_type):
- controllers = [node for node in nodes if node.is_controller()]
+
+ if COMMON_CONFIG.installer_type != 'configByUser':
+ controllers = [node for node in nodes if node.is_controller()]
+ else:
+ controllers = []
+ for n in COMMON_CONFIG.nodes_pod:
+ if n['role'] == 'Controller':
+ controllers.append(n)
+ logger.info("CONTROLLER : %s", controllers)
if installer_type == 'apex':
self.__disable_heat_resource_finder_cache_apex(controllers)
elif installer_type == "fuel":
self.__disable_heat_resource_finder_cache_fuel(controllers)
- elif installer_type == "osa" or "compass":
+ elif installer_type in ("osa", "compass", "configByUser"):
pass
else:
raise Exception('Unsupported installer')
def run(self):
- deploymentHandler = DeploymentFactory.get_handler(
- COMMON_CONFIG.installer_type,
- COMMON_CONFIG.installer_ip,
- COMMON_CONFIG.installer_user,
- COMMON_CONFIG.installer_password,
- COMMON_CONFIG.installer_key_file)
-
cluster = COMMON_CONFIG.installer_cluster
- nodes = (deploymentHandler.get_nodes({'cluster': cluster})
- if cluster is not None
- else deploymentHandler.get_nodes())
+ if COMMON_CONFIG.installer_type != 'configByUser':
+ deploymentHandler = DeploymentFactory.get_handler(
+ COMMON_CONFIG.installer_type,
+ COMMON_CONFIG.installer_ip,
+ COMMON_CONFIG.installer_user,
+ COMMON_CONFIG.installer_password,
+ COMMON_CONFIG.installer_key_file)
+
+ nodes = (deploymentHandler.get_nodes({'cluster': cluster})
+ if cluster is not None
+ else deploymentHandler.get_nodes())
+ self.__disable_heat_resource_finder_cache(nodes,
+ COMMON_CONFIG.
+ installer_type)
+ odl_ip, odl_port = odl_utils.get_odl_ip_port(nodes)
- self.__disable_heat_resource_finder_cache(nodes,
- COMMON_CONFIG.installer_type)
-
- odl_ip, odl_port = odl_utils.get_odl_ip_port(nodes)
+ else:
+ nodes = COMMON_CONFIG.nodes_pod
+ self.__disable_heat_resource_finder_cache(nodes, "configByUser")
+ odl_ip, odl_port = odl_utils. \
+ get_odl_ip_port_no_installer(COMMON_CONFIG.nodes_pod)
ovs_logger = ovs_log.OVSLogger(
os.path.join(COMMON_CONFIG.sfc_test_dir, 'ovs-logs'),
@@ -176,7 +190,10 @@ class SfcFunctest(testcase.TestCase):
result = {'status': 'FAILED'}
creators = tc_instance.get_creators()
if self.cleanup_flag is True:
- sfc_cleanup.cleanup(creators, odl_ip=odl_ip,
+ sfc_cleanup.cleanup(testcase_config,
+ creators,
+ COMMON_CONFIG.mano_component,
+ odl_ip=odl_ip,
odl_port=odl_port)
cleanup_run_flag = True
end_time = time.time()
@@ -198,7 +215,10 @@ class SfcFunctest(testcase.TestCase):
self.details.update({test_name: dic})
if cleanup_run_flag is not True and self.cleanup_flag is True:
- sfc_cleanup.cleanup(creators, odl_ip=odl_ip,
+ sfc_cleanup.cleanup(testcase_config,
+ creators,
+ COMMON_CONFIG.mano_component,
+ odl_ip=odl_ip,
odl_port=odl_port)
self.stop_time = time.time()
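A note on the installer check rewritten above: the earlier '== "osa" or "compass"' form is a classic Python pitfall, since a bare non-empty string is truthy and the branch would match any installer. The snippet below demonstrates why the membership test is the correct form:

installer_type = 'apex'

# the naive chained comparison is always truthy: it parses as
# (installer_type == 'osa') or 'compass' or 'configByUser'
broken = installer_type == 'osa' or 'compass' or 'configByUser'
correct = installer_type in ('osa', 'compass', 'configByUser')

print(broken)   # prints 'compass', a truthy value, even though type is apex
print(correct)  # prints False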
diff --git a/sfc/tests/functest/setup_scripts/__init__.py b/sfc/tests/functest/setup_scripts/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/sfc/tests/functest/setup_scripts/__init__.py
+++ /dev/null
diff --git a/sfc/tests/functest/setup_scripts/compute_presetup_CI.bash b/sfc/tests/functest/setup_scripts/compute_presetup_CI.bash
deleted file mode 100644
index 36148aa1..00000000
--- a/sfc/tests/functest/setup_scripts/compute_presetup_CI.bash
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-
-# This script must be use with vxlan-gpe + nsh. Once we have eth + nsh support
-# in ODL, we will not need it anymore
-
-set -e
-ssh_options='-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
-BASEDIR=`dirname $0`
-INSTALLER_IP=${INSTALLER_IP:-10.20.0.2}
-
-pushd $BASEDIR
-#ip=`sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'fuel node'|grep compute|\
-#awk '{print $10}' | head -1`
-
-ip=$1
-echo $ip
-#sshpass -p r00tme scp $ssh_options correct_classifier.bash ${INSTALLER_IP}:/root
-#sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'scp correct_classifier.bash '"$ip"':/root'
-
-sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'ssh root@'"$ip"' ifconfig br-int up'
-output=$(sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'ssh root@'"$ip"' ip route | \
-cut -d" " -f1 | grep 11.0.0.0' ; exit 0)
-
-if [ -z "$output" ]; then
-sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'ssh root@'"$ip"' ip route add 11.0.0.0/24 \
-dev br-int'
-fi
diff --git a/sfc/tests/functest/setup_scripts/delete.sh b/sfc/tests/functest/setup_scripts/delete.sh
deleted file mode 100644
index 3333c52b..00000000
--- a/sfc/tests/functest/setup_scripts/delete.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-# Remember to source the env variables $creds before
-FILE=$(readlink -f $0)
-FILE_PATH=$(dirname $FILE)
-cd $FILE_PATH
-python ../../../lib/cleanup.py $1 $2
-openstack server delete client
-openstack server delete server
-for line in $(openstack floating ip list);do openstack floating ip delete $line;done
diff --git a/sfc/tests/functest/setup_scripts/delete_symmetric.sh b/sfc/tests/functest/setup_scripts/delete_symmetric.sh
deleted file mode 100644
index b0aa4d81..00000000
--- a/sfc/tests/functest/setup_scripts/delete_symmetric.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-# Remember to source the env variables $creds before
-tacker sfc-classifier-delete red_http
-tacker sfc-classifier-delete red_http_reverse
-tacker sfc-delete red
-tacker vnf-delete testVNF1
-tacker vnfd-delete test-vnfd1
-nova delete client
-nova delete server
-for line in $(neutron floatingip-list | cut -d" " -f2);do neutron floatingip-delete $line;done
diff --git a/sfc/tests/functest/setup_scripts/prepare_odl_sfc.py b/sfc/tests/functest/setup_scripts/prepare_odl_sfc.py
deleted file mode 100644
index 1ddf36a6..00000000
--- a/sfc/tests/functest/setup_scripts/prepare_odl_sfc.py
+++ /dev/null
@@ -1,92 +0,0 @@
-#
-# Author: George Paraskevopoulos (geopar@intracom-telecom.com)
-# Manuel Buil (manuel.buil@ericsson.com)
-# Prepares the controller and the compute nodes for the odl-sfc testcase
-#
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-
-import os
-import paramiko
-import subprocess
-import sys
-
-import functest.utils.functest_logger as ft_logger
-
-
-logger = ft_logger.Logger(__name__).getLogger()
-
-SFC_REPO_DIR = "/home/opnfv/repos/sfc"
-
-try:
- INSTALLER_IP = os.environ['INSTALLER_IP']
-except:
- logger.debug("INSTALLER_IP does not exist. We create 10.20.0.2")
- INSTALLER_IP = "10.20.0.2"
-
-os.environ['ODL_SFC_LOG'] = "/home/opnfv/functest/results/sfc.log"
-os.environ['ODL_SFC_DIR'] = os.path.join(SFC_REPO_DIR,
- "sfc/tests/functest")
-SETUP_SCRIPTS_DIR = os.path.join(os.environ['ODL_SFC_DIR'], 'setup_scripts')
-
-command = SETUP_SCRIPTS_DIR + ("/server_presetup_CI.bash | "
- "tee -a ${ODL_SFC_LOG} 1>/dev/null 2>&1")
-
-output = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
-
-# This code is for debugging purposes
-# for line in iter(output.stdout.readline, ''):
-# i = line.rstrip()
-# print(i)
-
-# Make sure the process is finished before checking the returncode
-if not output.poll():
- output.wait()
-
-# Get return value
-if output.returncode:
- print("The presetup of the server did not work")
- sys.exit(output.returncode)
-
-logger.info("The presetup of the server worked ")
-
-ssh_options = "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
-ssh = paramiko.SSHClient()
-ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-
-try:
- ssh.connect(INSTALLER_IP, username="root",
- password="r00tme", timeout=2)
- command = "fuel node | grep compute | awk '{print $10}'"
- logger.info("Executing ssh to collect the compute IPs")
- (stdin, stdout, stderr) = ssh.exec_command(command)
-except:
- logger.debug("Something went wrong in the ssh to collect the computes IP")
-
-output = stdout.readlines()
-for ip in output:
- command = SETUP_SCRIPTS_DIR + ("/compute_presetup_CI.bash " + ip.rstrip() +
- "| tee -a ${ODL_SFC_LOG} 1>/dev/null 2>&1")
-
- output = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
-
-# This code is for debugging purposes
-# for line in iter(output.stdout.readline, ''):
-# print(line)
-# sys.stdout.flush()
-
- output.stdout.close()
-
- if not (output.poll()):
- output.wait()
-
- # Get return value
- if output.returncode:
- print("The compute config did not work on compute %s" % ip)
- sys.exit(output.returncode)
-
-sys.exit(0)
diff --git a/sfc/tests/functest/setup_scripts/server_presetup_CI.bash b/sfc/tests/functest/setup_scripts/server_presetup_CI.bash
deleted file mode 100644
index 240353f5..00000000
--- a/sfc/tests/functest/setup_scripts/server_presetup_CI.bash
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-set -e
-ssh_options='-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
-BASEDIR=`dirname $0`
-INSTALLER_IP=${INSTALLER_IP:-10.20.0.2}
-
-pushd $BASEDIR
-ip=$(sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'fuel node'|grep controller|awk '{print $10}' | head -1)
-echo $ip
-
-sshpass -p r00tme scp $ssh_options delete.sh ${INSTALLER_IP}:/root
-sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'scp '"$ip"':/root/tackerc .'
-sshpass -p r00tme scp $ssh_options ${INSTALLER_IP}:/root/tackerc $BASEDIR
diff --git a/sfc/tests/functest/sfc_chain_deletion.py b/sfc/tests/functest/sfc_chain_deletion.py
index 44982c91..5f73d0c7 100644
--- a/sfc/tests/functest/sfc_chain_deletion.py
+++ b/sfc/tests/functest/sfc_chain_deletion.py
@@ -32,18 +32,18 @@ class SfcChainDeletion(sfc_parent_function.SfcCommonTestCase):
def run(self):
logger.info("The test scenario %s is starting", __name__)
- self.create_custom_vnfd(self.testcase_config.test_vnfd_red,
- 'test-vnfd1')
- self.create_custom_av(self.vnfs[0], 'test-vnfd1', 'test-vim')
+ self.register_vnf_template(self.testcase_config.test_vnfd_red,
+ 'test-vnfd1')
+ self.create_vnf(self.vnfs[0], 'test-vnfd1', 'test-vim')
- self.create_chain(self.testcase_config)
+ self.create_vnffg(self.testcase_config.test_vnffgd_red, 'red',
+ 'red_http', port=80, protocol='tcp', symmetric=False)
t1 = threading.Thread(target=odl_utils.wait_for_classification_rules,
args=(self.ovs_logger, self.compute_nodes,
self.odl_ip, self.odl_port,
- self.client_instance.compute_host,
+ self.client_instance.hypervisor_hostname,
[self.neutron_port],))
-
try:
t1.start()
except Exception as e:
@@ -63,12 +63,14 @@ class SfcChainDeletion(sfc_parent_function.SfcCommonTestCase):
self.remove_vnffg('red_http', 'red')
self.check_deletion()
- self.create_chain(self.testcase_config)
+ self.create_vnffg(self.testcase_config.test_vnffgd_red, 'blue',
+ 'blue_http', port=80, protocol='tcp',
+ symmetric=False)
t2 = threading.Thread(target=odl_utils.wait_for_classification_rules,
args=(self.ovs_logger, self.compute_nodes,
self.odl_ip, self.odl_port,
- self.client_instance.compute_host,
+ self.client_instance.hypervisor_hostname,
[self.neutron_port],))
try:
t2.start()
diff --git a/sfc/tests/functest/sfc_one_chain_two_service_functions.py b/sfc/tests/functest/sfc_one_chain_two_service_functions.py
index bc36c494..38fa3fef 100644
--- a/sfc/tests/functest/sfc_one_chain_two_service_functions.py
+++ b/sfc/tests/functest/sfc_one_chain_two_service_functions.py
@@ -29,22 +29,23 @@ class SfcOneChainTwoServiceTC(sfc_parent_function.SfcCommonTestCase):
def run(self):
logger.info("The test scenario %s is starting", __name__)
- self.create_custom_vnfd(self.testcase_config.test_vnfd_red,
- 'test-vnfd1')
- self.create_custom_vnfd(self.testcase_config.test_vnfd_blue,
- 'test-vnfd2')
- self.create_custom_av(self.vnfs[0], 'test-vnfd1', 'test-vim')
- self.create_custom_av(self.vnfs[1], 'test-vnfd2', 'test-vim')
+ self.register_vnf_template(self.testcase_config.test_vnfd_red,
+ 'test-vnfd1')
+ self.register_vnf_template(self.testcase_config.test_vnfd_blue,
+ 'test-vnfd2')
+
+ self.create_vnf(self.vnfs[0], 'test-vnfd1', 'test-vim')
+ self.create_vnf(self.vnfs[1], 'test-vnfd2', 'test-vim')
self.create_vnffg(self.testcase_config.test_vnffgd_red, 'red',
- 'red_http')
+ 'red_http', port=80, protocol='tcp', symmetric=False)
# Start measuring the time it takes to implement the
# classification rules
t1 = threading.Thread(target=odl_utils.wait_for_classification_rules,
args=(self.ovs_logger, self.compute_nodes,
self.odl_ip, self.odl_port,
- self.client_instance.compute_host,
+ self.client_instance.hypervisor_hostname,
[self.neutron_port],))
try:
t1.start()
diff --git a/sfc/tests/functest/sfc_parent_function.py b/sfc/tests/functest/sfc_parent_function.py
index aa08e831..410c0e71 100644
--- a/sfc/tests/functest/sfc_parent_function.py
+++ b/sfc/tests/functest/sfc_parent_function.py
@@ -47,14 +47,16 @@ class SfcCommonTestCase(object):
self.server_creator = None
self.client_instance = None
self.client_creator = None
- self.server_ip = None
self.vnf_id = None
self.client_floating_ip = None
self.server_floating_ip = None
- self.fips_sfs = None
- self.neutron_port = None
+ self.fips_sfs = []
+ self.vnf_objects = dict()
self.testcase_config = testcase_config
self.vnfs = vnfs
+ self.port_server = None
+ self.server_ip = None
+ self.port_client = None
self.prepare_env(testcase_config, supported_installers, vnfs)
@@ -68,37 +70,66 @@ class SfcCommonTestCase(object):
:return: Environment preparation
"""
- deployment_handler = DeploymentFactory.get_handler(
- COMMON_CONFIG.installer_type,
- COMMON_CONFIG.installer_ip,
- COMMON_CONFIG.installer_user,
- COMMON_CONFIG.installer_password,
- COMMON_CONFIG.installer_key_file)
+ if COMMON_CONFIG.installer_type != 'configByUser':
+ deployment_handler = DeploymentFactory.get_handler(
+ COMMON_CONFIG.installer_type,
+ COMMON_CONFIG.installer_ip,
+ COMMON_CONFIG.installer_user,
+ COMMON_CONFIG.installer_password,
+ COMMON_CONFIG.installer_key_file)
+
+ installer_type = os.environ.get("INSTALLER_TYPE")
+ installer_ip = os.environ.get("INSTALLER_IP")
+ cluster = COMMON_CONFIG.installer_cluster
+ openstack_nodes = (deployment_handler.
+ get_nodes({'cluster': cluster})
+ if cluster is not None
+ else deployment_handler.get_nodes())
+
+ self.compute_nodes = [node for node in openstack_nodes
+ if node.is_compute()]
+
+ for compute in self.compute_nodes:
+ logger.info("This is a compute: %s" % compute.ip)
+
+ controller_nodes = [node for node in openstack_nodes
+ if node.is_controller()]
+ self.controller_clients = test_utils. \
+ get_ssh_clients(controller_nodes)
+ self.compute_clients = test_utils. \
+ get_ssh_clients(self.compute_nodes)
+
+ self.odl_ip, self.odl_port = odl_utils. \
+ get_odl_ip_port(openstack_nodes)
+
+ else:
+ installer_type = 'configByUser'
+ installer_ip = COMMON_CONFIG.installer_ip
+ openstack_nodes = COMMON_CONFIG.nodes_pod
+ self.compute_nodes = [node for node in
+ COMMON_CONFIG.nodes_pod
+ if node['role'] == 'Compute']
+
+ for compute in self.compute_nodes:
+ logger.info("This is a compute: %s" % compute['ip'])
- installer_type = os.environ.get("INSTALLER_TYPE")
+ controller_nodes = [node for node in openstack_nodes
+ if node['role'] == 'Controller']
+
+ self.odl_ip, self.odl_port = odl_utils. \
+ get_odl_ip_port_no_installer(openstack_nodes)
if installer_type not in supported_installers:
- raise Exception(
- '\033[91mYour installer is not supported yet\033[0m')
+ if installer_type != 'configByUser':
+ raise Exception(
+ '\033[91mYour installer is not supported yet\033[0m')
- installer_ip = os.environ.get("INSTALLER_IP")
if not installer_ip:
logger.error(
'\033[91minstaller ip is not set\033[0m')
raise Exception(
'\033[91mexport INSTALLER_IP=<ip>\033[0m')
- cluster = COMMON_CONFIG.installer_cluster
- openstack_nodes = (deployment_handler.get_nodes({'cluster': cluster})
- if cluster is not None
- else deployment_handler.get_nodes())
-
- self.compute_nodes = [node for node in openstack_nodes
- if node.is_compute()]
-
- for compute in self.compute_nodes:
- logger.info("This is a compute: %s" % compute.ip)
-
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
@@ -111,15 +142,10 @@ class SfcCommonTestCase(object):
if not custom_flv:
raise Exception("Failed to create custom flavor")
- controller_nodes = [node for node in openstack_nodes
- if node.is_controller()]
-
- self.controller_clients = test_utils.get_ssh_clients(controller_nodes)
- self.compute_clients = test_utils.get_ssh_clients(self.compute_nodes)
-
- self.tacker_client = os_sfc_utils.get_tacker_client()
- os_sfc_utils.register_vim(self.tacker_client,
- vim_file=COMMON_CONFIG.vim_file)
+ if COMMON_CONFIG.mano_component == 'tacker':
+ self.tacker_client = os_sfc_utils.get_tacker_client()
+ os_sfc_utils.register_vim(self.tacker_client,
+ vim_file=COMMON_CONFIG.vim_file)
self.ovs_logger = ovs_log.OVSLogger(
os.path.join(COMMON_CONFIG.sfc_test_dir, 'ovs-logs'),
@@ -150,7 +176,7 @@ class SfcCommonTestCase(object):
self.creators = openstack_sfc.creators
- self.odl_ip, self.odl_port = odl_utils.get_odl_ip_port(openstack_nodes)
+ odl_utils.get_odl_username_password()
self.default_param_file = os.path.join(
COMMON_CONFIG.sfc_test_dir,
@@ -166,25 +192,53 @@ class SfcCommonTestCase(object):
logger.info('Topology description: {0}'
.format(self.test_topology['description']))
- self.server_instance, self.server_creator = \
+ self.server_instance, port_server = \
openstack_sfc.create_instance(SERVER, COMMON_CONFIG.flavor,
self.image_creator, self.network,
self.sg,
- av_zone=self.test_topology['server'])
+ self.test_topology['server'],
+ [SERVER + '-port'])
- self.client_instance, self.client_creator = \
+ self.client_instance, port_client = \
openstack_sfc.create_instance(CLIENT, COMMON_CONFIG.flavor,
self.image_creator, self.network,
self.sg,
- av_zone=self.test_topology['client'])
+ self.test_topology['client'],
+ [CLIENT + '-port'])
+
logger.info('This test is run with the topology {0}'.format(
self.test_topology['id']))
logger.info('Topology description: {0}'.format(
self.test_topology['description']))
- self.server_ip = self.server_instance.ports[0].ips[0]['ip_address']
- logger.info("Server instance received private ip [{}]".format(
- self.server_ip))
+ if COMMON_CONFIG.installer_type != 'configByUser':
+ self.port_server = port_server[0]
+ self.port_client = port_client[0]
+ port_fixed_ips = self.port_server
+ for ip in port_fixed_ips:
+ self.server_ip = ip.get('ip_address')
+ logger.info("Server instance received private ip [{}]".format(
+ self.server_ip))
+ else:
+ self.port_server = port_server
+ self.port_client = port_client
+ self.server_ip = self.server_instance.ports[0].ips[0]['ip_address']
+ logger.info("Server instance received private ip [{}]".format(
+ self.server_ip))
+
+ def register_vnf_template(self, test_case_name, template_name):
+ """ Register the template which defines the VNF
+
+ :param test_case_name: the name of the test case
+ :param template_name: name of the template
+ """
+
+ if COMMON_CONFIG.mano_component == 'tacker':
+ self.create_custom_vnfd(test_case_name, template_name)
+
+ elif COMMON_CONFIG.mano_component == 'no-mano':
+ # networking-sfc does not have the template concept
+ pass
def create_custom_vnfd(self, test_case_name, vnfd_name):
"""Create VNF Descriptor (VNFD)
@@ -201,12 +255,14 @@ class SfcCommonTestCase(object):
tosca_file=tosca_file,
vnfd_name=vnfd_name)
- def create_custom_av(self, vnf_names, av_member1, av_member2):
- """Create custom 'av'
+ def create_vnf(self, vnf_name, vnfd_name=None, vim_name=None,
+ symmetric=False):
+ """Create custom vnf
- :param vnf_names: names of available vnf(s)
- :param av_member1: the first member of av zone
- :param av_member2: the second member of av zone
+ :param vnf_name: name of the vnf
+ :param vnfd_name: name of the vnfd template (tacker)
+ :param vim_name: name of the vim (tacker)
+ :param symmetric: specifies whether this is part of the symmetric test
:return: None; the VNF is created and tracked by the test case
"""
@@ -215,41 +271,70 @@ class SfcCommonTestCase(object):
logger.info('Topology description: {0}'
.format(self.test_topology['description']))
- os_sfc_utils.create_vnf_in_av_zone(
- self.tacker_client, vnf_names, av_member1, av_member2,
- self.default_param_file, self.test_topology[vnf_names])
-
- self.vnf_id = os_sfc_utils.wait_for_vnf(self.tacker_client,
- vnf_name=vnf_names)
- if self.vnf_id is None:
- raise Exception('ERROR while booting vnfs')
+ if COMMON_CONFIG.mano_component == 'tacker':
+ os_sfc_utils.create_vnf_in_av_zone(
+ self.tacker_client, vnf_name, vnfd_name, vim_name,
+ self.default_param_file, self.test_topology[vnf_name])
+
+ self.vnf_id = os_sfc_utils.wait_for_vnf(self.tacker_client,
+ vnf_name=vnf_name)
+ if self.vnf_id is None:
+ raise Exception('ERROR while booting vnfs')
+
+ elif COMMON_CONFIG.mano_component == 'no-mano':
+ av_zone = self.test_topology[vnf_name]
+ if symmetric:
+ ports = [vnf_name + '-port1', vnf_name + '-port2']
+ else:
+ ports = [vnf_name + '-port']
+ vnf_instance, vnf_port = \
+ openstack_sfc.create_instance(vnf_name, COMMON_CONFIG.flavor,
+ self.vnf_image_creator,
+ self.network,
+ self.sg,
+ av_zone,
+ ports,
+ port_security=False)
+
+ self.vnf_objects[vnf_name] = [vnf_instance, vnf_port]
+ logger.info("Creating VNF with name...%s", vnf_name)
+ logger.info("Port associated with VNF...%s",
+ self.vnf_objects[vnf_name][1])
def assign_floating_ip_client_server(self):
"""Assign floating IPs on the router about server and the client
instances
-
:return: Floating IPs for client and server
"""
-
logger.info("Assigning floating IPs to client and server instances")
self.client_floating_ip = openstack_sfc.assign_floating_ip(
- self.router, self.client_instance, self.client_creator)
+ self.client_instance, self.port_client)
self.server_floating_ip = openstack_sfc.assign_floating_ip(
- self.router, self.server_instance, self.server_creator)
+ self.server_instance, self.port_server)
- def assign_floating_ip_sfs(self, vnf_ip=None):
+ def assign_floating_ip_sfs(self):
"""Assign floating IPs to service function
- :param vnf_ip: IP of vnf - optional
:return: The list fips_sfs consists of the available IPs for service
functions
"""
logger.info("Assigning floating IPs to service functions")
- self.fips_sfs = openstack_sfc.assign_floating_ip_vnfs(self.router,
- vnf_ip)
+ if COMMON_CONFIG.mano_component == 'tacker':
+ vnf_ip = os_sfc_utils.get_vnf_ip(self.tacker_client,
+ vnf_id=self.vnf_id)
+ self.fips_sfs = openstack_sfc.assign_floating_ip_vnfs(self.router,
+ vnf_ip)
+ elif COMMON_CONFIG.mano_component == 'no-mano':
+ for vnf in self.vnfs:
+ # instance object is in [0] and port in [1]
+ vnf_instance = self.vnf_objects[vnf][0]
+ vnf_port = self.vnf_objects[vnf][1]
+ sf_floating_ip = openstack_sfc.\
+ assign_floating_ip(vnf_instance, vnf_port[0])
+ self.fips_sfs.append(sf_floating_ip)
def check_floating_ips(self):
"""Check the responsivness of the floating IPs
@@ -338,37 +423,207 @@ class SfcCommonTestCase(object):
:param par_vnffgd_name: The vnffgd name of network components
:return: Remove the vnffg and vnffgd components
"""
+ if COMMON_CONFIG.mano_component == 'tacker':
+ os_sfc_utils.delete_vnffg(self.tacker_client,
+ vnffg_name=par_vnffg_name)
+
+ os_sfc_utils.delete_vnffgd(self.tacker_client,
+ vnffgd_name=par_vnffgd_name)
+
+ elif COMMON_CONFIG.mano_component == 'no-mano':
+ # TODO: If we had a testcase where only one chain must be removed,
+ # we would need to add the logic. Now it removes all of them.
+ openstack_sfc.delete_chain()
+ openstack_sfc.delete_port_groups()
+
+ def create_classifier(self, fc_name, port=85,
+ protocol='tcp', symmetric=False):
+ """Create the classifier component following the instructions from
+ relevant templates.
- os_sfc_utils.delete_vnffg(self.tacker_client,
- vnffg_name=par_vnffg_name)
+ :param fc_name: The name of the classifier
+ :param port: Input port number
+ :param protocol: Input protocol
+ :param symmetric: Check symmetric
+ :return: Create the classifier component
+ """
- os_sfc_utils.delete_vnffgd(self.tacker_client,
- vnffgd_name=par_vnffgd_name)
+ logger.info("Creating the classifier...")
- def create_vnffg(self, testcase_config_name, vnf_name, conn_name):
+ self.neutron_port = self.port_client
+ if COMMON_CONFIG.mano_component == 'no-mano':
+ openstack_sfc.create_classifier(self.neutron_port.id,
+ port,
+ protocol,
+ fc_name,
+ symmetric)
+
+ elif COMMON_CONFIG.mano_component == 'tacker':
+ logger.info("Creating classifier with tacker is not supported")
+
+ def create_vnffg(self, testcase_config_name, vnffgd_name, vnffg_name,
+ port=80, protocol='tcp', symmetric=False, vnf_index=-1):
"""Create the vnffg components following the instructions from
relevant templates.
:param testcase_config_name: The config input of the test case
- :param vnf_name: The name of the vnf
- :param conn_name: Protocol type / name of the component
+ :param vnffgd_name: The name of the vnffgd template
+ :param vnffg_name: The name for the vnffg
+ :param port: Input port number
+ :param protocol: Input protocol
+ :param symmetric: Check symmetric
+ :param vnf_index: Index to specify vnf
:return: Create the vnffg component
"""
- tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnffgd_dir,
- testcase_config_name)
+ logger.info("Creating the vnffg...")
+
+ if COMMON_CONFIG.mano_component == 'tacker':
+ tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
+ COMMON_CONFIG.vnffgd_dir,
+ testcase_config_name)
+
+ os_sfc_utils.create_vnffgd(self.tacker_client,
+ tosca_file=tosca_file,
+ vnffgd_name=vnffgd_name)
+
+ self.neutron_port = self.port_client
+
+ if symmetric:
+ server_ip_prefix = self.server_ip + '/32'
+
+ os_sfc_utils.create_vnffg_with_param_file(
+ self.tacker_client,
+ vnffgd_name,
+ vnffg_name,
+ self.default_param_file,
+ self.neutron_port.id,
+ server_port=self.port_server.id,
+ server_ip=server_ip_prefix)
+
+ else:
+ os_sfc_utils.create_vnffg_with_param_file(
+ self.tacker_client,
+ vnffgd_name,
+ vnffg_name,
+ self.default_param_file,
+ self.neutron_port.id)
+
+ elif COMMON_CONFIG.mano_component == 'no-mano':
+ logger.info("Creating the vnffg without any mano component...")
+ port_groups = []
+ if vnf_index == -1:
+ for vnf in self.vnfs:
+ # vnf_instance is in [0] and vnf_port in [1]
+ vnf_instance = self.vnf_objects[vnf][0]
+ vnf_port = self.vnf_objects[vnf][1]
+ if symmetric:
+ # VNFs have two ports
+ neutron_port1 = vnf_port[0]
+ neutron_port2 = vnf_port[1]
+ neutron_ports = [neutron_port1, neutron_port2]
+ else:
+ neutron_port1 = vnf_port[0]
+ neutron_ports = [neutron_port1]
+
+ port_group = \
+ openstack_sfc.create_port_groups(neutron_ports,
+ vnf_instance)
+ port_groups.append(port_group)
+
+ else:
+ vnf = self.vnfs[vnf_index]
+ vnf_instance = self.vnf_objects[vnf][0]
+ vnf_port = self.vnf_objects[vnf][1]
+ if symmetric:
+ # VNFs have two ports
+ neutron_port1 = vnf_port[0]
+ neutron_port2 = vnf_port[1]
+ neutron_ports = [neutron_port1, neutron_port2]
+ else:
+ neutron_port1 = vnf_port[0]
+ neutron_ports = [neutron_port1]
+
+ port_group = openstack_sfc.create_port_groups(
+ neutron_ports, vnf_instance)
+ port_groups.append(port_group)
+
+ self.neutron_port = self.port_client
+
+ if symmetric:
+ # We must pass the server_port and server_ip in the symmetric
+ # case. Otherwise ODL does not work well
+ server_ip_prefix = self.server_ip + '/32'
+ openstack_sfc.create_chain(port_groups,
+ self.neutron_port.id,
+ port, protocol, vnffg_name,
+ symmetric,
+ server_port=self.port_server.id,
+ server_ip=server_ip_prefix)
+
+ else:
+ openstack_sfc.create_chain(port_groups,
+ self.neutron_port.id,
+ port, protocol, vnffg_name,
+ symmetric)
+
+ def update_vnffg(self, testcase_config_name, vnffgd_name, vnffg_name,
+ port=80, protocol='tcp', symmetric=False,
+ vnf_index=0, fc_name='red'):
+ """Update the vnffg components following the instructions from
+ relevant templates.
- os_sfc_utils.create_vnffgd(self.tacker_client,
- tosca_file=tosca_file,
- vnffgd_name=vnf_name)
+ :param testcase_config_name: The config input of the test case
+ :param vnffgd_name: The name of the vnffgd template
+ :param vnffg_name: The name for the vnffg
+ :param port: Input port number
+ :param protocol: Input protocol
+ :param symmetric: Check symmetric
+ :param vnf_index: Index to identify vnf
+ :param fc_name: The name of the flow classifier
+ :return: Update the vnffg component
+ """
- self.neutron_port = openstack_sfc.get_client_port(self.client_instance,
- self.client_creator)
- os_sfc_utils.create_vnffg_with_param_file(self.tacker_client, vnf_name,
- conn_name,
- self.default_param_file,
- self.neutron_port.id)
+ logger.info("Update the vnffg...")
+
+ if COMMON_CONFIG.mano_component == 'no-mano':
+ port_groups = []
+ for vnf in self.vnfs:
+ # vnf_instance is in [0] and vnf_port in [1]
+ vnf_instance = self.vnf_objects[vnf][0]
+ vnf_port = self.vnf_objects[vnf][1]
+ if symmetric:
+ # VNFs have two ports
+ neutron_port1 = vnf_port[0]
+ neutron_port2 = vnf_port[1]
+ neutron_ports = [neutron_port1, neutron_port2]
+ else:
+ neutron_port1 = vnf_port[0]
+ neutron_ports = [neutron_port1]
+
+ port_group = \
+ openstack_sfc.create_port_groups(neutron_ports,
+ vnf_instance)
+ port_groups.append(port_group)
+
+ openstack_sfc.update_chain(vnffg_name, fc_name, symmetric)
+
+ elif COMMON_CONFIG.mano_component == 'tacker':
+ logger.info("update for tacker is not supported")
+
+ def swap_classifiers(self, vnffg_1_name, vnffg_2_name, symmetric=False):
+ """Interchange classifiers between port chains
+
+ :param vnffg_1_name: Reference to port_chain_1
+ :param vnffg_2_name: Reference to port_chain_2
+ :param symmetric: To check symmetric
+ :return: Interchange the classifiers
+ """
+
+ if COMMON_CONFIG.mano_component == 'no-mano':
+ openstack_sfc.swap_classifiers(vnffg_1_name,
+ vnffg_2_name,
+ symmetric=symmetric)
def present_results_http(self):
"""Check whether the connection between server and client using
@@ -438,19 +693,6 @@ class SfcCommonTestCase(object):
return results
- def create_chain(self, testcase_config):
- """Create a connection chain for the test scenario purposes
-
- :param testcase_config: The config input of the test case
- :return: Create the proper chain for the specific test scenario
- """
-
- self.neutron_port = openstack_sfc.get_client_port(self.client_instance,
- self.client_creator)
- odl_utils.create_chain(self.tacker_client, self.default_param_file,
- self.neutron_port, COMMON_CONFIG,
- testcase_config)
-
def check_deletion(self):
"""Check that the deletion of the chain has been completed sucessfully.
@@ -462,7 +704,7 @@ class SfcCommonTestCase(object):
check_vnffg_deletion(self.odl_ip, self.odl_port,
self.ovs_logger,
[self.neutron_port],
- self.client_instance.compute_host,
+ self.client_instance.hypervisor_hostname,
self.compute_nodes):
logger.debug("The chains were not correctly removed")
raise Exception("Chains not correctly removed, test failed")
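The bookkeeping introduced above stores each VNF under its name as a two-element list, instance object first and its neutron ports second; the port groups for a chain are then built from those ports. A toy illustration of the structure (placeholder values, not real OpenStack objects):

# vnf_objects maps a VNF name to [instance, ports]; symmetric chains
# create two ports per VNF, plain chains create one
vnf_objects = {
    'testVNF1': ['instance-1', ['port-a']],
    'testVNF2': ['instance-2', ['port-b1', 'port-b2']],
}

for vnf_name, (instance, ports) in vnf_objects.items():
    symmetric = len(ports) == 2
    print('%s on %s: %d port(s), symmetric=%s'
          % (vnf_name, instance, len(ports), symmetric))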
diff --git a/sfc/tests/functest/sfc_symmetric_chain.py b/sfc/tests/functest/sfc_symmetric_chain.py
index 7d6b4c15..cec45219 100644
--- a/sfc/tests/functest/sfc_symmetric_chain.py
+++ b/sfc/tests/functest/sfc_symmetric_chain.py
@@ -8,13 +8,10 @@
#
# http://www.apache.org/licenses/LICENSE-2.0
#
-import os
-import sys
import threading
import logging
import urllib3
-import sfc.lib.openstack_utils as os_sfc_utils
import sfc.lib.odl_utils as odl_utils
import sfc.lib.config as sfc_config
from sfc.tests.functest import sfc_parent_function
@@ -24,7 +21,6 @@ logger = logging.getLogger(__name__)
COMMON_CONFIG = sfc_config.CommonConfig()
CLIENT = "client"
SERVER = "server"
-openstack_sfc = os_sfc_utils.OpenStackSFC()
class SfcSymmetricChain(sfc_parent_function.SfcCommonTestCase):
@@ -42,54 +38,23 @@ class SfcSymmetricChain(sfc_parent_function.SfcCommonTestCase):
def run(self):
logger.info("The test scenario %s is starting", __name__)
- self.create_custom_vnfd(self.testcase_config.test_vnfd, 'test-vnfd1')
- self.create_custom_av(self.vnfs[0], 'test-vnfd1', 'test-vim')
-
- if self.vnf_id is None:
- logger.error('ERROR while booting VNF')
- sys.exit(1)
-
- tosca_file = os.path.join(
- COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnffgd_dir,
- self.testcase_config.test_vnffgd)
- os_sfc_utils.create_vnffgd(
- self.tacker_client,
- tosca_file=tosca_file,
- vnffgd_name='test-vnffgd')
-
- client_port = openstack_sfc.get_client_port(
- self.client_instance,
- self.client_creator)
- server_port = openstack_sfc.get_client_port(
- self.server_instance,
- self.server_creator)
-
- server_ip_prefix = self.server_ip + '/32'
-
- default_param_file = os.path.join(
- COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- COMMON_CONFIG.vnfd_default_params_file)
-
- os_sfc_utils.create_vnffg_with_param_file(
- self.tacker_client,
- 'test-vnffgd',
- 'test-vnffg',
- default_param_file,
- client_port.id,
- server_port.id,
- server_ip_prefix)
+
+ self.register_vnf_template(self.testcase_config.test_vnfd,
+ 'test-vnfd1')
+ self.create_vnf(self.vnfs[0], 'test-vnfd1', 'test-vim', symmetric=True)
+
+ self.create_vnffg(self.testcase_config.test_vnffgd, 'red-symmetric',
+ 'red_http', port=80, protocol='tcp', symmetric=True)
+
# Start measuring the time it takes to implement the classification
# rules
- t1 = threading.Thread(target=wait_for_classification_rules,
+ t1 = threading.Thread(target=symmetric_wait_for_classification_rules,
args=(self.ovs_logger, self.compute_nodes,
- self.server_instance.compute_host,
- server_port,
- self.client_instance.compute_host,
- client_port, self.odl_ip,
- self.odl_port,))
-
+ self.server_instance.hypervisor_hostname,
+ self.port_server,
+ self.client_instance.hypervisor_hostname,
+ self.port_client,
+ self.odl_ip, self.odl_port,))
try:
t1.start()
except Exception as e:
@@ -98,9 +63,7 @@ class SfcSymmetricChain(sfc_parent_function.SfcCommonTestCase):
logger.info("Assigning floating IPs to instances")
self.assign_floating_ip_client_server()
- vnf_ip = os_sfc_utils.get_vnf_ip(self.tacker_client,
- vnf_id=self.vnf_id)
- self.assign_floating_ip_sfs(vnf_ip)
+ self.assign_floating_ip_sfs()
self.check_floating_ips()
@@ -149,10 +112,10 @@ class SfcSymmetricChain(sfc_parent_function.SfcCommonTestCase):
return self.creators
-def wait_for_classification_rules(ovs_logger, compute_nodes,
- server_compute, server_port,
- client_compute, client_port,
- odl_ip, odl_port):
+def symmetric_wait_for_classification_rules(ovs_logger, compute_nodes,
+ server_compute, server_port,
+ client_compute, client_port,
+ odl_ip, odl_port):
if client_compute == server_compute:
odl_utils.wait_for_classification_rules(
ovs_logger,
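All of these test cases follow the same concurrency pattern: the wait for ODL classification rules runs in a background thread while floating IPs are assigned and services started, and is joined before any traffic check. A stripped-down sketch of that pattern:

import threading
import time

def wait_for_rules():
    # stands in for polling the ODL operational datastore for RSPs
    time.sleep(1)

t1 = threading.Thread(target=wait_for_rules)
t1.start()
# ... assign floating IPs, start services in the VMs ...
t1.join()   # traffic checks are only meaningful once the rules exist
print('classification rules in place, running checks')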
diff --git a/sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py b/sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py
index 29441c63..92c2711e 100644
--- a/sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py
+++ b/sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py
@@ -32,23 +32,29 @@ class SfcTwoChainsSSHandHTTP(sfc_parent_function.SfcCommonTestCase):
logger.info("The test scenario %s is starting", __name__)
- self.create_custom_vnfd(self.testcase_config.test_vnfd_red,
- 'test-vnfd1')
- self.create_custom_vnfd(self.testcase_config.test_vnfd_blue,
- 'test-vnfd2')
+ self.register_vnf_template(self.testcase_config.test_vnfd_red,
+ 'test-vnfd1')
+ self.register_vnf_template(self.testcase_config.test_vnfd_blue,
+ 'test-vnfd2')
- self.create_custom_av(self.vnfs[0], 'test-vnfd1', 'test-vim')
- self.create_custom_av(self.vnfs[1], 'test-vnfd2', 'test-vim')
+ self.create_vnf(self.vnfs[0], 'test-vnfd1', 'test-vim')
+ self.create_vnf(self.vnfs[1], 'test-vnfd2', 'test-vim')
+ logger.info("Call Parent create_vnffg with index")
self.create_vnffg(self.testcase_config.test_vnffgd_red, 'red',
- 'red_http')
+ 'red_http', port=80, protocol='tcp',
+ symmetric=False, vnf_index=0)
+
+ self.create_vnffg(self.testcase_config.test_vnffgd_blue, 'blue',
+ 'blue_ssh', port=22, protocol='tcp',
+ symmetric=False, vnf_index=1)
+ self.create_classifier('dummy')
t1 = threading.Thread(target=odl_utils.wait_for_classification_rules,
args=(self.ovs_logger, self.compute_nodes,
self.odl_ip, self.odl_port,
- self.client_instance.compute_host,
+ self.client_instance.hypervisor_hostname,
[self.neutron_port],))
-
try:
t1.start()
except Exception as e:
@@ -61,29 +67,26 @@ class SfcTwoChainsSSHandHTTP(sfc_parent_function.SfcCommonTestCase):
self.check_floating_ips()
self.start_services_in_vm()
- self.vxlan_blocking_start(self.fips_sfs[0], "22")
- self.vxlan_blocking_start(self.fips_sfs[1], "80")
+ self.vxlan_blocking_start(self.fips_sfs[0], "80")
+ self.vxlan_blocking_start(self.fips_sfs[1], "22")
logger.info("Wait for ODL to update the classification rules in OVS")
t1.join()
results = self.present_results_ssh()
- results = self.present_results_allowed_http()
+ results = self.present_results_http()
logger.info("Changing the classification")
- self.remove_vnffg('red_http', 'red')
-
- self.create_vnffg(self.testcase_config.test_vnffgd_blue, 'blue',
- 'blue_ssh')
+ self.swap_classifiers('red_http', 'blue_ssh')
# Start measuring the time it takes to implement the classification
# rules
t2 = threading.Thread(target=odl_utils.wait_for_classification_rules,
args=(self.ovs_logger, self.compute_nodes,
self.odl_ip, self.odl_port,
- self.client_instance.compute_host,
- self.neutron_port,))
+ self.client_instance.hypervisor_hostname,
+ [self.neutron_port],))
try:
t2.start()
except Exception as e:
@@ -92,7 +95,7 @@ class SfcTwoChainsSSHandHTTP(sfc_parent_function.SfcCommonTestCase):
logger.info("Wait for ODL to update the classification rules in OVS")
t2.join()
- results = self.present_results_http()
+ results = self.present_results_allowed_http()
results = self.present_results_allowed_ssh()
if __name__ == '__main__':
diff --git a/sfc/unit_tests/unit/lib/test_cleanup.py b/sfc/unit_tests/unit/lib/test_cleanup.py
index 5ec4261e..e6f59d23 100644
--- a/sfc/unit_tests/unit/lib/test_cleanup.py
+++ b/sfc/unit_tests/unit/lib/test_cleanup.py
@@ -14,8 +14,9 @@ import sfc.lib.cleanup as cleanup
from mock import patch
from mock import call
-from mock import Mock
from mock import DEFAULT
+from mock import Mock
+
__author__ = "Dimitrios Markou <mardim@intracom-telecom.com>"
@@ -262,33 +263,6 @@ class SfcCleanupTesting(unittest.TestCase):
mock_log.assert_has_calls(log_calls)
mock_del_vim.assert_has_calls(del_calls)
- @patch('sfc.lib.cleanup.logger.error')
- def test_delete_openstack_objects_exception(self, mock_log):
-
- """
- Check the proper functionality of the delete_openstack_objects
- function when exception occurs.
- """
-
- mock_creator_obj_one = Mock()
- mock_creator_obj_two = Mock()
- exception_one = Exception('First Boom!')
- exception_two = Exception('Second Boom!')
- attrs_list = [{'clean.side_effect': exception_one},
- {'clean.side_effect': exception_two}]
-
- mock_creator_obj_one.configure_mock(**attrs_list[0])
- mock_creator_obj_two.configure_mock(**attrs_list[1])
-
- mock_creator_objs_list = [mock_creator_obj_one, mock_creator_obj_two]
-
- log_calls = [call('Unexpected error cleaning - %s', exception_two),
- call('Unexpected error cleaning - %s', exception_one)]
-
- cleanup.delete_openstack_objects(mock_creator_objs_list)
-
- mock_log.assert_has_calls(log_calls)
-
@patch('sfc.lib.openstack_utils.OpenStackSFC', autospec=True)
def test_delete_untracked_security_groups(self,
mock_obj):
@@ -301,10 +275,7 @@ class SfcCleanupTesting(unittest.TestCase):
def test_cleanup_odl(self,
mock_del_odl_ietf,
mock_del_odl_res):
- resources = ['service-function-forwarder',
- 'service-function-chain',
- 'service-function-path',
- 'service-function']
+ resources = ['service-function-forwarder']
odl_res_calls = [call(self.odl_ip, self.odl_port, item)
for item in resources]
@@ -314,53 +285,185 @@ class SfcCleanupTesting(unittest.TestCase):
mock_del_odl_res.assert_has_calls(odl_res_calls)
mock_del_odl_ietf.assert_called_once_with(self.odl_ip, self.odl_port)
+ @patch('sfc.lib.openstack_utils.OpenStackSFC', autospec=True)
+ def test_cleanup_nsfc_objects(self, mock_os_sfc):
+ mock_os_sfc_ins = mock_os_sfc.return_value
+ cleanup.cleanup_nsfc_objects()
+ mock_os_sfc_ins.delete_chain.assert_called_once()
+ mock_os_sfc_ins.delete_port_groups.assert_called_once()
+
@patch('time.sleep')
- @patch('sfc.lib.cleanup.delete_openstack_objects')
- @patch('sfc.lib.cleanup.cleanup_odl')
- def test_cleanup(self,
- mock_cleanup_odl,
- mock_del_os_obj,
- mock_time):
+ def test_cleanup_tacker_objects(self, mock_time):
mock_dict = {'delete_vnffgs': DEFAULT,
'delete_vnffgds': DEFAULT,
'delete_vnfs': DEFAULT,
'delete_vnfds': DEFAULT,
- 'delete_vims': DEFAULT,
- 'delete_untracked_security_groups': DEFAULT}
+ 'delete_vims': DEFAULT}
with patch.multiple('sfc.lib.cleanup',
**mock_dict) as mock_values:
-
- cleanup.cleanup(['creator_one', 'creator_two'],
- self.odl_ip,
- self.odl_port)
+ cleanup.cleanup_tacker_objects()
for key in mock_values:
mock_values[key].assert_called_once()
+
+ mock_time.assert_called_once_with(20)
+
+ @patch('sfc.lib.cleanup.cleanup_tacker_objects')
+ def test_cleanup_mano_objects_tacker(self, mock_cleanup_tacker):
+ cleanup.cleanup_mano_objects('tacker')
+ mock_cleanup_tacker.assert_called_once()
+
+ @patch('sfc.lib.cleanup.cleanup_nsfc_objects')
+ def test_cleanup_mano_objects_nsfc(self, mock_cleanup_nsfc):
+ cleanup.cleanup_mano_objects('no-mano')
+ mock_cleanup_nsfc.assert_called_once()
+
+ @patch('sfc.lib.cleanup.connection')
+ @patch('sfc.lib.cleanup.logger.info')
+ def test_delete_openstack_objects(self, mock_log, mock_conn):
+ """
+ Checks the delete_openstack_objects function
+ """
+ testcase_config = Mock()
+ conn = Mock()
+ mock_creator_obj_one = Mock()
+ mock_creator_obj_one.name = 'subnet_name'
+ mock_creator_obj_two = Mock()
+ mock_creator_obj_two.name = 'creator_name'
+ mock_creator_objs_list = [mock_creator_obj_one, mock_creator_obj_two]
+
+ mock_conn.from_config.return_value = conn
+ testcase_config.subnet_name = mock_creator_obj_one.name
+ log_calls = [call('Deleting ' + mock_creator_obj_two.name),
+ call('Deleting ' + mock_creator_obj_one.name)]
+
+ cleanup.delete_openstack_objects(testcase_config,
+ mock_creator_objs_list)
+ mock_creator_obj_one.delete.\
+ assert_called_once_with(conn.session)
+ mock_creator_obj_two.delete.\
+ assert_called_once_with(conn.session)
+ mock_log.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.cleanup.connection')
+ @patch('sfc.lib.cleanup.logger.info')
+ def test_delete_openstack_objects_router(self, mock_log, mock_conn):
+ """
+ Checks delete_openstack_objects when a router is among the creators
+ """
+ testcase_config = Mock()
+ conn = Mock()
+ mock_creator_obj = Mock()
+ mock_creator_obj.name = 'creator_name'
+ mock_creator_router = Mock()
+ mock_creator_router.name = 'router_name'
+ mock_creator_router.id = '1'
+ mock_creator_subnet = Mock()
+ mock_creator_subnet.name = 'subnet_name'
+ mock_creator_subnet.id = '2'
+ mock_creator_objs_list = [mock_creator_subnet,
+ mock_creator_router,
+ mock_creator_obj]
+
+ mock_conn.from_config.return_value = conn
+ testcase_config.router_name = mock_creator_router.name
+ testcase_config.subnet_name = mock_creator_subnet.name
+
+ conn.network.get_subnet.return_value = mock_creator_subnet
+ log_calls = [call('Deleting ' + mock_creator_obj.name),
+ call('Deleting ' + mock_creator_router.name),
+ call('Removing subnet from router'),
+ call('Deleting router'),
+ call('Deleting ' + mock_creator_subnet.name)]
+
+ cleanup.delete_openstack_objects(testcase_config,
+ mock_creator_objs_list)
+ conn.network.remove_interface_from_router.\
+ assert_called_once_with(mock_creator_router.id,
+ mock_creator_subnet.id)
+ conn.network.delete_router.\
+ assert_called_once_with(mock_creator_router)
+ mock_creator_obj.delete.\
+ assert_called_once_with(conn.session)
+ mock_creator_subnet.delete.\
+ assert_called_once_with(conn.session)
+ mock_log.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.cleanup.connection')
+ @patch('sfc.lib.cleanup.logger.info')
+ @patch('sfc.lib.cleanup.logger.error')
+ def test_delete_openstack_objects_exception(self, mock_log_err,
+ mock_log_info, mock_conn):
+ """
+ Check the proper functionality of the delete_openstack_objects
+ function when an exception occurs.
+ """
+ testcase_config = Mock()
+ conn = Mock()
+ mock_creator_obj_one = Mock()
+ mock_creator_obj_one.name = 'subnet_name'
+ mock_creator_obj_two = Mock()
+ mock_creator_obj_two.name = 'creator_name'
+ exception_one = Exception('First Boom!')
+ exception_two = Exception('Second Boom!')
+ attrs_list = [{'delete.side_effect': exception_one},
+ {'delete.side_effect': exception_two}]
+
+ mock_creator_obj_one.configure_mock(**attrs_list[0])
+ mock_creator_obj_two.configure_mock(**attrs_list[1])
+
+ mock_creator_objs_list = [mock_creator_obj_one, mock_creator_obj_two]
+ mock_conn.from_config.return_value = conn
+ testcase_config.subnet_name = mock_creator_obj_one.name
+
+ log_calls = [call('Deleting ' + mock_creator_obj_two.name),
+ call('Deleting ' + mock_creator_obj_one.name),
+ call('Unexpected error cleaning - %s', exception_two),
+ call('Unexpected error cleaning - %s', exception_one)]
+
+ cleanup.delete_openstack_objects(testcase_config,
+ mock_creator_objs_list)
+ mock_creator_obj_one.delete.\
+ assert_called_once_with(conn.session)
+ mock_creator_obj_two.delete.\
+ assert_called_once_with(conn.session)
+
+ mock_log_info.assert_has_calls(log_calls[:2])
+ mock_log_err.assert_has_calls(log_calls[2:])
+
+ @patch('sfc.lib.cleanup.delete_untracked_security_groups')
+ @patch('sfc.lib.cleanup.cleanup_mano_objects')
+ @patch('sfc.lib.cleanup.delete_openstack_objects')
+ @patch('sfc.lib.cleanup.cleanup_odl')
+ def test_cleanup(self,
+ mock_cleanup_odl,
+ mock_del_os_obj,
+ mock_cleanup_mano,
+ mock_untr_sec_grps):
+
+ cleanup.cleanup('testcase_config', ['creator_one', 'creator_two'],
+ 'mano',
+ self.odl_ip,
+ self.odl_port)
+
mock_cleanup_odl.assert_called_once_with(self.odl_ip,
self.odl_port)
- mock_del_os_obj.assert_called_once_with(['creator_one', 'creator_two'])
- mock_time.assert_called_once_with(20)
+ mock_del_os_obj.assert_called_once_with('testcase_config',
+ ['creator_one', 'creator_two'])
+ mock_cleanup_mano.assert_called_once_with('mano')
+ mock_untr_sec_grps.assert_called_once()
- @patch('time.sleep')
+ @patch('sfc.lib.cleanup.cleanup_mano_objects')
@patch('sfc.lib.cleanup.cleanup_odl')
def test_cleanup_from_bash(self,
mock_cleanup_odl,
- mock_time):
+ mock_cleanup_mano):
- mock_dict = {'delete_vnffgs': DEFAULT,
- 'delete_vnffgds': DEFAULT,
- 'delete_vnfs': DEFAULT,
- 'delete_vnfds': DEFAULT,
- 'delete_vims': DEFAULT}
- with patch.multiple('sfc.lib.cleanup',
- **mock_dict) as mock_values:
-
- cleanup.cleanup_from_bash(self.odl_ip,
- self.odl_port)
+ cleanup.cleanup_from_bash(self.odl_ip,
+ self.odl_port,
+ 'mano')
- for key in mock_values:
- mock_values[key].assert_called_once()
mock_cleanup_odl.assert_called_once_with(self.odl_ip,
self.odl_port)
- mock_time.assert_called_once_with(20)
+ mock_cleanup_mano.assert_called_once_with(mano='mano')
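These tests lean on mock's patch.multiple with DEFAULT, which swaps every named attribute of the target for a MagicMock and returns them in a dict for per-mock assertions. A self-contained illustration against the standard library (os.path here is just a convenient patch target):

from mock import patch, DEFAULT
import os.path

with patch.multiple('os.path', exists=DEFAULT, isfile=DEFAULT) as mocks:
    os.path.exists('/tmp')
    mocks['exists'].assert_called_once_with('/tmp')
    mocks['isfile'].assert_not_called()
print('patched attributes restored:', os.path.exists('/tmp'))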
diff --git a/sfc/unit_tests/unit/lib/test_odl_utils.py b/sfc/unit_tests/unit/lib/test_odl_utils.py
new file mode 100644
index 00000000..1dfcf1ed
--- /dev/null
+++ b/sfc/unit_tests/unit/lib/test_odl_utils.py
@@ -0,0 +1,817 @@
+#!/usr/bin/env python
+
+###############################################################################
+# Copyright (c) 2018 Venkata Harshavardhan Reddy Allu and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+import unittest
+
+from mock import call
+from mock import Mock
+from mock import patch
+
+import sfc.lib.odl_utils as odl_utils
+
+__author__ = "Harshavardhan Reddy <venkataharshavardhan_ven@srmuniv.edu.in>"
+
+
+class SfcOdlUtilsTesting(unittest.TestCase):
+
+ @patch('re.compile', autospec=True)
+ @patch('opnfv.utils.ovs_logger.OVSLogger', autospec=True)
+ def test_actual_rsps_in_compute(self, mock_ovs_log, mock_compile):
+ """
+ Checks the proper functionality of actual_rsps_in_compute
+ function
+ """
+
+ match_calls = [call('msg_1'), call('msg_2')]
+
+ mf = Mock()
+ mf.group.side_effect = ['msg_p_1', 'msg_p_2']
+ mock_compile.return_value.match.side_effect = [mf, None]
+ mock_ovs_log.ofctl_dump_flows.return_value = '\nflow_rep\nmsg_1\nmsg_2'
+
+ result = odl_utils.actual_rsps_in_compute(mock_ovs_log, 'compute_ssh')
+
+ self.assertEqual(['msg_p_1|msg_p_2'], result)
+ mock_compile.return_value.match.assert_has_calls(match_calls)
+ mock_ovs_log.ofctl_dump_flows.assert_called_once_with('compute_ssh',
+ 'br-int', '101')
+
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('sfc.lib.odl_utils.get_rsp', autospec=True)
+ @patch('sfc.lib.odl_utils.get_odl_acl_list', autospec=True)
+ @patch('sfc.lib.odl_utils.get_rsps_from_netvirt_acl_actions',
+ autospec=True)
+ def test_get_active_rsps_on_ports(self,
+ mock_rsps_from_netvirt_acl_actions,
+ mock_odl_acl_list,
+ mock_get_rsp,
+ mock_log):
+ """
+ Checks the proper functionality of get_active_rsps_on_ports
+ function
+ """
+
+ log_calls = [call('ACL acl_obj_one does not have an ACE')]
+
+ port_one = Mock()
+ port_two = Mock()
+ port_one.id = 's_p'
+ port_two.id = 'd_p'
+ neutron_ports = [port_one, port_two]
+
+ mock_rsps_from_netvirt_acl_actions.return_value = ['rsp_obj_one',
+ 'rsp_obj_two']
+
+ mock_get_rsp.side_effect = [{'of-matches': ['of-match-one'],
+ 'reverse-path': 'r-path-one'},
+ {'of-matches': ['of-match-two']}]
+
+ mock_odl_acl_list.return_value = {'access-lists': {'acl': [
+ {'acl-name': 'acl_obj_one',
+ 'access-list-entries': {'ace': []}},
+ {'acl-name': 'acl_obj_two',
+ 'access-list-entries': {'ace': [{'matches': {
+ 'destination-port-range': None}}]}},
+ {'acl-name': 'acl_obj_three',
+ 'access-list-entries': {'ace': [{'matches': {
+ 'destination-port-range': {'lower-port': 22},
+ 'netvirt-sfc-acl:source-port-uuid': 's_p_uuid',
+ 'netvirt-sfc-acl:destination-port-uuid': 'd_p_uuid'}}]}},
+ {'acl-name': 'acl_obj_four',
+ 'access-list-entries': {'ace': [{'matches': {
+ 'destination-port-range': {'lower-port': 22},
+ 'netvirt-sfc-acl:source-port-uuid': 's_p',
+ 'netvirt-sfc-acl:destination-port-uuid': 'd_p'},
+ 'actions': 'm_actions'}]}}]}}
+
+ expected = [{'of-matches': ['of-match-two', 'tp_dst=22']},
+ {'of-matches': ['of-match-one', 'tp_src=22'],
+ 'reverse-path': 'r-path-one'}]
+
+ result = odl_utils.get_active_rsps_on_ports('odl_ip',
+ 'odl_port',
+ neutron_ports)
+
+ self.assertEqual(sorted(expected), sorted(result))
+ mock_log.warn.assert_has_calls(log_calls)
+ mock_rsps_from_netvirt_acl_actions.assert_called_once_with('odl_ip',
+ 'odl_port',
+ 'm_actions')
+
+ @patch('sfc.lib.odl_utils.get_odl_resource_elem', autospec=True)
+ def test_get_rsps_from_netvirt_acl_actions(self, mock_odl_resource_elem):
+ """
+ Checks the proper functionality of get_rsps_from_netvirt_acl_actions
+ function
+ """
+
+ netv = {'netvirt-sfc-acl:rsp-name': 'rsp-name',
+ 'netvirt-sfc-acl:sfp-name': 'sfp-name'}
+
+ sfp_state = {'sfp-rendered-service-path': [{'name': 'sfp-rsp-one'},
+ {'name': 'sfp-rsp-two'}]}
+
+ mock_odl_resource_elem.return_value = sfp_state
+ rsp_names = ['rsp-name', 'sfp-rsp-one', 'sfp-rsp-two']
+
+ result = odl_utils.get_rsps_from_netvirt_acl_actions('odl_ip',
+ 'odl_port',
+ netv)
+ self.assertEqual(rsp_names, result)
+ mock_odl_resource_elem.assert_called_once_with('odl_ip', 'odl_port',
+ 'service-function-path-'
+ 'state', 'sfp-name',
+ datastore='operational')
+
+ @patch('sfc.lib.odl_utils.get_odl_resource_elem',
+ autospec=True, return_value='mocked_rsp')
+ def test_get_rsp(self, mock_odl_resource_elem):
+ """
+ Checks the proper functionality of get_rsp
+ function
+ """
+
+ result = odl_utils.get_rsp('odl_ip', 'odl_port', 'rsp_name')
+ self.assertEqual('mocked_rsp', result)
+ mock_odl_resource_elem.assert_called_once_with('odl_ip', 'odl_port',
+ 'rendered-service-path',
+ 'rsp_name',
+ datastore='operational')
+
+ @patch('sfc.lib.odl_utils.get_active_rsps_on_ports', autospec=True)
+ def test_promised_rsps_in_compute(self, mock_active_rsps_on_ports):
+ """
+ Checks the proper functionality of promised_rsps_in_compute
+ function
+ """
+
+ mock_active_rsps_on_ports.return_value = [
+ {'of-matches': {'one': 'one'}, 'path-id': 1},
+ {'of-matches': {'two': 'two'}, 'path-id': 2}]
+
+ result = odl_utils.promised_rsps_in_compute('odl_ip', 'odl_port',
+ 'compute_ports')
+
+ self.assertEqual(['0x1|one', '0x2|two'], result)
+ mock_active_rsps_on_ports.assert_called_once_with('odl_ip', 'odl_port',
+ 'compute_ports')
+
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('time.time', autospec=True, side_effect=[1, 2])
+ def test_timethis(self,
+ mock_time,
+ mock_log):
+ """
+ Checks the proper functionality of timethis
+ function
+ """
+
+ expected = ('mock_this', '1')
+ log_calls = [call("mock_func(*('mock',), **{'name': 'this'}) "
+ "took: 1 sec")]
+
+ @odl_utils.timethis
+ def mock_func(msg, name=''):
+ return msg+'_'+name
+
+ result = mock_func('mock', name='this')
+ self.assertEqual(result, expected)
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('sfc.lib.odl_utils.find_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.get_odl_items', autospec=True)
+ @patch('sfc.lib.odl_utils.promised_rsps_in_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.os_sfc_utils.get_tacker_items', autospec=True)
+ def test_wait_for_classification_rules_rsps_not_configured(
+ self, mock_get_tacker_items, mock_promised_rsps_in_compute,
+ mock_get_odl_items, mock_find_compute, mock_log, mock_sleep):
+ """
+ Checks the proper functionality of wait_for_classification_rules
+ function when rsps are not configured in ODL
+ """
+
+ log_calls = [call("Error when waiting for classification rules: "
+ "RSPs not configured in ODL")]
+
+ mock_find_compute.return_value = 'mock_compute'
+ mock_promised_rsps_in_compute.return_value = None
+
+ odl_utils.wait_for_classification_rules('ovs_logger',
+ 'compute_nodes',
+ 'odl_ip',
+ 'odl_port',
+ 'compute_name',
+ 'neutron_ports')
+ mock_promised_rsps_in_compute.assert_called_with('odl_ip',
+ 'odl_port',
+ 'neutron_ports')
+ assert mock_promised_rsps_in_compute.call_count == 10
+ mock_find_compute.assert_called_once_with('compute_name',
+ 'compute_nodes')
+ mock_sleep.assert_called_with(3)
+ assert mock_sleep.call_count == 9
+ mock_get_tacker_items.assert_called_once_with()
+ mock_get_odl_items.assert_called_once_with('odl_ip', 'odl_port')
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('sfc.lib.odl_utils.find_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.actual_rsps_in_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.promised_rsps_in_compute', autospec=True)
+ def test_wait_for_classification_rules_timeout_not_updated(
+ self, mock_promised_rsps_in_compute, mock_actual_rsps_in_compute,
+ mock_find_compute, mock_log, mock_sleep):
+ """
+ Checks the proper functionality of wait_for_classification_rules
+ function when classification rules are not updated in a given timeout
+ """
+
+ log_calls = [call("Timeout but classification rules are not updated"),
+ call("RSPs in ODL Operational DataStore"
+ "for compute 'compute_name':"),
+ call("['compute|rsps']"),
+ call("RSPs in compute nodes:"),
+ call("[]")]
+
+ mock_compute = Mock()
+ mock_compute.ssh_client = 'mock_ssh_client'
+ mock_find_compute.return_value = mock_compute
+ mock_actual_rsps_in_compute.return_value = []
+ mock_promised_rsps_in_compute.return_value = ['compute|rsps']
+
+ odl_utils.wait_for_classification_rules('ovs_logger',
+ 'compute_nodes',
+ 'odl_ip',
+ 'odl_port',
+ 'compute_name',
+ 'neutron_ports',
+ timeout=2)
+ mock_find_compute.assert_called_once_with('compute_name',
+ 'compute_nodes')
+ mock_log.error.assert_has_calls(log_calls[:1])
+ mock_log.info.assert_has_calls(log_calls[1:])
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('sfc.lib.odl_utils.find_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.actual_rsps_in_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.promised_rsps_in_compute', autospec=True)
+ def test_wait_for_classification_rules_updated(
+ self, mock_promised_rsps_in_compute, mock_actual_rsps_in_compute,
+ mock_find_compute, mock_log, mock_sleep):
+ """
+ Checks the proper functionality of wait_for_classification_rules
+        function when classification rules are updated within the given timeout
+ """
+
+ log_calls = [call("RSPs in ODL Operational DataStore"
+ "for compute 'compute_name':"),
+ call("['compute|rsps']"),
+ call("RSPs in compute nodes:"),
+ call("['compute|rsps']"),
+ call("Classification rules were updated")]
+ mock_compute = Mock()
+ mock_compute.ssh_client = 'mock_ssh_client'
+ mock_find_compute.return_value = mock_compute
+ mock_actual_rsps_in_compute.return_value = ['compute|rsps']
+ mock_promised_rsps_in_compute.return_value = ['compute|rsps']
+
+ odl_utils.wait_for_classification_rules('ovs_logger',
+ 'compute_nodes',
+ 'odl_ip',
+ 'odl_port',
+ 'compute_name',
+ 'neutron_ports',
+ timeout=2)
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('re.search', autospec=True)
+ @patch('ConfigParser.RawConfigParser', autospec=True)
+ @patch('os.getcwd', autospec=True, return_value='/etc')
+ @patch('os.path.join', autospec=True, return_value='/etc/ml2_conf.ini')
+ def test_get_odl_ip_port(self, mock_join,
+ mock_getcwd,
+ mock_rawconfigparser,
+ mock_search):
+ """
+ Checks the proper functionality of get_odl_ip_port
+ function
+ """
+
+ cmd_calls = [call('pwd'),
+ call('sudo cp /etc/neutron/plugins/ml2/ml2_conf.ini '
+ '/etc/'),
+ call('sudo chmod 777 /etc/ml2_conf.ini')]
+
+ n1 = Mock()
+ n2 = Mock()
+ nodes = [n1, n2]
+ mock_rawconfigparser.return_value.get.return_value = 'config'
+ mock_search.return_value.group.return_value = 'odl_ip:odl_port'
+ n1.run_cmd.side_effect = ['/etc', '', '']
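+        # run_cmd is expected to be invoked three times (pwd, cp, chmod);
+        # only the first call needs a meaningful return value.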
+
+ result = odl_utils.get_odl_ip_port(nodes)
+ self.assertEqual(('odl_ip', 'odl_port'), result)
+ n1.run_cmd.assert_has_calls(cmd_calls)
+ n1.is_controller.assert_called_once_with()
+ mock_getcwd.assert_called_once_with()
+ mock_join.assert_called_once_with('/etc', 'ml2_conf.ini')
+ n1.get_file.assert_called_once_with('/etc/ml2_conf.ini',
+ '/etc/ml2_conf.ini')
+ mock_rawconfigparser.return_value.read.assert_called_once_with(
+ '/etc/ml2_conf.ini')
+ mock_rawconfigparser.return_value.get.assert_called_with(
+ 'ml2_odl', 'url')
+ mock_search.assert_called_once_with(r'[0-9]+(?:\.[0-9]+){3}\:[0-9]+',
+ 'config')
+
+ @patch('re.search', autospec=True)
+ @patch('ConfigParser.RawConfigParser', autospec=True)
+ @patch('os.getcwd', autospec=True, return_value='/etc')
+ @patch('os.path.join', autospec=True, return_value='/etc/ml2_conf.ini')
+ def test_get_odl_username_password(self, mock_join,
+ mock_getcwd,
+ mock_rawconfigparser,
+ mock_search):
+ """
+        Checks the proper functionality of get_odl_username_password
+ function
+ """
+
+ mock_rawconfigparser.return_value.get.return_value = 'odl_username'
+ result = odl_utils.get_odl_username_password()
+ self.assertEqual(('odl_username'), result[0])
+ mock_getcwd.assert_called_once_with()
+ mock_join.assert_called_once_with('/etc', 'ml2_conf.ini')
+ mock_rawconfigparser.return_value.read.assert_called_once_with(
+ '/etc/ml2_conf.ini')
+ mock_rawconfigparser.return_value.get.return_value = 'odl_password'
+ result = odl_utils.get_odl_username_password()
+ self.assertEqual(('odl_password'), result[1])
+
+ def test_pluralize(self):
+ """
+ Checks the proper functionality of pluralize
+ function
+ """
+
+ result = odl_utils.pluralize('service-function-path')
+ self.assertEqual('service-function-paths', result)
+
+ def test_get_module(self):
+ """
+ Checks the proper functionality of get_module
+ function
+ """
+
+ result = odl_utils.get_module('service-function-path')
+ self.assertEqual('service-function-path', result)
+
+ @patch('sfc.lib.odl_utils.get_module',
+ autospec=True, return_value='mocked_module')
+ @patch('sfc.lib.odl_utils.pluralize',
+ autospec=True, return_value='resources')
+    def test_format_odl_resource_list_url(self, mock_pluralize,
+ mock_get_module):
+ """
+ Checks the proper functionality of format_odl_resource_list_url
+ function
+ """
+
+ result = odl_utils.format_odl_resource_list_url('odl_ip',
+ 'odl_port',
+ 'resource')
+ formatted_url = ('http://admin:admin@odl_ip:'
+ 'odl_port/restconf/config/mocked_module:'
+ 'resources')
+ self.assertEqual(formatted_url, result)
+        mock_pluralize.assert_called_once_with('resource')
+ mock_get_module.assert_called_once_with('resource')
+
+ @patch('sfc.lib.odl_utils.format_odl_resource_list_url',
+ autospec=True, return_value='list_u/r/l')
+ def test_format_odl_resource_elem_url(self, mock_odl_resource_list_url):
+ """
+ Checks the proper functionality of format_odl_resource_elem_url
+ function
+ """
+
+ result = odl_utils.format_odl_resource_elem_url('odl_ip', 'odl_port',
+ 'resource',
+ 'elem_name')
+ formatted_url = ('list_u/r/l/resource/elem_name')
+ self.assertEqual(formatted_url, result)
+ mock_odl_resource_list_url.assert_called_once_with('odl_ip',
+ 'odl_port',
+ 'resource',
+ 'config')
+
+ @patch('sfc.lib.odl_utils.pluralize',
+ autospec=True, return_value='resources')
+    def test_odl_resource_list_names_returns_empty_list(self, mock_pluralize):
+ """
+ Checks the proper functionality of odl_resource_list_names
+ function when resources are empty
+ """
+
+ resource_json = {'resources': {}}
+ result = odl_utils.odl_resource_list_names('resource', resource_json)
+ self.assertEqual([], result)
+
+ @patch('sfc.lib.odl_utils.pluralize',
+ autospec=True, return_value='resources')
+    def test_odl_resource_list_names(self, mock_pluralize):
+ """
+ Checks the proper functionality of odl_resource_list_names
+ function
+ """
+
+ resource_json = {'resources': {'resource': [{'name': 'resource_one'},
+ {'name': 'resource_two'}]}}
+ result = odl_utils.odl_resource_list_names('resource', resource_json)
+ self.assertEqual(['resource_one', 'resource_two'], result)
+
+ @patch('requests.get', autospec=True)
+ @patch('sfc.lib.odl_utils.format_odl_resource_list_url', autospec=True)
+ def test_get_odl_resource_list(self,
+ mock_odl_resource_list_url,
+ mock_get):
+ """
+ Checks the proper functionality of get_odl_resource_list
+ function
+ """
+
+ mock_odl_resource_list_url.return_value = 'u/r/l'
+ mock_get.return_value.json.return_value = {'key': 'value'}
+
+ result = odl_utils.get_odl_resource_list('odl_ip',
+ 'odl_port',
+ 'resource')
+
+ self.assertEqual({'key': 'value'}, result)
+ mock_odl_resource_list_url.assert_called_once_with('odl_ip',
+ 'odl_port',
+ 'resource',
+ datastore='config')
+ mock_get.assert_called_once_with('u/r/l')
+
+ @patch('requests.get', autospec=True)
+ @patch('sfc.lib.odl_utils.format_odl_resource_elem_url', autospec=True)
+ def test_get_odl_resource_elem(self,
+ mock_odl_resource_elem_url,
+ mock_get):
+ """
+ Checks the proper functionality of get_odl_resource_elem
+ function
+ """
+
+ mock_response = Mock()
+ mock_response.get.return_value = ['elem_one', 'elem_two']
+ mock_get.return_value.json.return_value = mock_response
+ mock_odl_resource_elem_url.return_value = 'u/r/l'
+
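+        # get_odl_resource_elem is expected to return the first entry of
+        # the 'resource' list in the JSON response, hence 'elem_one'.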
+ result = odl_utils.get_odl_resource_elem(
+ 'odl_ip', 'odl_port', 'resource', 'elem_name')
+
+ self.assertEqual('elem_one', result)
+ mock_odl_resource_elem_url.assert_called_once_with(
+ 'odl_ip', 'odl_port', 'resource', 'elem_name', 'config')
+ mock_get.assert_called_once_with('u/r/l')
+ mock_response.get.assert_called_once_with('resource', [{}])
+
+ @patch('requests.delete', autospec=True)
+ @patch('sfc.lib.odl_utils.format_odl_resource_elem_url',
+ autospec=True, return_value='u/r/l')
+ def test_delete_odl_resource_elem(self,
+ mock_odl_resource_elem_url,
+ mock_delete):
+ """
+ Checks the proper functionality of delete_odl_resource_elem
+ function
+ """
+
+ odl_utils.delete_odl_resource_elem('odl_ip', 'odl_port', 'resource',
+ 'elem_name')
+
+        mock_odl_resource_elem_url.assert_called_once_with(
+            'odl_ip', 'odl_port', 'resource', 'elem_name', 'config')
+ mock_delete.assert_called_once_with('u/r/l')
+
+ def test_odl_acl_types_names_returns_empty_list(self):
+ """
+ Checks the proper functionality of odl_acl_types_names
+ function when access lists are empty
+ """
+
+ acl_json = {'access-lists': {}}
+ result = odl_utils.odl_acl_types_names(acl_json)
+ self.assertEqual([], result)
+
+ def test_odl_acl_types_names(self):
+ """
+ Checks the proper functionality of odl_acl_types_names
+ function
+ """
+
+ acl_json = {'access-lists': {'acl': [{'acl-type': 'type-one',
+ 'acl-name': 'name-one'},
+ {'acl-type': 'type-two',
+ 'acl-name': 'name-two'}]}}
+ acl_types = [('type-one', 'name-one'),
+ ('type-two', 'name-two')]
+
+ result = odl_utils.odl_acl_types_names(acl_json)
+ self.assertEqual(acl_types, result)
+
+ def test_format_odl_acl_list_url(self):
+ """
+ Checks the proper functionality of format_odl_acl_list_url
+ function
+ """
+
+ formatted_url = ('http://admin:admin@odl_ip:odl_port/restconf/config/'
+ 'ietf-access-control-list:access-lists')
+ result = odl_utils.format_odl_acl_list_url('odl_ip', 'odl_port')
+ self.assertEqual(formatted_url, result)
+
+ @patch('json.dumps',
+ autospec=True, return_value='{\n "key": "value"\n}')
+ def test_improve_json_layout(self, mock_dumps):
+ """
+ Checks the proper functionality of improve_json_layout
+ function
+ """
+
+ result = odl_utils.improve_json_layout({'key': 'value'})
+
+ self.assertEqual('{\n "key": "value"\n}', result)
+ mock_dumps.assert_called_once_with({'key': 'value'},
+ indent=4,
+ separators=(',', ': '))
+
+ @patch('requests.get', autospec=True)
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('sfc.lib.odl_utils.format_odl_acl_list_url',
+ autospec=True, return_value='acl_list_u/r/l')
+ @patch('sfc.lib.odl_utils.improve_json_layout', autospec=True)
+ @patch('sfc.lib.odl_utils.format_odl_resource_list_url', autospec=True)
+ def test_get_odl_items(self,
+ mock_odl_resource_list_url,
+ mock_json_layout,
+ mock_odl_acl_list_url,
+ mock_log,
+ mock_get):
+ """
+ Checks the proper functionality of get_odl_items
+ function
+ """
+
+ log_calls = [call('Configured ACLs in ODL: r_acl_j_s_o_n'),
+ call('Configured SFs in ODL: r_sf_j_s_o_n'),
+ call('Configured SFFs in ODL: r_sff_j_s_o_n'),
+ call('Configured SFCs in ODL: r_sfc_j_s_o_n'),
+ call('Configured RSPs in ODL: r_sp_j_s_o_n')]
+
+ resource_list_url_calls = [call('odl_ip', 'odl_port',
+ 'service-function'),
+ call('odl_ip', 'odl_port',
+ 'service-function-forwarder'),
+ call('odl_ip', 'odl_port',
+ 'service-function-chain'),
+ call('odl_ip', 'odl_port',
+ 'rendered-service-path',
+ datastore='operational')]
+
+ resource_list_urls = ['sf_list_u/r/l', 'sff_list_u/r/l',
+ 'sfc_list_u/r/l', 'rsp_list_u/r/l']
+
+ get_calls = [call(url) for url in resource_list_urls]
+
+ mock_odl_resource_list_url.side_effect = resource_list_urls
+
+ mock_get.return_value.json.side_effect = ['r_acl_json', 'r_sf_json',
+ 'r_sff_json', 'r_sfc_json',
+ 'r_rsp_json']
+
+ mock_json_layout.side_effect = ['r_acl_j_s_o_n', 'r_sf_j_s_o_n',
+ 'r_sff_j_s_o_n', 'r_sfc_j_s_o_n',
+ 'r_sp_j_s_o_n']
+
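+        # The side_effect ordering above mirrors the fetch order assumed
+        # inside get_odl_items: ACLs first, then SFs, SFFs, SFCs and RSPs.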
+ odl_utils.get_odl_items('odl_ip', 'odl_port')
+
+ mock_odl_acl_list_url.assert_called_once_with('odl_ip', 'odl_port')
+ mock_odl_resource_list_url.assert_has_calls(resource_list_url_calls)
+ mock_get.assert_has_calls(get_calls, any_order=True)
+ mock_log.debug.assert_has_calls(log_calls)
+
+ @patch('requests.get', autospec=True)
+ @patch('sfc.lib.odl_utils.format_odl_acl_list_url', autospec=True)
+ def test_get_odl_acl_list(self,
+ mock_acl_list_url,
+ mock_get):
+ """
+ Checks the proper functionality of get_odl_acl_list
+ function
+ """
+
+ mock_acl_list_url.return_value = 'acl_list/url'
+ mock_get.return_value.json.return_value = {'key': 'value'}
+ result = odl_utils.get_odl_acl_list('odl_ip', 'odl_port')
+ mock_acl_list_url.assert_called_once_with('odl_ip', 'odl_port')
+ mock_get.assert_called_once_with('acl_list/url')
+ self.assertEqual({'key': 'value'}, result)
+
+ @patch('requests.delete', autospec=True)
+ @patch('sfc.lib.odl_utils.format_odl_acl_list_url', autospec=True)
+ def test_delete_odl_acl(self,
+ mock_acl_list_url,
+ mock_delete):
+ """
+ Checks the proper functionality of delete_odl_acl
+ function
+ """
+
+ mock_acl_list_url.return_value = 'acl_list/url'
+
+ odl_utils.delete_odl_acl('odl_ip', 'odl_port', 'acl_type', 'acl_name')
+
+ mock_acl_list_url.assert_called_once_with('odl_ip', 'odl_port')
+ mock_delete.assert_called_once_with(
+ 'acl_list/url/acl/acl_type/acl_name')
+
+ @patch('sfc.lib.odl_utils.delete_odl_acl', autospec=True)
+ def test_delete_acl(self, mock_delete_odl_acl):
+ """
+        Checks the proper functionality of delete_acl
+ function
+ """
+
+ odl_utils.delete_acl('clf_name', 'odl_ip', 'odl_port')
+ mock_delete_odl_acl.assert_called_once_with(
+ 'odl_ip',
+ 'odl_port',
+ 'ietf-access-control-list:ipv4-acl',
+ 'clf_name')
+
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ def test_find_compute_raises_exception(self, mock_log):
+ """
+ Checks the proper functionality of find_compute
+        function when the compute hosting the client is not found
+ """
+
+ ErrorMSG = 'No compute, where the client is, was found'
+ compute_node_one = Mock()
+ compute_node_two = Mock()
+ compute_nodes = [compute_node_one, compute_node_two]
+ compute_node_one.name = 'compute_one'
+ compute_node_two.name = 'compute_two'
+
+ with self.assertRaises(Exception) as cm:
+ odl_utils.find_compute('compute_client', compute_nodes)
+
+ self.assertEqual(ErrorMSG, cm.exception.message)
+ mock_log.debug.assert_called_once_with(ErrorMSG)
+
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ def test_find_compute(self, mock_log):
+ """
+ Checks the proper functionality of find_compute
+        function when the compute is found
+ """
+
+ compute_node_one = Mock()
+ compute_node_two = Mock()
+ compute_nodes = [compute_node_one, compute_node_two]
+ compute_node_one.name = 'compute_one'
+ compute_node_two.name = 'compute_two'
+
+ result = odl_utils.find_compute('compute_two', compute_nodes)
+
+ self.assertEqual(compute_node_two, result)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('sfc.lib.odl_utils.get_active_rsps_on_ports', autospec=True)
+ def test_check_vnffg_deletion_returns_false_rsps_still_active(
+ self, mock_active_rsps_on_ports,
+ mock_log, mock_sleep):
+ """
+ Checks the proper functionality of check_vnffg_deletion
+        function, verifying it returns False when RSPs are still active
+ """
+
+ log_calls = [call('RSPs are still active in the MD-SAL')]
+ mock_active_rsps_on_ports.return_value = True
+ result = odl_utils.check_vnffg_deletion('odl_ip', 'odl_port',
+ 'ovs_logger', 'neutron_ports',
+ 'compute_client_name',
+ 'compute_nodes', retries=1)
+ self.assertFalse(result)
+ mock_active_rsps_on_ports.assert_called_once_with('odl_ip', 'odl_port',
+ 'neutron_ports')
+ mock_sleep.assert_called_once_with(3)
+ mock_log.debug.assert_has_calls(log_calls)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('sfc.lib.odl_utils.find_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.actual_rsps_in_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.get_active_rsps_on_ports', autospec=True)
+ def test_check_vnffg_deletion_returns_false_error_getting_compute(
+ self, mock_active_rsps_on_ports, mock_actual_rsps,
+ mock_find_compute, mock_log, mock_sleep):
+ """
+ Checks the proper functionality of check_vnffg_deletion
+        function, verifying it returns False when the compute lookup fails
+ """
+
+ log_calls = [call('There was an error getting the compute: ErrorMSG')]
+ mock_compute = Mock()
+ mock_compute.ssh_client = 'mock_ssh_client'
+ mock_find_compute.side_effect = [Exception('ErrorMSG'), mock_compute]
+ mock_active_rsps_on_ports.side_effect = [True, False]
+ result = odl_utils.check_vnffg_deletion('odl_ip', 'odl_port',
+ 'ovs_logger', 'neutron_ports',
+ 'compute_client_name',
+ 'compute_nodes', retries=2)
+ self.assertFalse(result)
+ mock_sleep.assert_called_once_with(3)
+ mock_find_compute.assert_called_once_with('compute_client_name',
+ 'compute_nodes')
+ mock_log.debug.assert_has_calls(log_calls)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('sfc.lib.odl_utils.find_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.actual_rsps_in_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.get_active_rsps_on_ports', autospec=True)
+ def test_check_vnffg_deletion_returns_false_classification_flow_in_compute(
+ self, mock_active_rsps_on_ports, mock_actual_rsps,
+ mock_find_compute, mock_log, mock_sleep):
+ """
+ Checks the proper functionality of check_vnffg_deletion
+        function, verifying it returns False while classification
+        flows remain in the compute
+ """
+
+ log_calls = [call('Classification flows still in the compute')]
+ mock_compute = Mock()
+ mock_compute.ssh_client = 'mock_ssh_client'
+ mock_find_compute.return_value = mock_compute
+ mock_actual_rsps.side_effect = [True, True]
+ mock_active_rsps_on_ports.side_effect = [True, False]
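+        # Active RSPs clear after one retry, but the classification flows
+        # stay in the compute, so the check is expected to return False.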
+ result = odl_utils.check_vnffg_deletion('odl_ip', 'odl_port',
+ 'ovs_logger', 'neutron_ports',
+ 'compute_client_name',
+ 'compute_nodes', retries=2)
+ self.assertFalse(result)
+ mock_actual_rsps.assert_called_with('ovs_logger', 'mock_ssh_client')
+ mock_sleep.assert_called_with(3)
+ mock_find_compute.assert_called_once_with('compute_client_name',
+ 'compute_nodes')
+ assert mock_sleep.call_count == 3
+ mock_log.debug.assert_has_calls(log_calls)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('sfc.lib.odl_utils.find_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.actual_rsps_in_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.get_active_rsps_on_ports', autospec=True)
+ def test_check_vnffg_deletion_returns_true(self,
+ mock_active_rsps_on_ports,
+ mock_actual_rsps,
+ mock_find_compute,
+ mock_log, mock_sleep):
+ """
+ Checks the proper functionality of check_vnffg_deletion
+ function to verify that it returns true
+ """
+
+ mock_compute = Mock()
+ mock_compute.ssh_client = 'mock_ssh_client'
+ mock_active_rsps_on_ports.side_effect = [True, False]
+
+ mock_actual_rsps.side_effect = [True, False]
+
+ mock_find_compute.return_value = mock_compute
+
+ result = odl_utils.check_vnffg_deletion('odl_ip', 'odl_port',
+ 'ovs_logger', 'neutron_ports',
+ 'compute_client_name',
+ 'compute_nodes', retries=2)
+ self.assertTrue(result)
+ mock_find_compute.assert_called_once_with('compute_client_name',
+ 'compute_nodes')
+ assert mock_sleep.call_count == 2
+ mock_log.assert_not_called()
diff --git a/sfc/unit_tests/unit/lib/test_openstack_utils.py b/sfc/unit_tests/unit/lib/test_openstack_utils.py
new file mode 100644
index 00000000..bdd53d36
--- /dev/null
+++ b/sfc/unit_tests/unit/lib/test_openstack_utils.py
@@ -0,0 +1,2504 @@
+#!/usr/bin/env python
+
+###############################################################################
+# Copyright (c) 2018 Venkata Harshavardhan Reddy Allu and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+import unittest
+
+from mock import call
+from mock import Mock
+from mock import patch
+from mock import mock_open
+from mock import DEFAULT
+from mock import MagicMock
+
+import sfc.lib.openstack_utils as os_sfc_utils
+from tackerclient.v1_0 import client as tacker_client
+
+__author__ = "Harshavardhan Reddy <venkataharshavardhan_ven@srmuniv.edu.in>"
+
+
+class SfcOpenStackUtilsTesting(unittest.TestCase):
+
+ def setUp(self):
+ self.patcher1 = patch.object(os_sfc_utils.constants,
+ 'ENV_FILE', autospec=True)
+ self.patcher2 = patch.object(os_sfc_utils.openstack_tests,
+ 'get_credentials', autospec=True)
+ self.patcher3 = patch.object(os_sfc_utils.nova_utils,
+ 'nova_client', autospec=True)
+ self.patcher4 = patch.object(os_sfc_utils.neutron_utils,
+ 'neutron_client', autospec=True)
+ self.patcher5 = patch.object(os_sfc_utils.heat_utils,
+ 'heat_client', autospec=True)
+ self.patcher6 = patch.object(os_sfc_utils.keystone_utils,
+ 'keystone_client', autospec=True)
+ self.patcher7 = patch.object(os_sfc_utils.connection,
+ 'from_config', autospec=True,)
+ self.patcher8 = patch.object(os_sfc_utils.neutronclient,
+ 'Client', autospec=True,)
+
+ self.env_file = self.patcher1.start().return_value
+ self.os_creds = self.patcher2.start().return_value
+ self.nova = self.patcher3.start().return_value
+ self.neutron = self.patcher4.start().return_value
+ self.heat = self.patcher5.start().return_value
+ self.keystone = self.patcher6.start().return_value
+ self.conn = self.patcher7.start().return_value
+ self.neutron_client = self.patcher8.start().return_value
+
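+        # With every client factory patched above, instantiating
+        # OpenStackSFC wires the mocked clients into the object under test.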
+ self.os_sfc = os_sfc_utils.OpenStackSFC()
+
+ def tearDown(self):
+ self.patcher1.stop()
+ self.patcher2.stop()
+ self.patcher3.stop()
+ self.patcher4.stop()
+ self.patcher5.stop()
+ self.patcher6.stop()
+ self.patcher7.stop()
+ self.patcher8.stop()
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('os.environ', {'OS_NETWORK_API_VERSION': '1'})
+ def test_get_neutron_client_version(self,
+ mock_log):
+ """
+ Checks the proper functionality of get_neutron_client_version
+ """
+ log_calls = [call("OS_NETWORK_API_VERSION is 1")]
+ result = self.os_sfc.get_neutron_client_version()
+ assert result == '1'
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_register_glance_image_already_exists(self,
+ mock_log):
+ """
+ Checks the proper functionality of register_glance_image
+        function when the image already exists
+ """
+ image_obj = Mock()
+ image_obj.name = 'name'
+ log_calls = [call('Registering the image...'),
+ call('Image ' + image_obj.name + ' already exists.')]
+
+ self.conn.image.find_image.return_value = image_obj
+ result = self.os_sfc.register_glance_image('name',
+ 'url',
+ 'img_format',
+ 'public')
+
+ self.conn.image.find_image.assert_called_once_with(image_obj.name)
+
+ assert result is image_obj
+
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch("__builtin__.open", autospec=True)
+ def test_register_glance_image_is_local(self,
+ mock_open_fn,
+ mock_log):
+ """
+ Checks the proper functionality of register_glance_image
+ function when the image is local
+ """
+ log_calls = [call('Registering the image...'),
+ call('Image created')]
+
+ image_obj_None = None
+ image_obj_name = 'name'
+ image_obj = Mock()
+ mocked_file = mock_open(read_data='url').return_value
+ mock_open_fn.return_value = mocked_file
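+        # open() is mocked, so reading the "local" image yields the literal
+        # string 'url', which is expected to be uploaded as the image data.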
+
+ self.conn.image.find_image.return_value = image_obj_None
+ self.conn.image.upload_image.return_value = image_obj
+ result = self.os_sfc.register_glance_image('name',
+ 'url',
+ 'img_format',
+ 'public')
+ assert result is image_obj
+
+ self.conn.image.find_image.assert_called_once_with(image_obj_name)
+
+ self.conn.image.upload_image.\
+ assert_called_once_with(name='name',
+ disk_format='img_format',
+ data='url',
+ is_public='public',
+ container_format='bare')
+
+ self.assertEqual([image_obj], self.os_sfc.creators)
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.urllib2.urlopen', autospec=True)
+ def test_register_glance_image_is_not_local(self,
+ mock_urlopen,
+ mock_log):
+ """
+ Checks the proper functionality of register_glance_image
+ function when the image is not local
+ """
+ log_calls = [call('Registering the image...'),
+ call('Downloading image'),
+ call('Image created')]
+
+ image_obj_None = None
+ image_obj_name = 'name'
+ image_obj = Mock()
+ mock_file = Mock()
+ mock_file.read.side_effect = ['http://url']
+ mock_urlopen.return_value = mock_file
+
+ self.conn.image.find_image.return_value = image_obj_None
+ self.conn.image.upload_image.return_value = image_obj
+
+ result = self.os_sfc.register_glance_image('name',
+ 'http://url',
+ 'img_format',
+ 'public')
+
+ assert result is image_obj
+
+ self.conn.image.find_image.assert_called_once_with(image_obj_name)
+
+ self.conn.image.upload_image.\
+ assert_called_once_with(name='name',
+ disk_format='img_format',
+ data='http://url',
+ is_public='public',
+ container_format='bare')
+
+ self.assertEqual([image_obj], self.os_sfc.creators)
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+    def test_create_flavor(self,
+                           mock_log):
+ """
+ Checks the proper functionality of create_flavor
+ function
+ """
+
+ mock_openstack_flavor_ins = self.conn.compute.\
+ create_flavor.return_value
+ log_calls = [call('Creating flavor...')]
+
+ result = self.os_sfc.create_flavor('name',
+ 'ram',
+ 'disk',
+ 'vcpus')
+ assert result is mock_openstack_flavor_ins
+ self.assertEqual([mock_openstack_flavor_ins],
+ self.os_sfc.creators)
+ self.conn.compute.create_flavor.\
+ assert_called_once_with(name='name',
+ ram='ram',
+ disk='disk',
+ vcpus='vcpus')
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.env.get', autospec=True)
+ def test_create_network_infrastructure(self, mock_env_get, mock_log):
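+        """
+        Checks the proper functionality of create_network_infrastructure
+        function
+        """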
+ log_calls = [call('Creating Networks...'),
+ call('Creating Router...')]
+ network_obj = Mock()
+ network_obj.id = '1'
+ subnet_obj = Mock()
+ subnet_obj.id = '2'
+ ext_network_obj = Mock()
+ ext_network_obj.id = '3'
+ router_obj = Mock()
+ router_obj.id = '4'
+
+ self.conn.network.create_network.return_value = network_obj
+ self.conn.network.create_subnet.return_value = subnet_obj
+ self.conn.network.find_network.return_value = ext_network_obj
+ self.conn.network.create_router.return_value = router_obj
+ self.conn.network.get_router.return_value = router_obj
+ mock_env_get.return_value = 'ext_net_name'
+
+ expected = (network_obj, router_obj)
+ result = self.os_sfc.create_network_infrastructure('net_name',
+ 'sn_name',
+ 'subnet_cidr',
+ 'router_name')
+ self.conn.network.create_network.\
+ assert_called_once_with(name='net_name')
+ self.conn.network.create_subnet.\
+ assert_called_once_with(name='sn_name', cidr='subnet_cidr',
+ network_id=network_obj.id, ip_version='4')
+ self.conn.network.find_network.\
+ assert_called_once_with('ext_net_name')
+ self.conn.network.create_router.\
+ assert_called_once_with(name='router_name')
+ self.conn.network.add_interface_to_router.\
+ assert_called_once_with(router_obj.id, subnet_id=subnet_obj.id)
+ self.conn.network.update_router.\
+ assert_called_once_with(
+ router_obj.id,
+ external_gateway_info={'network_id': ext_network_obj.id})
+ self.conn.network.get_router.assert_called_once_with(router_obj.id)
+
+ self.assertEqual(expected, result)
+ self.assertEqual([network_obj, subnet_obj, router_obj],
+ self.os_sfc.creators)
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_security_group(self,
+ mock_log):
+ """
+ Checks the proper functionality of create_security_group
+ function
+ """
+ log_calls = [call('Creating the security groups...')]
+ sec_group_obj = Mock()
+ sec_group_obj.id = '1'
+
+ self.conn.network.create_security_group.return_value = sec_group_obj
+
+ result = self.os_sfc.create_security_group('sec_grp_name')
+ assert result is sec_group_obj
+
+ self.conn.network.create_security_group.\
+ assert_called_once_with(name='sec_grp_name')
+
+ pc_calls = [call(security_group_id=sec_group_obj.id,
+ direction='ingress',
+ protocol='icmp'),
+ call(security_group_id=sec_group_obj.id,
+ direction='ingress',
+ protocol='tcp',
+ port_range_min=22,
+ port_range_max=22),
+ call(security_group_id=sec_group_obj.id,
+ direction='ingress',
+ protocol='tcp',
+ port_range_min=80,
+ port_range_max=80)]
+
+ self.conn.network.create_security_group_rule.\
+ assert_has_calls(pc_calls)
+
+ self.assertEqual([sec_group_obj], self.os_sfc.creators)
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_instance_port_security_false(self,
+ mock_log):
+ """
+ Checks the proper functionality of create_instance
+        function when port security is disabled
+ """
+
+ keypair_obj = Mock()
+ keypair_obj.name = 'keypair_name'
+ flavor_obj = Mock()
+ flavor_obj.id = '1'
+ port_obj1 = Mock()
+ port_obj1.id = '2'
+ port_obj2 = Mock()
+ port_obj2.id = '3'
+ instance_obj = Mock()
+ instance_obj.name = 'instance_name'
+ secgrp = Mock()
+ secgrp.name = 'sec_grp'
+ secgrp.id = '4'
+ img_cre = Mock()
+ img_cre.id = '5'
+ network = Mock()
+ network.id = '6'
+ ports = ['port1', 'port2']
+ port_security = False
+
+ log_calls = [call('Creating Key Pair vm_name...'),
+ call('Creating Port ' + str(ports) + '...'),
+ call('Creating the instance vm_name...'),
+ call('Waiting for instance_name to become Active'),
+ call('instance_name is active')]
+
+ self.conn.compute.create_keypair.return_value = keypair_obj
+ self.conn.compute.find_flavor.return_value = flavor_obj
+ self.conn.network.create_port.side_effect = [port_obj1, port_obj2]
+ self.conn.compute.create_server.return_value = instance_obj
+
+ port_obj_list = [port_obj1, port_obj2]
+
+ expected = (instance_obj, port_obj_list)
+ result = self.os_sfc.create_instance('vm_name',
+ 'flavor_name',
+ img_cre,
+ network,
+ secgrp,
+ 'av_zone',
+ ports,
+ port_security=port_security)
+ self.assertEqual(expected, result)
+
+ pc_calls = [call(name=ports[0],
+ is_port_security_enabled=port_security,
+ network_id=network.id),
+ call(name=ports[1],
+ is_port_security_enabled=port_security,
+ network_id=network.id)]
+
+ self.conn.compute.create_keypair.\
+ assert_called_once_with(name='vm_name' + "_keypair")
+
+ self.conn.compute.find_flavor.assert_called_once_with('flavor_name')
+
+ self.conn.network.create_port.\
+ assert_has_calls(pc_calls)
+
+ self.conn.compute.create_server.\
+ assert_called_once_with(name='vm_name',
+ image_id=img_cre.id,
+ flavor_id=flavor_obj.id,
+ networks=[{"port": port_obj1.id},
+ {"port": port_obj2.id}],
+ key_name=keypair_obj.name,
+ availability_zone='av_zone')
+
+ self.conn.compute.wait_for_server.\
+ assert_called_once_with(instance_obj)
+
+ self.assertEqual([keypair_obj, port_obj1, port_obj2, instance_obj],
+ self.os_sfc.creators)
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_instance(self,
+ mock_log):
+ """
+ Checks the proper functionality of create_instance
+ function
+ """
+
+ keypair_obj = Mock()
+ keypair_obj.name = 'keypair_name'
+ flavor_obj = Mock()
+ flavor_obj.id = '1'
+ port_obj = Mock()
+ port_obj.id = '2'
+ instance_obj = Mock()
+ instance_obj.name = 'instance_name'
+ secgrp = Mock()
+ secgrp.name = 'sec_grp'
+ secgrp.id = '4'
+ img_cre = Mock()
+ img_cre.id = '5'
+ network = Mock()
+ network.id = '6'
+ ports = ['port1']
+ port_obj_list = [port_obj]
+ port_security = True
+
+ log_calls = [call('Creating Key Pair vm_name...'),
+ call('Creating Port ' + str(ports) + '...'),
+ call('Creating the instance vm_name...'),
+ call('Waiting for instance_name to become Active'),
+ call('instance_name is active')]
+
+ self.conn.compute.create_keypair.return_value = keypair_obj
+ self.conn.compute.find_flavor.return_value = flavor_obj
+ self.conn.network.create_port.return_value = port_obj
+ self.conn.compute.create_server.return_value = instance_obj
+
+ expected = (instance_obj, port_obj_list)
+ result = self.os_sfc.create_instance('vm_name',
+ 'flavor_name',
+ img_cre,
+ network,
+ secgrp,
+ 'av_zone',
+ ports,
+ port_security=port_security)
+ self.assertEqual(expected, result)
+
+ pc_calls = [call(name=ports[0],
+ is_port_security_enabled=port_security,
+ network_id=network.id,
+ security_group_ids=[secgrp.id])]
+
+ self.conn.compute.create_keypair.\
+ assert_called_once_with(name='vm_name' + "_keypair")
+
+ self.conn.compute.find_flavor.assert_called_once_with('flavor_name')
+
+ self.conn.network.create_port.\
+ assert_has_calls(pc_calls)
+
+ self.conn.compute.create_server.\
+ assert_called_once_with(name='vm_name',
+ image_id=img_cre.id,
+ flavor_id=flavor_obj.id,
+ networks=[{"port": port_obj.id}],
+ key_name=keypair_obj.name,
+ availability_zone='av_zone')
+
+ self.conn.compute.wait_for_server.\
+ assert_called_once_with(instance_obj)
+
+ self.assertEqual([keypair_obj, port_obj, instance_obj],
+ self.os_sfc.creators)
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_instance_port_security_false_one_port(self,
+ mock_log):
+ """
+ Checks the proper functionality of create_instance
+        function when port security is disabled and one port is used
+ """
+
+ keypair_obj = Mock()
+ keypair_obj.name = 'keypair_name'
+ flavor_obj = Mock()
+ flavor_obj.id = '1'
+ port_obj = Mock()
+ port_obj.id = '2'
+ instance_obj = Mock()
+ instance_obj.name = 'instance_name'
+ secgrp = Mock()
+ secgrp.name = 'sec_grp'
+ secgrp.id = '4'
+ img_cre = Mock()
+ img_cre.id = '5'
+ network = Mock()
+ network.id = '6'
+ ports = ['port1']
+ port_obj_list = [port_obj]
+ port_security = False
+
+ log_calls = [call('Creating Key Pair vm_name...'),
+ call('Creating Port ' + str(ports) + '...'),
+ call('Creating the instance vm_name...'),
+ call('Waiting for instance_name to become Active'),
+ call('instance_name is active')]
+
+ self.conn.compute.create_keypair.return_value = keypair_obj
+ self.conn.compute.find_flavor.return_value = flavor_obj
+ self.conn.network.create_port.return_value = port_obj
+ self.conn.compute.create_server.return_value = instance_obj
+
+ expected = (instance_obj, port_obj_list)
+ result = self.os_sfc.create_instance('vm_name',
+ 'flavor_name',
+ img_cre,
+ network,
+ secgrp,
+ 'av_zone',
+ ports,
+ port_security=port_security)
+ self.assertEqual(expected, result)
+
+ pc_calls = [call(name=ports[0],
+ is_port_security_enabled=port_security,
+ network_id=network.id)]
+
+ self.conn.compute.create_keypair.\
+ assert_called_once_with(name='vm_name' + "_keypair")
+
+ self.conn.compute.find_flavor.assert_called_once_with('flavor_name')
+
+ self.conn.network.create_port.\
+ assert_has_calls(pc_calls)
+
+ self.conn.compute.create_server.\
+ assert_called_once_with(name='vm_name',
+ image_id=img_cre.id,
+ flavor_id=flavor_obj.id,
+ networks=[{"port": port_obj.id}],
+ key_name=keypair_obj.name,
+ availability_zone='av_zone')
+
+ self.conn.compute.wait_for_server.\
+ assert_called_once_with(instance_obj)
+
+ self.assertEqual([keypair_obj, port_obj, instance_obj],
+ self.os_sfc.creators)
+ mock_log.info.assert_has_calls(log_calls)
+
+ def test_get_instance(self):
+ """
+ Checks the proper functionality of get_instance function
+ """
+
+ mock_instance_id = 'instance-abyz'
+ mock_instance = Mock()
+ mock_instance.id = mock_instance_id
+ mock_instance.name = 'test-instance'
+ mock_instance.hypervisor_hostname = 'nova-abyz'
+ self.conn.compute.get_server_metadata.return_value = mock_instance
+ result = self.os_sfc.get_instance(mock_instance_id)
+ self.assertEqual(result, mock_instance)
+
+ @patch.object(os_sfc_utils.OpenStackSFC, 'get_hypervisor_hosts')
+ def test_get_av_zones(self, mock_hosts):
+ """
+        Checks the proper functionality of get_av_zones
+ function
+ """
+ mock_hosts.return_value = ['host1', 'host2']
+ result = self.os_sfc.get_av_zones()
+ mock_hosts.assert_called_once()
+ self.assertEqual(['nova::host1', 'nova::host2'], result)
+
+ def test_get_hypervisor_hosts(self):
+ """
+        Checks the proper functionality of get_hypervisor_hosts
+ function
+ """
+ from openstack.compute.v2 import hypervisor
+
+ hypervisor1 = Mock()
+ hypervisor1.state = 'up'
+ hypervisor1.name = 'compute00'
+ hypervisor2 = Mock()
+ hypervisor2.state = 'up'
+ hypervisor2.name = 'compute01'
+ nodes = [hypervisor1.name, hypervisor2.name]
+ hypervisors_list = MagicMock()
+ mock_obj = patch.object(hypervisor, 'Hypervisor')
+ mock_obj.side_effect = [hypervisor1, hypervisor2]
+ self.conn.compute.hypervisors.return_value = hypervisors_list
+ hypervisors_list.__iter__.return_value = [hypervisor1, hypervisor2]
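+        # Both mocked hypervisors are 'up'; get_hypervisor_hosts presumably
+        # filters on state, so both names are expected in the result.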
+
+ result = self.os_sfc.get_hypervisor_hosts()
+ self.conn.compute.hypervisors.assert_called_once()
+ self.assertEqual(nodes, result)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_get_hypervisor_hosts_exception(self, mock_log):
+ """
+        Checks the proper functionality of get_hypervisor_hosts
+        function when an exception is raised
+ """
+ log_calls = [call('Error [get_hypervisors(compute)]: Error MSG')]
+ self.conn.compute.hypervisors.side_effect = Exception('Error MSG')
+ result = self.os_sfc.get_hypervisor_hosts()
+ mock_log.error.assert_has_calls(log_calls)
+ self.assertIsNone(result)
+
+ @patch('sfc.lib.openstack_utils.OpenStackSFC.get_vm_compute',
+ autospec=True, return_value='mock_client')
+ def test_compute_client(self, mock_get_vm_compute):
+ """
+ Checks the proper functionality of get_compute_client
+ function
+ """
+
+ result = self.os_sfc.get_compute_client()
+ self.assertEqual('mock_client', result)
+ mock_get_vm_compute.assert_called_once_with(self.os_sfc, 'client')
+
+ @patch('sfc.lib.openstack_utils.OpenStackSFC.get_vm_compute',
+ autospec=True, return_value='mock_server')
+ def test_get_compute_server(self, mock_get_vm_compute):
+ """
+ Checks the proper functionality of get_compute_server
+ function
+ """
+
+ result = self.os_sfc.get_compute_server()
+ self.assertEqual('mock_server', result)
+ mock_get_vm_compute.assert_called_once_with(self.os_sfc, 'server')
+
+ def test_get_vm_compute_raised_exception(self):
+ """
+ Checks the proper functionality of get_vm_compute
+ function when no VM with the given name is found
+ """
+
+ ErrorMSG = "There is no VM with name 'mock_vm_name'!!"
+ with self.assertRaises(Exception) as cm:
+ self.os_sfc.get_vm_compute('mock_vm_name')
+
+ self.assertEqual(cm.exception.message, ErrorMSG)
+
+ def test_get_vm_compute(self):
+ """
+ Checks the proper functionality of get_vm_compute
+ function
+ """
+
+ mock_cre_obj_1 = Mock()
+ mock_cre_obj_2 = Mock()
+ mock_cre_obj_1.get_vm_inst.return_value.name = 'pro_vm'
+ mock_cre_obj_2.get_vm_inst.return_value.name = 'dev_vm'
+ mock_cre_obj_2.get_vm_inst.return_value.compute_host = 'mock_host'
+ self.os_sfc.creators = [mock_cre_obj_1, mock_cre_obj_2]
+
+ result = self.os_sfc.get_vm_compute('dev_vm')
+ self.assertEqual('mock_host', result)
+
+ def test_get_port_by_ip(self):
+ """
+        Checks the proper functionality of get_port_by_ip function
+ """
+
+ mock_port_ip_address = 'e.f.g.h'
+ mock_port_one, mock_port_two = Mock(), Mock()
+ mock_port_one.id = 'port-abcd'
+ mock_port_two.id = 'port-efgz'
+ mock_port_one.fixed_ips = [{'ip_address': 'a.b.c.d'}]
+ mock_port_two.fixed_ips = [{'ip_address': 'e.f.g.h'}]
+ self.conn.network.ports.return_value = [mock_port_one, mock_port_two]
+ self.conn.network.get_port.return_value = mock_port_two
+ result = self.os_sfc.get_port_by_ip(mock_port_ip_address)
+ self.assertEqual(result, mock_port_two)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.cr_inst.OpenStackVmInstance',
+ autospec=True)
+    def test_get_instance_port_raised_exception(self,
+                                                mock_os_vm,
+                                                mock_log):
+ """
+        Checks the proper functionality of get_instance_port
+        function when no port is returned
+ """
+
+ mock_os_vm_ins = mock_os_vm.return_value
+ mock_vm = Mock()
+ mock_vm.name = 'mock_vm_name'
+ mock_os_vm_ins.get_port_by_name.return_value = None
+ ErrorMSG = 'Client VM does not have the desired port'
+ log_calls = [call("The VM mock_vm_name does not have any port"
+ " with name mock_vm_name-port")]
+
+ with self.assertRaises(Exception) as cm:
+ self.os_sfc.get_instance_port(mock_vm, mock_os_vm_ins)
+
+ self.assertEqual(cm.exception.message, ErrorMSG)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.cr_inst.OpenStackVmInstance',
+ autospec=True)
+ def test_get_instance_port(self,
+ mock_os_vm,
+ mock_log):
+ """
+        Checks the proper functionality of get_instance_port
+        function when a port is returned
+ """
+
+ mock_os_vm_ins = mock_os_vm.return_value
+ mock_vm = Mock()
+ mock_vm.name = 'mock_vm_name'
+ mock_os_vm_ins.get_port_by_name.return_value = 'mock_port'
+ result = self.os_sfc.get_instance_port(mock_vm, mock_os_vm_ins)
+ self.assertEqual('mock_port', result)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.env.get', autospec=True)
+ def test_assign_floating_ip(self,
+ mock_env_get,
+ mock_log):
+ """
+        Checks the proper functionality of assign_floating_ip
+ function
+ """
+ ext_network_obj = Mock()
+ ext_network_obj.id = '1'
+ fip_obj = Mock()
+ fip_obj.floating_ip_address = 'floating_ip_address'
+ port_obj = Mock()
+ port_obj.id = '2'
+ instance_obj = Mock()
+ instance_obj.id = '3'
+
+ log_calls = [call(' Creating floating ips '),
+ call(' FLoating IP address '
+ + fip_obj.floating_ip_address
+ + ' created'),
+ call(' Adding Floating IPs to instances ')]
+
+ mock_env_get.return_value = 'ext_net_name'
+ self.conn.network.find_network.return_value = ext_network_obj
+ self.conn.network.create_ip.return_value = fip_obj
+        self.conn.network.get_port.return_value = port_obj
+ self.conn.compute.get_server.return_value = instance_obj
+
+ result = self.os_sfc.assign_floating_ip(instance_obj, port_obj)
+ assert result is fip_obj.floating_ip_address
+
+ self.conn.network.find_network.assert_called_once_with('ext_net_name')
+ self.conn.network.create_ip.\
+ assert_called_once_with(floating_network_id=ext_network_obj.id,
+ port_id=port_obj.id)
+ self.conn.compute.add_floating_ip_to_server.\
+ assert_called_once_with(instance_obj.id,
+ fip_obj.floating_ip_address)
+
+ self.assertEqual([fip_obj],
+ self.os_sfc.creators)
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.heat_utils.get_stack_servers',
+ autospec=True)
+ @patch('sfc.lib.openstack_utils.cr_inst.generate_creator', autospec=True)
+ def test_assign_floating_ip_vnfs_raised_exception_ips_provided(
+ self, mock_generate_creator, mock_get_stack_servers, mock_log):
+ """
+ Checks the proper functionality of assign_floating_ip_vnfs
+ function when server name does not have any floating IP assignment
+ """
+
+ ErrorMSG = "The VNF server_name-float does not have any suitable" + \
+ " port with ip any of ['floating_ip', 'other_ip'] for" + \
+ " floating IP assignment"
+ log_calls = [call(ErrorMSG)]
+ self.os_sfc.image_settings = 'image_settings'
+ self.heat.stacks.list.return_value = ['stack_obj']
+ mock_ips = ['floating_ip', 'other_ip']
+ mock_server_obj = Mock()
+ mock_port_obj = Mock()
+ mock_server_obj.name = 'server_name'
+ mock_server_obj.ports = [mock_port_obj]
+ mock_port_obj.name = None
+ mock_port_obj.ips = [{'ip_address': 'floating_ip'}]
+ mock_get_stack_servers.return_value = [mock_server_obj]
+
+ with self.assertRaises(Exception) as cm:
+ self.os_sfc.assign_floating_ip_vnfs('router', mock_ips)
+
+ self.assertEqual(cm.exception.message, ErrorMSG)
+ mock_get_stack_servers.assert_called_once_with(self.heat,
+ self.nova,
+ self.neutron_client,
+ self.keystone,
+ 'stack_obj',
+ 'admin')
+ mock_generate_creator.assert_called_once_with(self.os_creds,
+ mock_server_obj,
+ 'image_settings',
+ 'admin')
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.heat_utils.get_stack_servers',
+ autospec=True)
+ @patch('sfc.lib.openstack_utils.cr_inst.generate_creator', autospec=True)
+ def test_assign_floating_ip_vnfs_raised_exception_ips_not_provided(
+ self, mock_generate_creator, mock_get_stack_servers, mock_log):
+ """
+ Checks the proper functionality of assign_floating_ip_vnfs
+ function when server name does not have any floating IP assignment
+ """
+
+ ErrorMSG = "The VNF server_name-float does not have any suitable" + \
+ " port for floating IP assignment"
+ log_calls = [call(ErrorMSG)]
+ self.os_sfc.image_settings = 'image_settings'
+ self.heat.stacks.list.return_value = ['stack_obj']
+ mock_server_obj = Mock()
+ mock_port_obj = Mock()
+ mock_server_obj.name = 'server_name'
+ mock_server_obj.ports = [mock_port_obj]
+ mock_port_obj.name = None
+ mock_port_obj.ips = [{'ip_address': 'floating_ip'}]
+ mock_get_stack_servers.return_value = [mock_server_obj]
+
+ with self.assertRaises(Exception) as cm:
+ self.os_sfc.assign_floating_ip_vnfs('router')
+
+ mock_get_stack_servers.assert_called_once_with(self.heat,
+ self.nova,
+ self.neutron_client,
+ self.keystone,
+ 'stack_obj',
+ 'admin')
+ mock_generate_creator.assert_called_once_with(self.os_creds,
+ mock_server_obj,
+ 'image_settings',
+ 'admin')
+ self.assertEqual(cm.exception.message, ErrorMSG)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.FloatingIpConfig', autospec=True)
+ @patch('sfc.lib.openstack_utils.cr_inst.generate_creator',
+ autospec=True)
+ @patch('sfc.lib.openstack_utils.heat_utils.get_stack_servers',
+ autospec=True)
+ def test_assign_floating_ip_vnfs(self,
+ mock_get_stack_servers,
+ mock_generate_creator,
+ mock_floating_ip_config):
+ """
+ Checks the proper functionality of assign_floating_ip_vnfs
+ function
+ """
+
+ self.os_sfc.image_settings = 'image_settings'
+ self.heat.stacks.list.return_value = ['stack_obj']
+
+ mock_router = Mock()
+ mock_server_obj = Mock()
+ mock_ip_obj = Mock()
+ mock_port_obj = Mock()
+ mock_router.name = 'm_router'
+ mock_server_obj.name = 'serv_obj'
+ mock_server_obj.ports = [mock_port_obj]
+ mock_ips = ['floating_ip', 'other_ip']
+ mock_ip_obj.ip = 'mocked_ip'
+ mock_port_obj.name = 'port_obj'
+ mock_port_obj.ips = [{'ip_address': 'floating_ip'}]
+ mock_get_stack_servers.return_value = [mock_server_obj]
+ mock_os_vm_ins = mock_generate_creator.return_value
+ float_ip_ins = mock_floating_ip_config.return_value
+ mock_os_vm_ins.add_floating_ip.return_value = mock_ip_obj
+
+ result = self.os_sfc.assign_floating_ip_vnfs(mock_router, mock_ips)
+ self.assertEqual(['mocked_ip'], result)
+ self.assertEqual([mock_os_vm_ins], self.os_sfc.creators)
+ mock_get_stack_servers.assert_called_once_with(self.heat,
+ self.nova,
+ self.neutron_client,
+ self.keystone,
+ 'stack_obj', 'admin')
+ mock_generate_creator.assert_called_once_with(self.os_creds,
+ mock_server_obj,
+ 'image_settings',
+ 'admin')
+ mock_floating_ip_config.assert_called_once_with(name='serv_obj-float',
+ port_name='port_obj',
+ router_name='m_router')
+ mock_os_vm_ins.add_floating_ip.assert_called_once_with(float_ip_ins)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_delete_all_security_groups(self, mock_log):
+ """
+ Checks the proper functionality of delete_all_security_groups
+ function
+ """
+
+ log_calls_info = [call('Deleting remaining security groups...')]
+ secgrp1_obj = Mock()
+ secgrp2_obj = Mock()
+ secgrp_list = MagicMock()
+
+        self.conn.network.create_security_group.side_effect = [secgrp1_obj,
+                                                               secgrp2_obj]
+ self.conn.network.security_groups.return_value = secgrp_list
+
+ secgrp_list.__iter__.return_value = [secgrp1_obj, secgrp2_obj]
+ del_calls = [call(secgrp1_obj),
+ call(secgrp2_obj)]
+
+ self.os_sfc.delete_all_security_groups()
+ self.conn.network.security_groups.assert_called_once()
+ self.conn.network.delete_security_group.assert_has_calls(del_calls)
+ mock_log.info.assert_has_calls(log_calls_info)
+
+ @patch('sfc.lib.openstack_utils.cr_inst.OpenStackVmInstance',
+ autospec=True)
+ def test_wait_for_vnf(self, mock_os_vm):
+ """
+ Checks the proper functionality of wait_for_vnf function
+ """
+
+ mock_os_vm.vm_active.return_value = "x"
+ result = self.os_sfc.wait_for_vnf(mock_os_vm)
+ self.assertEqual('x', result)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_port_groups_raises_exception(self, mock_log):
+ """
+ Checks the create_port_groups when length of ports is greater than 2
+ """
+ instance_obj = Mock()
+ instance_obj.name = 'name'
+ self.conn.compute.get_server.return_value = instance_obj
+
+ log_calls_info = [call('Creating the port pairs...')]
+ log_calls_err = [call('Only SFs with one or two ports are supported')]
+ exception_message = "Failed to create port pairs"
+ vnf_ports = ['p1', 'p2', 'p3']
+ with self.assertRaises(Exception) as cm:
+ self.os_sfc.create_port_groups(vnf_ports, instance_obj)
+ self.assertEqual(exception_message, cm.exception.message)
+ mock_log.info.assert_has_calls(log_calls_info)
+ mock_log.error.assert_has_calls(log_calls_err)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_port_groups_returns_none_from_pp(self, mock_log):
+ """
+ Checks the create_port_groups when something goes wrong in port pair
+ creation
+ """
+ instance_obj = Mock()
+ instance_obj.name = 'name'
+ port_obj1 = Mock()
+ port_obj2 = Mock()
+ port_obj1.id = '123abc'
+ port_obj2.id = '456def'
+
+ self.conn.compute.get_server.return_value = instance_obj
+        self.conn.network.get_port.side_effect = [port_obj1, port_obj2]
+
+ log_calls_info = [call('Creating the port pairs...')]
+ log_calls_warn = [call('Chain creation failed due to port pair '
+ 'creation failed for vnf %(vnf)s',
+ {'vnf': instance_obj.name})]
+ self.neutron_client.create_sfc_port_pair.return_value = None
+ result = self.os_sfc.create_port_groups(
+ [port_obj1, port_obj2], instance_obj)
+ self.assertIsNone(result)
+ mock_log.info.assert_has_calls(log_calls_info)
+ mock_log.warning.assert_has_calls(log_calls_warn)
+
+ @patch('snaps.domain.network.Port', autospec=True)
+ @patch('snaps.domain.vm_inst.VmInst', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_port_groups_exception_nopp(self, mock_log, mock_osvm,
+ mock_port):
+ """
+ Checks the create_port_groups when openstack does not commit the pp
+ """
+
+ log_calls_info = [call('Creating the port pairs...')]
+ mock_port_ins = mock_port.return_value
+ mock_port_ins.id = '123abc'
+ mock_vm_ins = mock_osvm.return_value
+ mock_vm_ins.name = 'vm'
+ exception_message = "Port pair was not committed in openstack"
+ expected_port_pair = {'name': 'vm-connection-points',
+ 'description': 'port pair for vm',
+ 'ingress': '123abc',
+ 'egress': '123abc'}
+ self.neutron_client.create_sfc_port_pair.return_value = \
+ {'port_pair': {'id': 'pp_id'}}
+ self.neutron_client.list_sfc_port_pairs.return_value = \
+ {'port_pairs': [{'id': 'xxxx'}]}
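+        # The listed port pair id ('xxxx') deliberately differs from the
+        # created 'pp_id', so the commit check is expected to fail.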
+ with self.assertRaises(Exception) as cm:
+ self.os_sfc.create_port_groups([mock_port_ins], mock_vm_ins)
+ self.assertEqual(exception_message, cm.exception.message)
+ self.neutron_client.create_sfc_port_pair.assert_has_calls(
+ [call({'port_pair': expected_port_pair})])
+ mock_log.info.assert_has_calls(log_calls_info)
+
+ @patch('snaps.domain.network.Port', autospec=True)
+ @patch('snaps.domain.vm_inst.VmInst', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_port_groups_returns_none_from_ppg(self, mock_log,
+ mock_vm,
+ mock_port):
+ """
+ Checks the create_port_groups when something goes wrong in port pair
+ group creation
+ """
+
+ instance_obj = Mock()
+ instance_obj.name = 'name'
+ port_obj = Mock()
+ port_obj.id = '123abc'
+
+ self.conn.compute.get_server.return_value = instance_obj
+ self.conn.network.get_port.return_value = port_obj
+
+ log_calls_info = [call('Creating the port pairs...'),
+ call('Creating the port pair groups for name')]
+ log_calls_warn = [call('Chain creation failed due to port pair group '
+ 'creation failed for vnf '
+ '{}'.format(instance_obj.name))]
+ self.neutron_client.create_sfc_port_pair.return_value = \
+ {'port_pair': {'id': 'pp_id'}}
+ self.neutron_client.list_sfc_port_pairs.return_value = \
+ {'port_pairs': [{'id': 'pp_id'}]}
+ self.neutron_client.create_sfc_port_pair_group.return_value = None
+ result = self.os_sfc.create_port_groups([port_obj], instance_obj)
+ self.assertIsNone(result)
+ mock_log.info.assert_has_calls(log_calls_info)
+ mock_log.warning.assert_has_calls(log_calls_warn)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_port_groups_returns_id(self, mock_log):
+ """
+ Checks the create_port_groups when everything goes as expected
+ """
+
+ log_calls_info = [call('Creating the port pairs...')]
+
+ instance_obj = Mock()
+ instance_obj.name = 'vm'
+ port_obj = Mock()
+ port_obj.id = '123abc'
+ self.conn.compute.get_server.return_value = instance_obj
+ self.conn.network.get_port.return_value = port_obj
+
+ expected_port_pair = {'name': 'vm-connection-points',
+ 'description': 'port pair for vm',
+ 'ingress': '123abc',
+ 'egress': '123abc'}
+ self.neutron_client.create_sfc_port_pair.return_value = \
+ {'port_pair': {'id': 'pp_id'}}
+ self.neutron_client.list_sfc_port_pairs.return_value = \
+ {'port_pairs': [{'id': 'pp_id'}]}
+ self.neutron_client.create_sfc_port_pair_group.return_value = \
+ {'port_pair_group': {'id': 'pp_id'}}
+ expected_port_pair_gr = {'name': 'vm-port-pair-group',
+ 'description': 'port pair group for vm',
+ 'port_pairs': ['pp_id']}
+
+ self.os_sfc.create_port_groups([port_obj], instance_obj)
+ self.neutron_client.create_sfc_port_pair.assert_has_calls(
+ [call({'port_pair': expected_port_pair})])
+ self.neutron_client.create_sfc_port_pair_group.assert_has_calls(
+ [call({'port_pair_group': expected_port_pair_gr})])
+ mock_log.info.assert_has_calls(log_calls_info)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_classifier(self, mock_log):
+ """
+ Checks the create_classifier method
+ """
+
+ log_calls = [call('Creating the classifier...')]
+ neutron_port = 'neutron_port_id'
+ port = 80
+ protocol = 'tcp'
+ fc_name = 'red_http'
+ symmetrical = False
+ self.neutron_client.create_sfc_flow_classifier.return_value = \
+ {'flow_classifier': {'id': 'fc_id'}}
+
+ expected_sfc_classifier_params = {'name': fc_name,
+ 'logical_source_port': neutron_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'protocol': protocol}
+ self.os_sfc.create_classifier(neutron_port, port,
+ protocol, fc_name, symmetrical)
+ self.neutron_client.create_sfc_flow_classifier.assert_has_calls(
+ [call({'flow_classifier': expected_sfc_classifier_params})])
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_classifier_symmetric(self, mock_log):
+ """
+        Checks the create_classifier method with symmetrical=True
+ """
+
+ log_calls = [call('Creating the classifier...')]
+ neutron_port = 'neutron_port_id'
+ port = 80
+ protocol = 'tcp'
+ fc_name = 'red_http'
+ symmetrical = True
+ serv_p = '123'
+ server_ip = '1.1.1.2'
+ self.neutron_client.create_sfc_flow_classifier.return_value = \
+ {'flow_classifier': {'id': 'fc_id'}}
+
+ expected_sfc_classifier_params = {'name': fc_name,
+ 'logical_source_port': neutron_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'destination_ip_prefix': server_ip,
+ 'logical_destination_port': serv_p,
+ 'protocol': protocol}
+ self.os_sfc.create_classifier(neutron_port, port,
+ protocol, fc_name, symmetrical,
+ server_port='123',
+ server_ip='1.1.1.2')
+ self.neutron_client.create_sfc_flow_classifier.assert_has_calls(
+ [call({'flow_classifier': expected_sfc_classifier_params})])
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_chain(self, mock_log):
+ """
+ Checks the create_chain method
+ """
+
+ log_calls = [call('Creating the classifier...'),
+ call('Creating the chain...')]
+ port_groups = ['1a', '2b']
+ neutron_port = 'neutron_port_id'
+ port = 80
+ protocol = 'tcp'
+ vnffg_name = 'red_http'
+ symmetrical = False
+ self.neutron_client.create_sfc_flow_classifier.return_value = \
+ {'flow_classifier': {'id': 'fc_id'}}
+ self.neutron_client.create_sfc_port_chain.return_value = \
+ {'port_chain': {'id': 'pc_id'}}
+
+ expected_sfc_classifier_params = {'name': vnffg_name + '-classifier',
+ 'logical_source_port': neutron_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'protocol': protocol}
+ expected_chain_config = {'name': vnffg_name + '-port-chain',
+ 'description': 'port-chain for SFC',
+ 'port_pair_groups': port_groups,
+ 'flow_classifiers': ['fc_id']}
+
+ self.os_sfc.create_chain(port_groups, neutron_port, port,
+ protocol, vnffg_name, symmetrical)
+
+ self.neutron_client.create_sfc_flow_classifier.assert_has_calls(
+ [call({'flow_classifier': expected_sfc_classifier_params})])
+ self.neutron_client.create_sfc_port_chain.assert_has_calls(
+ [call({'port_chain': expected_chain_config})])
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_chain_symmetric(self, mock_log):
+ """
+ Checks the create_chain method when symmetrical is True
+ """
+
+ log_calls = [call('Creating the classifier...'),
+ call('Creating the chain...')]
+ port_groups = ['1a', '2b']
+ neutron_port = 'neutron_port_id'
+ port = 80
+ protocol = 'tcp'
+ vnffg_name = 'red_http'
+ symmetrical = True
+ serv_p = '123abc'
+ server_ip = '1.1.1.2'
+ self.neutron_client.create_sfc_flow_classifier.return_value = \
+ {'flow_classifier': {'id': 'fc_id'}}
+ self.neutron_client.create_sfc_port_chain.return_value = \
+ {'port_chain': {'id': 'pc_id'}}
+
+ expected_sfc_classifier_params = {'name': vnffg_name + '-classifier',
+ 'logical_source_port': neutron_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'destination_ip_prefix': server_ip,
+ 'logical_destination_port': serv_p,
+ 'protocol': protocol}
+ expected_chain_config = {'name': vnffg_name + '-port-chain',
+ 'description': 'port-chain for SFC',
+ 'port_pair_groups': port_groups,
+ 'flow_classifiers': ['fc_id'],
+ 'chain_parameters': {'symmetric': True}}
+
+ self.os_sfc.create_chain(port_groups, neutron_port, port,
+ protocol, vnffg_name, symmetrical,
+ server_port=serv_p, server_ip=server_ip)
+
+ self.neutron_client.create_sfc_flow_classifier.assert_has_calls(
+ [call({'flow_classifier': expected_sfc_classifier_params})])
+ self.neutron_client.create_sfc_port_chain.assert_has_calls(
+ [call({'port_chain': expected_chain_config})])
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_update_chain_symmetric(self, mock_log):
+ """
+ Checks the update_chain method
+ """
+
+ log_calls = [call('Update the chain...')]
+ vnffg_name = 'red_http'
+ fc_name = 'blue_ssh'
+ symmetrical = True
+ self.neutron_client.find_resource.return_value = \
+ {'id': 'fc_id'}
+ expected_chain_config = {'name': vnffg_name + '-port-chain',
+ 'flow_classifiers': ['fc_id'],
+ 'chain_parameters': {'symmetric': True}}
+ self.os_sfc.update_chain(vnffg_name, fc_name, symmetrical)
+ self.neutron_client.update_sfc_port_chain.assert_has_calls(
+ [call('fc_id', {'port_chain': expected_chain_config})])
+ mock_log.info.assert_has_calls(log_calls)
+
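+ # Sketch of what the assertions above imply update_chain does; the
+ # find_resource arguments are not asserted here, hence the ellipsis:
+ #
+ #     chain_id = self.neutron_client.find_resource(...)['id']
+ #     body = {'port_chain': expected_chain_config}
+ #     self.neutron_client.update_sfc_port_chain(chain_id, body)
+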
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_swap_classifiers(self, mock_log):
+ """
+ Checks the swap_classifiers method
+ """
+
+ log_calls = [call('Swap classifiers...')]
+ vnffg_1_name = 'red_http'
+ vnffg_2_name = 'blue_ssh'
+ symmetrical = False
+ self.os_sfc.swap_classifiers(vnffg_1_name, vnffg_2_name, symmetrical)
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_delete_port_groups(self, mock_log):
+ """
+ Checks the delete_port_groups method
+ """
+ log_calls = [call('Deleting the port groups...'),
+ call('Deleting the port pairs...')]
+ self.neutron_client.list_sfc_port_pair_groups.return_value = \
+ {'port_pair_groups': [{'id': 'id_ppg1'}, {'id': 'id_ppg2'}]}
+ self.neutron_client.list_sfc_port_pairs.return_value = \
+ {'port_pairs': [{'id': 'id_pp1'}, {'id': 'id_pp2'}]}
+ self.os_sfc.delete_port_groups()
+
+ self.neutron_client.delete_sfc_port_pair_group.assert_has_calls(
+ [call('id_ppg1'), call('id_ppg2')])
+ self.neutron_client.delete_sfc_port_pair.assert_has_calls(
+ [call('id_pp1'), call('id_pp2')])
+ mock_log.info.assert_has_calls(log_calls)
+
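+ # The assertions above imply delete_port_groups walks both listings
+ # (sketch under that assumption):
+ #
+ #     for ppg in list_sfc_port_pair_groups()['port_pair_groups']:
+ #         delete_sfc_port_pair_group(ppg['id'])
+ #     for pp in list_sfc_port_pairs()['port_pairs']:
+ #         delete_sfc_port_pair(pp['id'])
+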
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_delete_chain(self, mock_log):
+ """
+ Checks the delete_chain method
+ """
+ log_calls = [call('Deleting the chain...'),
+ call('Deleting the classifiers...')]
+ self.neutron_client.list_sfc_port_chains.return_value = \
+ {'port_chains': [{'id': 'id_pc1'}]}
+ self.neutron_client.list_sfc_flow_classifiers.return_value = \
+ {'flow_classifiers': [{'id': 'id_fc1'}]}
+ self.os_sfc.delete_chain()
+
+ self.neutron_client.delete_sfc_port_chain.\
+ assert_has_calls([call('id_pc1')])
+ self.neutron_client.delete_sfc_flow_classifier.assert_has_calls(
+ [call('id_fc1')])
+ mock_log.info.assert_has_calls(log_calls)
+
+
+class SfcTackerSectionTesting(unittest.TestCase):
+ def setUp(self):
+ self.patcher = patch.object(tacker_client, 'Client', autospec=True)
+ self.mock_tacker_client = self.patcher.start().return_value
+
+ def tearDown(self):
+ self.patcher.stop()
+
+ @patch('os.getenv', autospec=True, return_value=None)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_get_tacker_client_version_returned_default(self,
+ mock_log,
+ mock_getenv):
+ """
+ Checks the proper functionality of get_tacker_client_version
+ function when os.getenv returns None
+ """
+ result = os_sfc_utils.get_tacker_client_version()
+ self.assertEqual(result, '1.0')
+ mock_getenv.assert_called_once_with('OS_TACKER_API_VERSION')
+ mock_log.info.assert_not_called()
+
+ @patch('os.getenv', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_get_tacker_client_version(self,
+ mock_log,
+ mock_getenv):
+ """
+ Checks the proper functionality of get_tacker_client_version
+ function when os.getenv returns a version
+ """
+
+ ver = '2.0'
+ mock_getenv.return_value = ver
+ log_calls = [call("OS_TACKER_API_VERSION is set in env as '%s'", ver)]
+
+ result = os_sfc_utils.get_tacker_client_version()
+ self.assertEqual(result, ver)
+ mock_getenv.assert_called_once_with('OS_TACKER_API_VERSION')
+ mock_log.info.assert_has_calls(log_calls)
+
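+ # Both version tests assume roughly this implementation (sketch only;
+ # the default constant name is an assumption):
+ #
+ #     api_version = os.getenv('OS_TACKER_API_VERSION')
+ #     if api_version is not None:
+ #         logger.info("OS_TACKER_API_VERSION is set in env as '%s'",
+ #                     api_version)
+ #         return api_version
+ #     return DEFAULT_TACKER_API_VERSION  # '1.0'
+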
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_get_id_from_name_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of get_id_from_name
+ function when tacker_client.list raises an exception
+ """
+
+ resource_name = 'mock_resource_name'
+ resource_type = 'mock_resource_type'
+ params = {'fields': 'id', 'name': resource_name}
+ collection = resource_type + 's'
+ path = '/' + collection
+ self.mock_tacker_client.list.side_effect = Exception('ErrorMSG')
+ log_calls = [call('Error [get_id_from_name(tacker_client, '
+ 'resource_type, resource_name)]: ErrorMSG')]
+
+ result = os_sfc_utils.get_id_from_name(self.mock_tacker_client,
+ resource_type,
+ resource_name)
+ self.assertIsNone(result)
+ self.mock_tacker_client.list.assert_called_once_with(collection,
+ path,
+ **params)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.openstack_tests.get_credentials',
+ autospec=True, return_value='os_creds')
+ @patch('sfc.lib.openstack_utils.keystone_utils.keystone_session',
+ autospec=True, return_value='keystone_session_obj')
+ @patch('sfc.lib.openstack_utils.constants.ENV_FILE', autospec=True)
+ @patch('sfc.lib.openstack_utils.tackerclient.Client', autospec=True)
+ def test_get_tacker_client(self, mock_tacker_client,
+ mock_env_file,
+ mock_keystone_session,
+ mock_get_credentials):
+ """
+ Checks the proper functionality of the get_tacker_client
+ function
+ """
+
+ mock_tacker_client_ins = mock_tacker_client.return_value
+ result = os_sfc_utils.get_tacker_client()
+ assert result is mock_tacker_client_ins
+ mock_get_credentials.assert_called_once_with(os_env_file=mock_env_file,
+ overrides=None)
+ mock_keystone_session.assert_called_once_with('os_creds')
+ mock_tacker_client.assert_called_once_with(
+ '1.0', session='keystone_session_obj')
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_get_id_from_name(self, mock_log):
+ """
+ Checks the proper functionality of get_id_from_name
+ function when tacker_client.list returns id
+ """
+
+ resource_name = 'mock_resource_name'
+ resource_type = 'mock_resource_type'
+ params = {'fields': 'id', 'name': resource_name}
+ collection = resource_type + 's'
+ self.mock_tacker_client.list.return_value = {collection: {0: {'id':
+ 'mock_id'}}}
+ path = '/' + collection
+ result = os_sfc_utils.get_id_from_name(self.mock_tacker_client,
+ resource_type,
+ resource_name)
+ self.assertEqual('mock_id', result)
+ self.mock_tacker_client.list.assert_called_once_with(collection,
+ path,
+ **params)
+ mock_log.error.assert_not_called()
+
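+ # Sketch of the lookup both get_id_from_name tests exercise (assumed
+ # implementation; params and path are taken from the assertions):
+ #
+ #     params = {'fields': 'id', 'name': resource_name}
+ #     collection = resource_type + 's'
+ #     resp = tacker_client.list(collection, '/' + collection, **params)
+ #     return resp[collection][0]['id']
+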
+ @patch('sfc.lib.openstack_utils.get_id_from_name', autospec=True)
+ def test_get_vnfd_id(self, mock_get_id):
+ """
+ Checks the proper functionality of get_vnfd_id
+ function
+ """
+
+ mock_get_id.return_value = 'id'
+ result = os_sfc_utils.get_vnfd_id(self.mock_tacker_client,
+ 'vnfd_name')
+ self.assertEqual('id', result)
+ mock_get_id.assert_called_once_with(self.mock_tacker_client,
+ 'vnfd',
+ 'vnfd_name')
+
+ @patch('sfc.lib.openstack_utils.get_id_from_name', autospec=True)
+ def test_get_vim_id(self, mock_get_id):
+ """
+ Checks the proper functionality of get_vim_id
+ function
+ """
+
+ mock_get_id.return_value = 'id'
+ result = os_sfc_utils.get_vim_id(self.mock_tacker_client, 'vim_name')
+ mock_get_id.assert_called_once_with(self.mock_tacker_client,
+ 'vim',
+ 'vim_name')
+ self.assertEqual('id', result)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_id_from_name', autospec=True)
+ def test_get_vnf_id(self,
+ mock_get_id,
+ mock_log,
+ mock_sleep):
+ """
+ Checks the proper functionality of get_vnf_id
+ function
+ """
+
+ vnf_name = 'mock_vnf'
+ log_calls = [call("Could not retrieve ID for vnf with name [%s]."
+ " Retrying." % vnf_name)]
+
+ get_id_calls = [call(self.mock_tacker_client, 'vnf', vnf_name)] * 2
+
+ mock_get_id.side_effect = [None, 'vnf_id']
+
+ result = os_sfc_utils.get_vnf_id(self.mock_tacker_client, vnf_name, 2)
+ self.assertEqual('vnf_id', result)
+ mock_sleep.assert_called_once_with(1)
+ mock_log.info.assert_has_calls(log_calls)
+ mock_get_id.assert_has_calls(get_id_calls)
+
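+ # get_vnf_id (and get_vnffg_id below) are assumed to share a retry
+ # loop like this, which the two side_effect values model (sketch):
+ #
+ #     while timeout > 0:
+ #         resource_id = get_id_from_name(tacker_client, 'vnf', name)
+ #         if resource_id is not None:
+ #             return resource_id
+ #         logger.info("Could not retrieve ID for vnf with name [%s]."
+ #                     " Retrying." % name)
+ #         time.sleep(1)
+ #         timeout -= 1
+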
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_id_from_name', autospec=True)
+ def test_get_vnffg_id(self,
+ mock_get_id,
+ mock_log,
+ mock_sleep):
+ """
+ Checks the proper functionality of get_vnffg_id
+ function
+ """
+
+ vnffg_name = 'mock_vnffg'
+ log_calls = [call("Could not retrieve ID for vnffg with name [%s]."
+ " Retrying." % vnffg_name)]
+
+ get_id_calls = [call(self.mock_tacker_client, 'vnffg', vnffg_name)] * 2
+
+ mock_get_id.side_effect = [None, 'vnf_id']
+
+ result = os_sfc_utils.get_vnffg_id(self.mock_tacker_client,
+ vnffg_name,
+ 2)
+ self.assertEqual('vnf_id', result)
+ mock_sleep.assert_called_once_with(1)
+ mock_log.info.assert_has_calls(log_calls)
+ mock_get_id.assert_has_calls(get_id_calls)
+
+ @patch('sfc.lib.openstack_utils.get_id_from_name', autospec=True)
+ def test_get_vnffgd_id(self, mock_get_id):
+ """
+ Checks the proper functionality of get_vnffgd_id
+ function
+ """
+
+ mock_get_id.return_value = 'id'
+ result = os_sfc_utils.get_vnffgd_id(self.mock_tacker_client,
+ 'vnffgd_name')
+ mock_get_id.assert_called_once_with(self.mock_tacker_client,
+ 'vnffgd',
+ 'vnffgd_name')
+ self.assertEqual('id', result)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_list_vnfds_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of list_vnfds
+ function when list_vnfds raises an exception
+ """
+
+ log_calls = [call('Error [list_vnfds(tacker_client)]: ErrorMSG')]
+ self.mock_tacker_client.list_vnfds.side_effect = Exception('ErrorMSG')
+ result = os_sfc_utils.list_vnfds(self.mock_tacker_client)
+ mock_log.error.assert_has_calls(log_calls)
+ self.mock_tacker_client.list_vnfds.assert_called_once_with(
+ retrieve_all=True)
+ self.assertIsNone(result)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_list_vnfds(self, mock_log):
+ """
+ Checks the proper functionality of list_vnfds
+ function when list_vnfds returns vnfds
+ """
+
+ vnfds = {
+ 'vnfds': [{'id': 1},
+ {'id': 2}]
+ }
+ self.mock_tacker_client.list_vnfds.return_value = vnfds
+ result = os_sfc_utils.list_vnfds(self.mock_tacker_client)
+ self.mock_tacker_client.list_vnfds.assert_called_once_with(
+ retrieve_all=True)
+ mock_log.assert_not_called()
+ self.assertEqual([1, 2], result)
+
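+ # The [1, 2] result assumes list_vnfds unwraps the ids (sketch):
+ #
+ #     vnfds = tacker_client.list_vnfds(retrieve_all=True)
+ #     return [vnfd['id'] for vnfd in vnfds['vnfds']]
+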
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnfd_returned_none_tosca_file_not_provided(self, mock_log):
+ """
+ Checks the proper functionality of create_vnfd
+ function when an exception is raised
+ """
+
+ log_calls = [call("Creating the vnfd..."),
+ call("Error [create_vnfd(tacker_client, 'None')]: "
+ "ErrorMSG")]
+
+ self.mock_tacker_client.create_vnfd.side_effect = Exception('ErrorMSG')
+ result = os_sfc_utils.create_vnfd(self.mock_tacker_client,
+ None,
+ 'vnfd_name')
+ self.assertIsNone(result)
+ self.mock_tacker_client.create_vnfd.assert_called_once_with(
+ body={'vnfd': {'attributes': {'vnfd': {}},
+ 'name': 'vnfd_name'}})
+ mock_log.info.assert_has_calls(log_calls[:1])
+ mock_log.error.assert_has_calls(log_calls[1:])
+
+ @patch('yaml.safe_load', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnfd_returned_none_tosca_file_provided(self,
+ mock_log,
+ mock_open,
+ mock_safe_load):
+ """
+ Checks the proper functionality of create_vnfd
+ function when an exception is raised
+ """
+
+ log_calls = [call("Creating the vnfd..."),
+ call("VNFD template:\nmock_vnfd"),
+ call("Error [create_vnfd(tacker_client, 'tosca_file')]: "
+ "ErrorMSG")]
+
+ open_handler = mock_open.return_value.__enter__.return_value
+ open_handler.read.return_value = 'mock_vnfd'
+ mock_safe_load.return_value = 'mock_vnfd_body'
+ self.mock_tacker_client.create_vnfd.side_effect = Exception('ErrorMSG')
+ result = os_sfc_utils.create_vnfd(self.mock_tacker_client,
+ 'tosca_file',
+ 'vnfd_name')
+ self.assertIsNone(result)
+ mock_open.assert_called_once_with('tosca_file')
+ open_handler.read.assert_called_once_with()
+ mock_safe_load.assert_called_once_with('mock_vnfd')
+ mock_log.info.assert_has_calls(log_calls[:2])
+ mock_log.error.assert_has_calls(log_calls[2:])
+
+ @patch('yaml.safe_load', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnfd(self,
+ mock_log,
+ mock_open,
+ mock_safe_load):
+ """
+ Checks the proper functionality of create_vnfd
+ function
+ """
+
+ log_calls = [call("VNFD template:\nmock_vnfd")]
+
+ open_handler = mock_open.return_value.__enter__.return_value
+ open_handler.read.return_value = 'mock_vnfd'
+ mock_safe_load.return_value = 'mock_vnfd_body'
+ result = os_sfc_utils.create_vnfd(self.mock_tacker_client,
+ 'tosca_file',
+ 'vnfd_name')
+ assert result is self.mock_tacker_client.create_vnfd.return_value
+ self.mock_tacker_client.create_vnfd.assert_called_once_with(
+ body={"vnfd": {"attributes": {"vnfd": "mock_vnfd_body"},
+ "name": "vnfd_name"}})
+ mock_open.assert_called_once_with('tosca_file')
+ open_handler.read.assert_called_once_with()
+ mock_safe_load.assert_called_once_with('mock_vnfd')
+ mock_log.info.assert_has_calls(log_calls)
+ mock_log.error.assert_not_called()
+
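+ # Body construction assumed by the create_vnfd tests (sketch; with
+ # tosca_file=None the nested 'vnfd' attribute defaults to {}):
+ #
+ #     with open(tosca_file) as f:
+ #         vnfd_body = yaml.safe_load(f.read())
+ #     tacker_client.create_vnfd(
+ #         body={'vnfd': {'attributes': {'vnfd': vnfd_body},
+ #                        'name': vnfd_name}})
+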
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_delete_vnfd_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of delete_vnfd
+ function when an exception is raised
+ """
+
+ log_calls = [call("Error [delete_vnfd(tacker_client, 'None', 'None')]:"
+ " You need to provide VNFD id or VNFD name")]
+
+ result = os_sfc_utils.delete_vnfd(self.mock_tacker_client)
+ self.assertIsNone(result)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.get_vnfd_id',
+ autospec=True, return_value='vnfd')
+ def test_delete_vnfd(self, mock_get_vnfd_id):
+ """
+ Checks the proper functionality of delete_vnfd
+ function
+ """
+
+ result = os_sfc_utils.delete_vnfd(self.mock_tacker_client,
+ None,
+ 'vnfd_name')
+ assert result is self.mock_tacker_client.delete_vnfd.return_value
+ mock_get_vnfd_id.assert_called_once_with(self.mock_tacker_client,
+ 'vnfd_name')
+ self.mock_tacker_client.delete_vnfd.assert_called_once_with('vnfd')
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_list_vnfs_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of list_vnfs
+ function when list_vnfs raises an exception
+ """
+
+ log_calls = [call("Error [list_vnfs(tacker_client)]: ErrorMSG")]
+
+ self.mock_tacker_client.list_vnfs.side_effect = Exception('ErrorMSG')
+ result = os_sfc_utils.list_vnfs(self.mock_tacker_client)
+ self.assertIsNone(result)
+ self.mock_tacker_client.list_vnfs.assert_called_once_with(
+ retrieve_all=True)
+ mock_log.error.assert_has_calls(log_calls)
+
+ def test_list_vnfs(self):
+ """
+ Checks the proper functionality of list_vnfs
+ function
+ """
+ vnfs = {'vnfs': [{'id': 1},
+ {'id': 2}]}
+
+ self.mock_tacker_client.list_vnfs.return_value = vnfs
+ result = os_sfc_utils.list_vnfs(self.mock_tacker_client)
+ self.assertEqual([1, 2], result)
+ self.mock_tacker_client.list_vnfs.assert_called_once_with(
+ retrieve_all=True)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnf_returned_none_vnfd_not_provided(self, mock_log):
+ """
+ Checks the proper functionality of create_vnf
+ function when neither a vnfd id nor a vnfd name is provided
+ """
+
+ log_calls = [call("Creating the vnf..."),
+ call("error [create_vnf(tacker_client,"
+ " 'vnf_name', 'None', 'None')]: "
+ "vnfd id or vnfd name is required")]
+ result = os_sfc_utils.create_vnf(self.mock_tacker_client, 'vnf_name')
+ self.assertIsNone(result)
+ mock_log.info.assert_has_calls(log_calls[:1])
+ mock_log.error.assert_has_calls(log_calls[1:])
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnf_returned_none_vnfd_provided(self, mock_log):
+ """
+ Checks the proper functionality of create_vnf
+ function when neither a vim id nor a vim name is provided
+ """
+
+ log_calls = [call("Creating the vnf..."),
+ call("error [create_vnf(tacker_client,"
+ " 'vnf_name', 'None', 'vnfd_name')]: "
+ "vim id or vim name is required")]
+ result = os_sfc_utils.create_vnf(self.mock_tacker_client,
+ 'vnf_name',
+ None,
+ 'vnfd_name',
+ None,
+ None)
+ self.assertIsNone(result)
+ mock_log.info.assert_has_calls(log_calls[:1])
+ mock_log.error.assert_has_calls(log_calls[1:])
+
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_vim_id',
+ autospec=True, return_value='vim_id')
+ @patch('sfc.lib.openstack_utils.get_vnfd_id',
+ autospec=True, return_value='vnfd_id')
+ def test_create_vnf_vim_id_not_provided(self,
+ mock_get_vnfd_id,
+ mock_get_vim_id,
+ mock_log,
+ mock_open):
+ """
+ Checks the proper functionality of create_vnf
+ function
+ """
+ mock_body = {'vnf': {'attributes': {'param_values': 'mock_data'},
+ 'vim_id': 'vim_id',
+ 'name': 'vnf_name',
+ 'vnfd_id': 'vnfd_id'}}
+ log_calls = [call('Creating the vnf...')]
+ open_handler = mock_open.return_value.__enter__.return_value
+ open_handler.read.return_value = 'mock_data'
+ result = os_sfc_utils.create_vnf(self.mock_tacker_client,
+ 'vnf_name',
+ None,
+ 'vnfd_name',
+ None,
+ 'vim_name',
+ 'param_file')
+
+ assert result is self.mock_tacker_client.create_vnf.return_value
+ mock_get_vnfd_id.assert_called_once_with(self.mock_tacker_client,
+ 'vnfd_name')
+ mock_get_vim_id.assert_called_once_with(self.mock_tacker_client,
+ 'vim_name')
+ mock_log.info.assert_has_calls(log_calls)
+ self.mock_tacker_client.create_vnf.assert_called_once_with(
+ body=mock_body)
+
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnf_vim_id_provided(self, mock_log, mock_open):
+ """
+ Checks the proper functionality of create_vnf
+ function
+ """
+ mock_body = {'vnf': {'attributes': {},
+ 'vim_id': 'vim_id',
+ 'name': 'vnf_name',
+ 'vnfd_id': 'vnfd_id'}}
+ log_calls = [call('Creating the vnf...')]
+ open_handler = mock_open.return_value.__enter__.return_value
+ open_handler.read.return_value = 'mock_data'
+
+ result = os_sfc_utils.create_vnf(self.mock_tacker_client,
+ 'vnf_name',
+ 'vnfd_id',
+ 'vnfd_name',
+ 'vim_id',
+ 'vim_name')
+ assert result is self.mock_tacker_client.create_vnf.return_value
+ mock_log.info.assert_has_calls(log_calls)
+ self.mock_tacker_client.create_vnf.assert_called_once_with(
+ body=mock_body)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_get_vnf_returned_none_vnf_not_provided(self, mock_log):
+ """
+ Checks the proper functionality of get_vnf
+ function when neither a VNF id nor a VNF name is provided
+ """
+
+ log_calls = [call("Could not retrieve VNF [vnf_id=None, vnf_name=None]"
+ " - You must specify vnf_id or vnf_name")]
+
+ result = os_sfc_utils.get_vnf(self.mock_tacker_client)
+ self.assertIsNone(result)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.get_vnf_id',
+ autospec=True, return_value=None)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_get_vnf_returned_none_vnf_provided(self,
+ mock_log,
+ mock_get_vnf_id):
+ """
+ Checks the proper functionality of get_vnf
+ function when the VNF id cannot be resolved from the name
+ """
+
+ log_calls = [call("Could not retrieve VNF [vnf_id=None, "
+ "vnf_name=vnf_name] - Could not retrieve ID from "
+ "name [vnf_name]")]
+ result = os_sfc_utils.get_vnf(self.mock_tacker_client,
+ None,
+ 'vnf_name')
+ self.assertIsNone(result)
+ mock_get_vnf_id.assert_called_once_with(self.mock_tacker_client,
+ 'vnf_name')
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.list_vnfs', autospec=True)
+ def test_get_vnf(self,
+ mock_list_vnfs,
+ mock_log):
+ """
+ Checks the proper functionality of get_vnf
+ function
+ """
+
+ vnf = {'vnfs': [{'id': 'default'},
+ {'id': 'vnf_id'}]}
+
+ mock_list_vnfs.return_value = vnf
+ result = os_sfc_utils.get_vnf(self.mock_tacker_client, 'vnf_id', None)
+ self.assertDictEqual(vnf['vnfs'][1], result)
+ mock_log.error.assert_not_called()
+
+ @patch('json.loads', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_vnf', autospec=True)
+ def test_get_vnf_ip(self,
+ mock_get_vnf,
+ mock_json_loads):
+ """
+ Checks the proper functionality of get_vnf_ip
+ function
+ """
+
+ vnf = {"mgmt_url": {"VDU1": "192.168.120.3"}}
+ mock_get_vnf.return_value = vnf
+ mock_json_loads.return_value = vnf['mgmt_url']
+ result = os_sfc_utils.get_vnf_ip(self.mock_tacker_client)
+ self.assertEqual("192.168.120.3", result)
+ mock_get_vnf.assert_called_once_with(self.mock_tacker_client,
+ None,
+ None)
+ mock_json_loads.assert_called_once_with(vnf['mgmt_url'])
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_vnf', autospec=True)
+ def test_wait_for_vnf_returned_none_unable_to_retrieve_vnf(self,
+ mock_get_vnf,
+ mock_log):
+ """
+ Checks the proper functionality of wait_for_vnf
+ function when the VNF cannot be retrieved
+ """
+
+ mock_get_vnf.return_value = None
+ log_calls = [call("error [wait_for_vnf(tacker_client, 'vnf_id', "
+ "'vnf_name')]: Could not retrieve VNF - id='vnf_id',"
+ " name='vnf_name'")]
+
+ result = os_sfc_utils.wait_for_vnf(self.mock_tacker_client,
+ 'vnf_id',
+ 'vnf_name',
+ 0)
+ self.assertIsNone(result)
+ mock_get_vnf.assert_called_once_with(self.mock_tacker_client,
+ 'vnf_id',
+ 'vnf_name')
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_vnf', autospec=True)
+ def test_wait_for_vnf_returned_none_unable_to_boot_vnf(self,
+ mock_get_vnf,
+ mock_log,
+ mock_sleep):
+ """
+ Checks the proper functionality of wait_for_vnf
+ function when the VNF fails to boot
+ """
+
+ mock_vnf_values = [{'id': 'vnf_id',
+ 'status': 'ERROR'},
+ {'id': 'vnf_id',
+ 'status': 'PENDING_CREATE'}]
+ mock_get_vnf.side_effect = mock_vnf_values
+ log_calls = [call("Waiting for vnf %s" % str(mock_vnf_values[0])),
+ call("error [wait_for_vnf(tacker_client, 'vnf_id', "
+ "'vnf_name')]: Error when booting vnf vnf_id")]
+
+ result = os_sfc_utils.wait_for_vnf(self.mock_tacker_client,
+ 'vnf_id',
+ 'vnf_name',
+ 0)
+ self.assertIsNone(result)
+ mock_get_vnf.assert_called_once_with(self.mock_tacker_client,
+ 'vnf_id',
+ 'vnf_name')
+ mock_log.info.assert_has_calls(log_calls[:1])
+ mock_log.error.assert_has_calls(log_calls[1:])
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_vnf', autospec=True)
+ def test_wait_for_vnf_returned_none_timeout_booting_vnf(self,
+ mock_get_vnf,
+ mock_log,
+ mock_sleep):
+ """
+ Checks the proper functionality of wait_for_vnf
+ function when waiting for the VNF to boot times out
+ """
+
+ mock_vnf_values = [{'id': 'vnf_id',
+ 'status': 'PENDING_CREATE'},
+ {'id': 'vnf_id',
+ 'status': 'PENDING_CREATE'}]
+ mock_get_vnf.side_effect = mock_vnf_values
+ log_calls = [call("Waiting for vnf %s" % str(mock_vnf_values[1])),
+ call("error [wait_for_vnf(tacker_client, 'vnf_id', "
+ "'vnf_name')]: Timeout when booting vnf vnf_id")]
+
+ result = os_sfc_utils.wait_for_vnf(self.mock_tacker_client,
+ 'vnf_id',
+ 'vnf_name',
+ 0)
+ self.assertIsNone(result)
+ mock_get_vnf.assert_called_with(self.mock_tacker_client,
+ 'vnf_id',
+ 'vnf_name')
+ mock_log.info.assert_has_calls(log_calls[:1])
+ mock_log.error.assert_has_calls(log_calls[1:])
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_vnf', autospec=True)
+ def test_wait_for_vnf(self,
+ mock_get_vnf,
+ mock_log,
+ mock_sleep):
+ """
+ Checks the proper functionality of wait_for_vnf
+ function
+ """
+
+ mock_vnf_values = [{'status': 'PENDING_CREATE',
+ 'id': 'vnf_id'},
+ {'status': 'ACTIVE',
+ 'id': 'vnf_id'}]
+
+ log_calls = [call("Waiting for vnf %s" % mock_vnf_values[0])]
+
+ mock_get_vnf.side_effect = mock_vnf_values
+
+ result = os_sfc_utils.wait_for_vnf(self.mock_tacker_client,
+ 'vnf_id',
+ 'vnf_name',
+ 3)
+ self.assertEqual('vnf_id', result)
+ mock_log.info.assert_has_calls(log_calls)
+
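+ # Assumed polling loop behind the four wait_for_vnf tests (sketch;
+ # poll_interval and the None-vnf branch are assumptions):
+ #
+ #     while True:
+ #         vnf = get_vnf(tacker_client, vnf_id, vnf_name)
+ #         logger.info("Waiting for vnf %s" % str(vnf))
+ #         if vnf['status'] == 'ERROR':
+ #             return None  # logged as "Error when booting vnf <id>"
+ #         if vnf['status'] == 'ACTIVE':
+ #             return vnf['id']
+ #         if timeout <= 0:
+ #             return None  # logged as "Timeout when booting vnf <id>"
+ #         time.sleep(poll_interval)
+ #         timeout -= poll_interval
+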
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_delete_vnf_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of delete_vnf
+ function when neither a VNF id nor a VNF name is provided
+ """
+
+ log_calls = [call("Error [delete_vnf(tacker_client, 'None', 'None')]:"
+ " You need to provide a VNF id or name")]
+ result = os_sfc_utils.delete_vnf(self.mock_tacker_client)
+ self.assertIsNone(result)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_vnf_id', autospec=True)
+ def test_delete_vnf(self,
+ mock_get_vnf_id,
+ mock_log):
+ """
+ Checks the proper functionality of delete_vnf
+ function
+ """
+
+ mock_get_vnf_id.return_value = 'vnf'
+ result = os_sfc_utils.delete_vnf(self.mock_tacker_client,
+ None,
+ 'vnf_name')
+ assert result is self.mock_tacker_client.delete_vnf.return_value
+ mock_get_vnf_id.assert_called_once_with(self.mock_tacker_client,
+ 'vnf_name')
+ self.mock_tacker_client.delete_vnf.assert_called_once_with('vnf')
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vim_returned_none(self,
+ mock_log):
+ """
+ Checks the proper functionality of create_vim
+ function when the vim_file is not provided
+ """
+
+ self.mock_tacker_client.create_vim.side_effect = Exception('ErrorMSG')
+ log_calls = [[call("Creating the vim...")],
+ [call("Error [create_vim(tacker_client, 'None')]"
+ ": ErrorMSG")]]
+
+ result = os_sfc_utils.create_vim(self.mock_tacker_client)
+ self.assertIsNone(result)
+ self.mock_tacker_client.create_vim.assert_called_once_with(body={})
+ mock_log.info.assert_has_calls(log_calls[0])
+ mock_log.error.assert_has_calls(log_calls[1])
+
+ @patch('json.load', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vim(self,
+ mock_log,
+ mock_open,
+ mock_json_loads):
+ """
+ Checks the proper functionality of create_vim
+ function
+ """
+
+ log_calls = [call("Creating the vim..."),
+ call("VIM template:\nmock_data")]
+
+ open_handler = mock_open.return_value.__enter__.return_value
+ mock_json_loads.return_value = 'mock_data'
+ result = os_sfc_utils.create_vim(self.mock_tacker_client, 'vim_file')
+ assert result is self.mock_tacker_client.create_vim.return_value
+ mock_log.info.assert_has_calls(log_calls)
+ mock_open.assert_called_once_with('vim_file')
+ mock_json_loads.assert_called_once_with(open_handler)
+ mock_log.error.assert_not_called()
+
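+ # create_vim is assumed to parse the template with json.load before
+ # posting it (sketch; the empty-body default is asserted in the
+ # previous test):
+ #
+ #     body = {}
+ #     if vim_file is not None:
+ #         with open(vim_file) as f:
+ #             body = json.load(f)
+ #     tacker_client.create_vim(body=body)
+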
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnffgd_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of create_vnffgd
+ function when create_vnffgd raises an Exception
+ """
+
+ self.mock_tacker_client.create_vnffgd.side_effect = Exception(
+ 'ErrorMSG')
+ log_calls = [[call("Creating the vnffgd...")],
+ [call("Error [create_vnffgd(tacker_client, 'None')]"
+ ": ErrorMSG")]]
+
+ result = os_sfc_utils.create_vnffgd(self.mock_tacker_client)
+ self.assertIsNone(result)
+ mock_log.info.assert_has_calls(log_calls[0])
+ mock_log.error.assert_has_calls(log_calls[1])
+
+ @patch('yaml.safe_load', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnffgd(self,
+ mock_log,
+ mock_open,
+ mock_safe_load):
+ """
+ Checks the proper functionality of create_vnffgd
+ function
+ """
+
+ log_calls = [call('Creating the vnffgd...'),
+ call('VNFFGD template:\nmock_data')]
+
+ vnffgd_body = {'id': 0, 'type': 'dict'}
+
+ mock_vim_body = {'vnffgd': {'name': 'vnffgd_name',
+ 'template': {'vnffgd': vnffgd_body}}}
+
+ open_handler = mock_open.return_value.__enter__.return_value
+ open_handler.read.return_value = 'mock_data'
+ mock_safe_load.return_value = {'id': 0, 'type': 'dict'}
+ result = os_sfc_utils.create_vnffgd(self.mock_tacker_client,
+ 'tosca_file',
+ 'vnffgd_name')
+ assert result is self.mock_tacker_client.create_vnffgd.return_value
+ mock_open.assert_called_once_with('tosca_file')
+ mock_safe_load.assert_called_once_with('mock_data')
+ self.mock_tacker_client.create_vnffgd.assert_called_once_with(
+ body=mock_vim_body)
+ mock_log.info.assert_has_calls(log_calls)
+ mock_log.error.assert_not_called()
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnffg_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of create_vnffg
+ function when neither a vnffgd id nor a vnffgd name is provided
+ """
+
+ log_calls = [[call("Creating the vnffg...")],
+ [call("error [create_vnffg(tacker_client,"
+ " 'None', 'None', 'None')]: "
+ "vnffgd id or vnffgd name is required")]]
+
+ result = os_sfc_utils.create_vnffg(self.mock_tacker_client)
+ self.assertIsNone(result)
+ mock_log.info.assert_has_calls(log_calls[0])
+ mock_log.error.assert_has_calls(log_calls[1])
+
+ @patch('yaml.safe_load', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_vnffgd_id', autospec=True)
+ def test_create_vnffg_vnffgd_id_not_provided(self,
+ mock_get_vnffgd_id,
+ mock_log,
+ mock_open,
+ mock_safe_load):
+ """
+ Checks the proper functionality of create_vnffg
+ function when the vnffgd id is looked up from the vnffgd name
+ """
+
+ log_calls = [call('Creating the vnffg...')]
+ vnffg_calls = [call(body={
+ 'vnffg': {
+ 'attributes': {'param_values': {'type': 'dict',
+ 'id': 0}},
+ 'vnffgd_id': 'mocked_vnffg_id',
+ 'name': 'vnffg_name',
+ 'symmetrical': False}})]
+ mock_get_vnffgd_id.return_value = 'mocked_vnffg_id'
+ open_handler = mock_open.return_value.__enter__.return_value
+ open_handler.read.return_value = 'data'
+ mock_safe_load.return_value = {'id': 0, 'type': 'dict'}
+
+ result = os_sfc_utils.create_vnffg(self.mock_tacker_client,
+ 'vnffg_name',
+ None,
+ 'vnffgd_name',
+ 'param_file')
+ assert result is self.mock_tacker_client.create_vnffg.return_value
+ mock_open.assert_called_once_with('param_file')
+ open_handler.read.assert_called_once_with()
+ mock_get_vnffgd_id.assert_called_once_with(self.mock_tacker_client,
+ 'vnffgd_name')
+ mock_safe_load.assert_called_once_with('data')
+ mock_log.info.assert_has_calls(log_calls)
+ self.mock_tacker_client.create_vnffg.assert_has_calls(vnffg_calls)
+
+ @patch('yaml.safe_load', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnffg_vnffgd_id_provided(self,
+ mock_log,
+ mock_open,
+ mock_safe_load):
+ """
+ Checks the proper functionality of create_vnffg
+ function when the vnffgd id is provided
+ """
+
+ log_calls = [call('Creating the vnffg...')]
+ vnffg_calls = [call(body={
+ 'vnffg': {
+ 'attributes': {'param_values': {'type': 'dict',
+ 'id': 0}},
+ 'vnffgd_id': 'vnffgd_id',
+ 'name': 'vnffg_name',
+ 'symmetrical': False}})]
+ open_handler = mock_open.return_value.__enter__.return_value
+ open_handler.read.return_value = 'data'
+ mock_safe_load.return_value = {'id': 0, 'type': 'dict'}
+
+ result = os_sfc_utils.create_vnffg(self.mock_tacker_client,
+ 'vnffg_name',
+ 'vnffgd_id',
+ 'vnffgd_name',
+ 'param_file')
+ assert result is self.mock_tacker_client.create_vnffg.return_value
+ mock_open.assert_called_once_with('param_file')
+ open_handler.read.assert_called_once_with()
+ mock_safe_load.assert_called_once_with('data')
+ mock_log.info.assert_has_calls(log_calls)
+ self.mock_tacker_client.create_vnffg.assert_has_calls(vnffg_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_list_vnffgds_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of list_vnffgds
+ function when list_vnffgds raises an Exception
+ """
+
+ self.mock_tacker_client.list_vnffgds.side_effect = Exception(
+ 'ErrorMSG')
+ log_calls = [call('Error [list_vnffgds(tacker_client)]: ErrorMSG')]
+
+ result = os_sfc_utils.list_vnffgds(self.mock_tacker_client)
+ self.assertIsNone(result)
+ self.mock_tacker_client.list_vnffgds.assert_called_once_with(
+ retrieve_all=True)
+ mock_log.error.assert_has_calls(log_calls)
+
+ def test_list_vnffgds(self):
+ """
+ Checks the proper functionality of list_vnffgds
+ function
+ """
+
+ vnffgds = {'vnffgds': [{'id': 'vnffgd_obj_one'},
+ {'id': 'vnffgd_obj_two'}]}
+
+ mock_vnffgds = ['vnffgd_obj_one', 'vnffgd_obj_two']
+
+ self.mock_tacker_client.list_vnffgds.return_value = vnffgds
+ result = os_sfc_utils.list_vnffgds(self.mock_tacker_client)
+ self.assertEqual(mock_vnffgds, result)
+ self.mock_tacker_client.list_vnffgds.assert_called_once_with(
+ retrieve_all=True)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_list_vnffgs_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of list_vnffgs
+ function when list_vnffgs raises an Exception
+ """
+
+ self.mock_tacker_client.list_vnffgs.side_effect = Exception('ErrorMSG')
+ log_calls = [call('Error [list_vnffgs(tacker_client)]: ErrorMSG')]
+
+ result = os_sfc_utils.list_vnffgs(self.mock_tacker_client)
+ self.assertIsNone(result)
+ self.mock_tacker_client.list_vnffgs.assert_called_once_with(
+ retrieve_all=True)
+ mock_log.error.assert_has_calls(log_calls)
+
+ def test_list_vnffgs(self):
+ """
+ Checks the proper functionality of list_vnffgs
+ function
+ """
+
+ vnffgs = {'vnffgs': [{'id': 'vnffg_obj_one'},
+ {'id': 'vnffg_obj_two'}]}
+
+ mock_vnffgs = ['vnffg_obj_one', 'vnffg_obj_two']
+
+ self.mock_tacker_client.list_vnffgs.return_value = vnffgs
+ result = os_sfc_utils.list_vnffgs(self.mock_tacker_client)
+ self.assertEqual(mock_vnffgs, result)
+ self.mock_tacker_client.list_vnffgs.assert_called_once_with(
+ retrieve_all=True)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_delete_vnffg_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of delete_vnffg
+ function when neither a VNFFG id nor a VNFFG name is provided
+ """
+
+ log_calls = [call("Error [delete_vnffg(tacker_client, 'None', 'None')]"
+ ": You need to provide a VNFFG id or name")]
+
+ result = os_sfc_utils.delete_vnffg(self.mock_tacker_client)
+ self.assertIsNone(result)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.get_vnffg_id',
+ autospec=True, return_value='vnffg')
+ def test_delete_vnffg(self, mock_get_vnffg_id):
+ """
+ Checks the proper functionality of delete_vnffg
+ function
+ """
+
+ self.mock_tacker_client.delete_vnffg.return_value = 'deleted'
+ result = os_sfc_utils.delete_vnffg(self.mock_tacker_client,
+ None,
+ 'vnffg_name')
+ self.assertEqual('deleted', result)
+ mock_get_vnffg_id.assert_called_once_with(self.mock_tacker_client,
+ 'vnffg_name')
+ self.mock_tacker_client.delete_vnffg.assert_called_once_with('vnffg')
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_delete_vnffgd_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of delete_vnffgd
+ function when neither a VNFFGD id nor a VNFFGD name is provided
+ """
+
+ log_calls = [call("Error [delete_vnffgd(tacker_client, 'None', 'None')"
+ "]: You need to provide VNFFGD id or VNFFGD name")]
+
+ result = os_sfc_utils.delete_vnffgd(self.mock_tacker_client)
+ self.assertIsNone(result)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.get_vnffgd_id',
+ autospec=True, return_value='vnffgd')
+ def test_delete_vnffgd(self, mock_get_vnffgd_id):
+ """
+ Checks the proper functionality of delete_vnffgd
+ function
+ """
+
+ self.mock_tacker_client.delete_vnffgd.return_value = 'deleted'
+ result = os_sfc_utils.delete_vnffgd(self.mock_tacker_client,
+ None,
+ 'vnffgd_name')
+ self.assertEqual('deleted', result)
+ mock_get_vnffgd_id.assert_called_once_with(self.mock_tacker_client,
+ 'vnffgd_name')
+ self.mock_tacker_client.delete_vnffgd.assert_called_once_with('vnffgd')
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_list_vims_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of list_vims
+ function when list_vims raises an exception
+ """
+
+ self.mock_tacker_client.list_vims.side_effect = Exception('ErrorMSG')
+ log_calls = [call('Error [list_vims(tacker_client)]: ErrorMSG')]
+
+ result = os_sfc_utils.list_vims(self.mock_tacker_client)
+ self.assertIsNone(result)
+ self.mock_tacker_client.list_vims.assert_called_once_with(
+ retrieve_all=True)
+ mock_log.error.assert_has_calls(log_calls)
+
+ def test_list_vims(self):
+ """
+ Checks the proper functionality of the list_vims
+ function
+ """
+
+ vims = {'vims': [{'id': 'vim_obj_1'},
+ {'id': 'vim_obj_2'}]}
+
+ mock_vims = ['vim_obj_1', 'vim_obj_2']
+
+ self.mock_tacker_client.list_vims.return_value = vims
+ result = os_sfc_utils.list_vims(self.mock_tacker_client)
+ self.assertEqual(mock_vims, result)
+ self.mock_tacker_client.list_vims.assert_called_once_with(
+ retrieve_all=True)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_delete_vim_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of delete_vim
+ function when neither a VIM id nor a VIM name is provided
+ """
+
+ log_calls = [call("Error [delete_vim(tacker_client, '%s', '%s')]: %s"
+ % (None, None, 'You need to provide '
+ 'VIM id or VIM name'))]
+
+ result = os_sfc_utils.delete_vim(self.mock_tacker_client)
+ self.assertIsNone(result)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.get_vim_id',
+ autospec=True, return_value='vim_id')
+ def test_delete_vim(self, mock_get_vim_id):
+ """
+ Checks the proper functionality of delete_vim
+ function
+ """
+
+ result = os_sfc_utils.delete_vim(self.mock_tacker_client,
+ None,
+ 'vim_name')
+ assert result is self.mock_tacker_client.delete_vim.return_value
+ mock_get_vim_id.assert_called_once_with(self.mock_tacker_client,
+ 'vim_name')
+ self.mock_tacker_client.delete_vim.assert_called_once_with('vim_id')
+
+ @patch('sfc.lib.openstack_utils.get_tacker_client',
+ autospec=True, return_value='tacker_client_obj')
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_get_tacker_items(self,
+ mock_log,
+ mock_tacker_client):
+ """
+ Checks the proper functionality of get_tacker_items
+ function
+ """
+
+ mock_dict = {'list_vims': DEFAULT,
+ 'list_vnfds': DEFAULT,
+ 'list_vnfs': DEFAULT,
+ 'list_vnffgds': DEFAULT,
+ 'list_vnffgs': DEFAULT}
+ with patch.multiple('sfc.lib.openstack_utils',
+ **mock_dict) as mock_values:
+
+ os_sfc_utils.get_tacker_items()
+
+ mock_tacker_client.assert_called_once_with()
+ self.assertEqual(5, mock_log.debug.call_count)
+ for key in mock_values:
+ mock_values[key].assert_called_once_with('tacker_client_obj')
+
+ @patch('json.dump', autospec=True)
+ @patch('json.load', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.create_vim', autospec=True)
+ def test_register_vim(self,
+ mock_create_vim,
+ mock_open,
+ mock_json_loads,
+ mock_json_dump):
+ """
+ Checks the proper functionality of register_vim
+ function
+ """
+
+ tmp_file = '/tmp/register-vim.json'
+ open_handler = mock_open.return_value.__enter__.return_value
+ open_calls = [call('vim_file'),
+ call(tmp_file, 'w')]
+
+ mock_json_loads.return_value = {'vim': {'auth_cred':
+ {'password': None},
+ 'auth_url': None}}
+
+ json_dict = {'vim': {'auth_cred': {'password': 'os_auth_cred'},
+ 'auth_url': 'os_auth_url'}}
+
+ patch_dict = {'OS_AUTH_URL': 'os_auth_url',
+ 'OS_PASSWORD': 'os_auth_cred'}
+
+ with patch.dict('os.environ', patch_dict):
+ os_sfc_utils.register_vim(self.mock_tacker_client, 'vim_file')
+ mock_json_loads.assert_called_once_with(open_handler)
+ mock_json_dump.assert_called_once_with(json_dict,
+ mock_open(tmp_file, 'w'))
+ mock_open.assert_has_calls(open_calls, any_order=True)
+ mock_create_vim.assert_called_once_with(self.mock_tacker_client,
+ vim_file=tmp_file)
+
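+ # register_vim is assumed to inject credentials from the environment
+ # into the template before delegating to create_vim (sketch matching
+ # the json_dict and open_calls asserted above):
+ #
+ #     with open(vim_file) as f:
+ #         vim = json.load(f)
+ #     vim['vim']['auth_cred']['password'] = os.environ['OS_PASSWORD']
+ #     vim['vim']['auth_url'] = os.environ['OS_AUTH_URL']
+ #     with open('/tmp/register-vim.json', 'w') as f:
+ #         json.dump(vim, f)
+ #     create_vim(tacker_client, vim_file='/tmp/register-vim.json')
+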
+ @patch('json.dump', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.create_vnf', autospec=True)
+ @patch('os.path.join',
+ autospec=True, return_value='/tmp/param_av_zone.json')
+ def test_create_vnf_in_av_zone(self,
+ mock_path_join,
+ mock_create_vnf,
+ mock_open,
+ mock_json_dump):
+ """
+ Checks the proper functionality of the create_vnf_in_av_zone
+ function
+ """
+
+ data = {'zone': 'av::zone'}
+ param_file = '/tmp/param_av_zone.json'
+ os_sfc_utils.create_vnf_in_av_zone(self.mock_tacker_client,
+ 'vnf_name',
+ 'vnfd_name',
+ 'vim_name',
+ 'param_file',
+ 'av::zone')
+ open_handler = mock_open.return_value.__enter__.return_value
+ mock_path_join.assert_called_once_with('/tmp', 'param_av_zone.json')
+ mock_open.assert_called_once_with(param_file, 'w+')
+ mock_json_dump.assert_called_once_with(data, open_handler)
+ mock_create_vnf.assert_called_once_with(self.mock_tacker_client,
+ 'vnf_name',
+ vnfd_name='vnfd_name',
+ vim_name='vim_name',
+ param_file=param_file)
+
+ @patch('json.dump', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.create_vnffg', autospec=True)
+ @patch('os.path.join', autospec=True, return_value='/tmp/param_name.json')
+ def test_create_vnffg_with_param_file(self,
+ mock_path_join,
+ mock_create_vnffg,
+ mock_open,
+ mock_json_dump):
+ """
+ Checks the proper functionality of create_vnffg_with_param_file
+ function
+ """
+
+ data = {
+ 'ip_dst_prefix': 'server_ip',
+ 'net_dst_port_id': 'server_port',
+ 'net_src_port_id': 'client_port'
+ }
+ param_file = '/tmp/param_name.json'
+ os_sfc_utils.create_vnffg_with_param_file(self.mock_tacker_client,
+ 'vnffgd_name',
+ 'vnffg_name',
+ 'default_param_file',
+ 'client_port',
+ 'server_port',
+ 'server_ip')
+ open_handler = mock_open.return_value.__enter__.return_value
+ mock_path_join.assert_called_once_with('/tmp', 'param_vnffg_name.json')
+ mock_open.assert_called_once_with(param_file, 'w+')
+ mock_json_dump.assert_called_once_with(data, open_handler)
+ mock_create_vnffg.assert_called_once_with(self.mock_tacker_client,
+ vnffgd_name='vnffgd_name',
+ vnffg_name='vnffg_name',
+ param_file=param_file,
+ symmetrical=True)
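+
+ # Sketch of the helper exercised above (assumed implementation): it
+ # dumps the three parameters to /tmp/param_<vnffg_name>.json via
+ # json.dump and then calls create_vnffg with param_file set to that
+ # file and symmetrical=True.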
diff --git a/sfc/unit_tests/unit/lib/test_test_utils.py b/sfc/unit_tests/unit/lib/test_test_utils.py
index f973b094..a7d2bfde 100644
--- a/sfc/unit_tests/unit/lib/test_test_utils.py
+++ b/sfc/unit_tests/unit/lib/test_test_utils.py
@@ -1,9 +1,9 @@
#!/usr/bin/env python
###############################################################################
-# Copyright (c) 2018 All rights reserved.
+# Copyright (c) 2018 Venkata Harshavardhan Reddy Allu and others.
#
-# This program and the accompanying materials
+# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
@@ -191,46 +191,61 @@ class SfcTestUtilsTesting(unittest.TestCase):
@patch('time.sleep', autospec=True)
@patch('sfc.lib.test_utils.logger', autospec=True)
@patch('sfc.lib.test_utils.run_cmd_remote', autospec=True)
- def test_start_http_server_returned_false(self,
- mock_run_cmd_remote,
- mock_log,
- mock_sleep):
+ def test_start_http_server_returned_false_failed_to_start(
+ self, mock_run_cmd_remote, mock_log, mock_sleep):
"""
Checks the proper functionality of start_http_server
- function when port 80 is down
+ function when the http server fails to start
"""
cmd = "\'python -m SimpleHTTPServer 80 " + \
"> /dev/null 2>&1 &\'"
- sleep_calls = [[call(3)],
- [call(5)]]
+ rcr_calls = [call(self.ip, cmd),
+ call(self.ip, 'ps aux | grep SimpleHTTPServer')]
+ log_calls = [call('Failed to start http server')]
- rcr_calls = [[call(self.ip, cmd),
- call(self.ip, 'ps aux | grep SimpleHTTPServer')],
- [call(self.ip, 'netstat -pntl | grep :80')]]
+ mock_run_cmd_remote.side_effect = [('', '', ''),
+ ('', '', '')]
- log_calls = [[call('Failed to start http server')],
- [call('output')],
- [call('Port 80 is not up yet')]]
+ result = test_utils.start_http_server(self.ip, 1)
+ self.assertFalse(result)
+ mock_run_cmd_remote.assert_has_calls(rcr_calls)
+ mock_sleep.assert_called_once_with(3)
+ mock_log.error.assert_has_calls(log_calls)
+ mock_log.info.assert_not_called()
+ mock_log.debug.assert_not_called()
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.test_utils.logger', autospec=True)
+ @patch('sfc.lib.test_utils.run_cmd_remote', autospec=True)
+ def test_start_http_server_returned_false_port_is_down(
+ self, mock_run_cmd_remote, mock_log, mock_sleep):
+ """
+ Checks the proper functionality of start_http_server
+ function when port 80 is down
+ """
+
+ cmd = "\'python -m SimpleHTTPServer 80 " + \
+ "> /dev/null 2>&1 &\'"
+
+ rcr_calls = [call(self.ip, cmd),
+ call(self.ip, 'ps aux | grep SimpleHTTPServer'),
+ call(self.ip, 'netstat -pntl | grep :80')]
+
+ log_calls = [call('output'),
+ call('Port 80 is not up yet')]
mock_run_cmd_remote.side_effect = [('', '', ''),
- ('', '', ''),
- ('', '', ''),
('', 'output', ''),
('', '', '')]
- self.assertFalse(test_utils.start_http_server(self.ip, 1))
- mock_run_cmd_remote.assert_has_calls(rcr_calls[0])
- mock_sleep.assert_has_calls(sleep_calls[0])
- mock_log.error.assert_has_calls(log_calls[0])
- mock_log.info.assert_not_called()
- mock_log.debug.assert_not_called()
- self.assertFalse(test_utils.start_http_server(self.ip, 1))
- mock_run_cmd_remote.assert_has_calls(rcr_calls[0] + rcr_calls[1])
- mock_sleep.assert_has_calls(sleep_calls[0] + sleep_calls[1])
- mock_log.info.assert_has_calls(log_calls[1])
- mock_log.debug.assert_has_calls(log_calls[2])
+ result = test_utils.start_http_server(self.ip, 1)
+ self.assertFalse(result)
+ mock_run_cmd_remote.assert_has_calls(rcr_calls)
+ mock_sleep.assert_called_with(5)
+ mock_log.info.assert_has_calls(log_calls[:1])
+ mock_log.debug.assert_has_calls(log_calls[1:])
@patch('time.sleep', autospec=True)
@patch('sfc.lib.test_utils.logger', autospec=True)
diff --git a/test-requirements.txt b/test-requirements.txt
index f435e104..363f51dd 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,7 +1,8 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-coverage!=4.4,>=4.0 # Apache-2.0
-mock>=2.0.0 # BSD
-nose>=1.3.7 # LGPL
+coverage!=4.4 # Apache-2.0
+mock # BSD
+nose # LGPL
yamllint
+pylint
diff --git a/tox.ini b/tox.ini
index a92fccf2..c359c547 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = docs,docs-linkcheck,py27,yamllint
+envlist = docs,docs-linkcheck,py27,yamllint,pylint
skipsdist = True
[testenv]
@@ -41,3 +41,18 @@ files =
sfc/tests/functest
commands =
yamllint -s {[testenv:yamllint]files}
+
+[testenv:pylint]
+basepython = python2.7
+commands = pylint --rcfile=tox.ini sfc
+
+# pylintrc
+[MESSAGES CONTROL]
+disable=all
+
+enable=F,E,unreachable,duplicate-key,unnecessary-semicolon,
+ global-variable-not-assigned,unused-variable,binary-op-exception,
+ bad-format-string,anomalous-backslash-in-string,bad-open-mode
+
+[TYPECHECK]
+ignored-classes=Connection