-rw-r--r--  .coveragerc | 3
-rw-r--r--  .gitignore | 2
-rw-r--r--  INFO.yaml | 68
-rw-r--r--  docs/conf.py | 1
-rw-r--r--  docs/conf.yaml | 3
-rw-r--r--  docs/development/index.rst | 16
-rw-r--r--  docs/development/requirements/requirements.rst | 4
-rw-r--r--  docs/index.rst | 15
-rw-r--r--  docs/release/configguide/feature.configuration.rst | 6
-rw-r--r--  docs/release/index.rst | 18
-rw-r--r--  docs/release/release-notes/releasenotes.rst | 151
-rw-r--r--  docs/release/scenarios/os-odl-sfc-ha/scenario.description.rst | 40
-rw-r--r--  docs/release/scenarios/os-odl-sfc-noha/scenario.description.rst | 51
-rw-r--r--  docs/release/scenarios/os-odl-sfc_fdio-ha/index.rst | 18
-rw-r--r--  docs/release/scenarios/os-odl-sfc_fdio-ha/scenario.description.rst | 97
-rw-r--r--  docs/release/scenarios/os-odl-sfc_fdio-noha/index.rst | 18
-rw-r--r--  docs/release/scenarios/os-odl-sfc_fdio-noha/scenario.description.rst | 101
-rw-r--r--  docs/release/userguide/feature.userguide.rst | 11
-rw-r--r--  docs/requirements.txt | 2
-rw-r--r--  requirements.txt | 22
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/README | 62
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/files/ansible-role-requirements-master.yml | 227
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/files/ansible-role-requirements-pike.yml | 212
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/openstack_user_config.yml | 282
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/user_sfc_scenarios_variables_pike.yml | 103
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/openstack_user_config.yml | 186
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/user_sfc_scenarios_variables_pike.yml | 102
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/openstack_user_config.yml | 188
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/user_sfc_scenarios_variables_pike.yml | 102
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/defaults/repo_packages/opendaylight-master.yml | 22
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/defaults/repo_packages/opendaylight-pike.yml | 22
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/inventory_odl/env.d/neutron.yml | 97
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/inventory_odl/env.d/nova.yml | 115
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/all_tacker.yml | 14
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/haproxy_config.yml | 286
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/openstack_services_master.yml | 222
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/openstack_services_pike.yml | 217
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/os-tacker-install.yml | 63
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/setup-openstack.yml | 45
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/tacker.yml | 36
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/tacker_all.yml | 34
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/user_secrets.yml | 163
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/tasks/add-sfc-repos-and-inventory-master.yml | 6
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/tasks/add-sfc-repos-and-inventory-pike.yml | 16
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-config-files.yml | 20
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-files-master.yml | 30
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-files-pike.yml | 57
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/tasks/main.yml | 17
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/tasks/post-deployment.yml | 17
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/templates/ha/user_sfc_scenarios_variables_suse.yml.j2 (renamed from scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/user_sfc_scenarios_variables_suse.yml) | 11
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/templates/ha/user_sfc_scenarios_variables_ubuntu.yml.j2 | 131
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/templates/mini/user_sfc_scenarios_variables_suse.yml.j2 (renamed from scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/user_sfc_scenarios_variables_suse.yml) | 11
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/templates/mini/user_sfc_scenarios_variables_ubuntu.yml.j2 (renamed from scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/user_sfc_scenarios_variables_ubuntu.yml) | 20
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/templates/noha/user_sfc_scenarios_variables_suse.yml.j2 (renamed from scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/user_sfc_scenarios_variables_suse.yml) | 11
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/templates/noha/user_sfc_scenarios_variables_ubuntu.yml.j2 (renamed from scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/user_sfc_scenarios_variables_ubuntu.yml) | 20
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/vars/main.yml | 2
-rw-r--r--  scenarios/os-odl-sfc/xci_overrides | 5
-rw-r--r--  scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/README | 12
-rw-r--r--  scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/defaults/main.yml | 22
-rw-r--r--  scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/files/ha/openstack_user_config.yml (renamed from scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/ha/openstack_user_config.yml) | 78
-rw-r--r--  scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/files/mini/openstack_user_config.yml (renamed from scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/mini/openstack_user_config.yml) | 10
-rw-r--r--  scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/files/noha/openstack_user_config.yml (renamed from scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/noha/openstack_user_config.yml) | 10
-rw-r--r--  scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/configure-opnfvhost.yml | 74
-rw-r--r--  scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/copy-OSA-config-files.yml | 20
-rw-r--r--  scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/install-osm.yml | 32
-rw-r--r--  scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/main.yml | 12
-rw-r--r--  scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/post-deployment.yml | 27
-rw-r--r--  scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/register-vim.yml | 30
-rw-r--r--  scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/ha/user_sfc_scenarios_variables_ubuntu.yml.j2 | 131
-rw-r--r--  scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/lxd-bridge.j2 | 16
-rw-r--r--  scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/mini/user_sfc_scenarios_variables_ubuntu.yml.j2 (renamed from scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/user_sfc_scenarios_variables_ubuntu.yml) | 21
-rw-r--r--  scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/noha/user_sfc_scenarios_variables_ubuntu.yml.j2 | 130
-rw-r--r--  scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/vars/main.yml | 27
-rw-r--r--  scenarios/os-odl-sfc_osm/xci_overrides | 10
-rw-r--r--  setup.cfg | 9
-rw-r--r--  sfc/lib/cleanup.py | 87
-rw-r--r--  sfc/lib/config.py | 112
-rw-r--r--  sfc/lib/odl_utils.py | 344
-rw-r--r--  sfc/lib/openstack_utils.py | 694
-rw-r--r--  sfc/lib/results.py | 1
-rw-r--r--  sfc/lib/test_utils.py | 50
-rw-r--r--  sfc/tests/NAME_tests.py | 11
-rw-r--r--  sfc/tests/functest/README.tests | 37
-rw-r--r--  sfc/tests/functest/config.yaml | 74
-rw-r--r--  sfc/tests/functest/pod.yaml.sample | 58
-rw-r--r--  sfc/tests/functest/register-vim.json | 3
-rw-r--r--  sfc/tests/functest/register-vim.json-queens | 19
-rw-r--r--  sfc/tests/functest/run_sfc_tests.py | 96
-rw-r--r--  sfc/tests/functest/setup_scripts/compute_presetup_CI.bash | 27
-rw-r--r--  sfc/tests/functest/setup_scripts/delete.sh | 8
-rw-r--r--  sfc/tests/functest/setup_scripts/delete_symmetric.sh | 9
-rw-r--r--  sfc/tests/functest/setup_scripts/prepare_odl_sfc.py | 92
-rw-r--r--  sfc/tests/functest/setup_scripts/server_presetup_CI.bash | 13
-rw-r--r--  sfc/tests/functest/sfc_chain_deletion.py | 120
-rw-r--r--  sfc/tests/functest/sfc_one_chain_two_service_functions.py | 304
-rw-r--r--  sfc/tests/functest/sfc_parent_function.py | 768
-rw-r--r--  sfc/tests/functest/sfc_symmetric_chain.py | 346
-rw-r--r--  sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py | 348
-rw-r--r--  sfc/tests/functest/vnfd-templates/test-one-chain-vnfd1.yaml | 6
-rw-r--r--  sfc/tests/functest/vnfd-templates/test-one-chain-vnfd2.yaml | 6
-rw-r--r--  sfc/tests/functest/vnfd-templates/test-symmetric-vnfd.yaml | 21
-rw-r--r--  sfc/tests/functest/vnfd-templates/test-two-chains-vnfd1.yaml | 6
-rw-r--r--  sfc/tests/functest/vnfd-templates/test-two-chains-vnfd2.yaml | 6
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd.yaml | 40
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd.yaml | 10
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml | 48
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1.yaml | 19
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2.yaml | 20
-rw-r--r--  sfc/unit_tests/__init__.py (renamed from sfc/tests/functest/setup_scripts/__init__.py) | 0
-rw-r--r--  sfc/unit_tests/unit/__init__.py | 0
-rw-r--r--  sfc/unit_tests/unit/lib/test_cleanup.py | 469
-rw-r--r--  sfc/unit_tests/unit/lib/test_odl_utils.py | 817
-rw-r--r--  sfc/unit_tests/unit/lib/test_openstack_utils.py | 2504
-rw-r--r--  sfc/unit_tests/unit/lib/test_test_utils.py | 543
-rw-r--r--  test-requirements.txt | 8
-rw-r--r--  tox.ini | 58
116 files changed, 8684 insertions, 4010 deletions
diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 00000000..fe258c6c
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,3 @@
+[report]
+exclude_lines =
+ if __name__ == .__main__.:
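The exclude_lines pattern above is a regex that tells coverage.py to leave ``if __name__ == '__main__':`` blocks out of the report. A minimal sketch of a module this affects (module and function names are hypothetical, for illustration only)::

    # example_module.py - hypothetical module, for illustration only.
    # With the .coveragerc above, coverage.py excludes the guarded block
    # below from the report, so an unexercised CLI entry point does not
    # drag down the coverage score of the tested function.
    import sys


    def add(a, b):
        """Exercised by the unit tests as usual."""
        return a + b


    if __name__ == '__main__':  # matched by the exclude_lines regex
        print(add(int(sys.argv[1]), int(sys.argv[2])))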
diff --git a/.gitignore b/.gitignore
index 58dd4eed..ffe9352b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,3 +4,5 @@
*pyc
/sfc.egg-info/
*ovs-logs/
+.tox/
+docs/_build/
diff --git a/INFO.yaml b/INFO.yaml
new file mode 100644
index 00000000..869357a0
--- /dev/null
+++ b/INFO.yaml
@@ -0,0 +1,68 @@
+---
+project: 'Service Function Chaining (sfc)'
+project_creation_date: 'May 5, 2015'
+project_category: 'Collaborative Development'
+lifecycle_state: 'Incubation'
+project_lead: &opnfv_sfc_ptl
+ name: 'Manuel Buil'
+ email: 'manuelbuil87@gmail.com'
+ company: 'gmail.com'
+  timezone: 'Unknown'
+ id: 'mbuil'
+primary_contact: *opnfv_sfc_ptl
+issue_tracking:
+ type: 'jira'
+ url: 'https://jira.opnfv.org/projects/sfc'
+ key: 'sfc'
+mailing_list:
+ type: 'mailman2'
+ url: 'opnfv-tech-discuss@lists.opnfv.org'
+ tag: '[sfc]'
+realtime_discussion:
+ type: irc
+ server: 'freenode.net'
+ channel: '#opnfv-sfc'
+meetings:
+ - type: 'gotomeeting+irc'
+ agenda: # eg: 'https://wiki.opnfv.org/display/'
+ url: # eg: 'https://global.gotomeeting.com/join/819733085'
+ server: 'freenode.net'
+ channel: '#opnfv-meeting'
+ repeats: 'weekly'
+ time: # eg: '16:00 UTC'
+repositories:
+ - 'sfc'
+committers:
+ - <<: *opnfv_sfc_ptl
+ - name: 'Brady Johnson'
+ email: 'brady.allen.johnson@ericsson.com'
+ company: 'ericsson.com'
+ id: 'ebrjohn'
+ - name: 'Reinaldo Penno'
+ email: 'rapenno@gmail.com'
+ company: 'gmail.com'
+ id: 'repenno'
+ - name: 'Sam Hague'
+ email: 'shague@redhat.com'
+ company: 'redhat.com'
+ id: 'shague'
+ - name: 'Vishal Murgai'
+ email: 'vmurgai@cavium.com'
+ company: 'cavium.com'
+ id: 'vmurgai'
+ - name: 'Tim Rozet'
+ email: 'trozet@redhat.com'
+ company: 'redhat.com'
+ id: 'trozet'
+ - name: 'Manuel Buil'
+ email: 'manuelbuil87@gmail.com'
+ company: 'gmail.com'
+ id: 'mbuil'
+ - name: 'Dimitrios Markou'
+ email: 'mardim@intracom-telecom.com'
+ company: 'intracom-telecom.com'
+ id: 'mardim'
+tsc:
+ # yamllint disable rule:line-length
+ approval: ''
+ # yamllint enable rule:line-length
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 00000000..86ab8c57
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1 @@
+from docs_conf.conf import * # flake8: noqa
diff --git a/docs/conf.yaml b/docs/conf.yaml
new file mode 100644
index 00000000..d38bbda4
--- /dev/null
+++ b/docs/conf.yaml
@@ -0,0 +1,3 @@
+---
+project_cfg: opnfv
+project: sfc
diff --git a/docs/development/index.rst b/docs/development/index.rst
new file mode 100644
index 00000000..5644372b
--- /dev/null
+++ b/docs/development/index.rst
@@ -0,0 +1,16 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+.. _sfc-development:
+
+=========================
+Development Documentation
+=========================
+
+.. toctree::
+ :numbered:
+ :maxdepth: 2
+
+ requirements/index
+ design/index
diff --git a/docs/development/requirements/requirements.rst b/docs/development/requirements/requirements.rst
index e83a3e7e..00b77354 100644
--- a/docs/development/requirements/requirements.rst
+++ b/docs/development/requirements/requirements.rst
@@ -16,7 +16,7 @@ in an OPNFV environment.
Detailed Requirements
+++++++++++++++++++++
-These are the Euphrates specific requirements:
+These are the Fraser specific requirements:
1 The supported Service Chaining encapsulation will be NSH VXLAN-GPE.
@@ -36,7 +36,7 @@ These are the Euphrates specific requirements:
Long Term Requirements
++++++++++++++++++++++
-These requirements are out of the scope of the Euphrates release.
+These requirements are out of the scope of the Fraser release.
1 Dynamic movement of SFs across multiple Compute nodes.
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 00000000..73e87a9b
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,15 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+.. _sfc:
+
+=========================
+Service Function Chaining
+=========================
+
+.. toctree::
+ :maxdepth: 1
+
+ release/index
+ development/index
diff --git a/docs/release/configguide/feature.configuration.rst b/docs/release/configguide/feature.configuration.rst
index 37f381d0..4cf2718f 100644
--- a/docs/release/configguide/feature.configuration.rst
+++ b/docs/release/configguide/feature.configuration.rst
@@ -12,10 +12,8 @@ SFC feature description
For details of the scenarios and their provided capabilities refer to
the scenario description documents:
-- http://docs.opnfv.org/en/stable-euphrates/submodules/sfc/docs/release/scenarios/os-odl-sfc-ha/index.html
-
-- http://docs.opnfv.org/en/stable-euphrates/submodules/sfc/docs/release/scenarios/os-odl-sfc-noha/index.html
-
+- :ref:`os-odl-sfc-ha`
+- :ref:`os-odl-sfc-noha`
The SFC feature enables creation of Service Function Chains - an ordered list
of chained network functions (e.g. firewalls, NAT, QoS)
diff --git a/docs/release/index.rst b/docs/release/index.rst
new file mode 100644
index 00000000..2fc72e92
--- /dev/null
+++ b/docs/release/index.rst
@@ -0,0 +1,18 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+.. _sfc-release:
+
+=====================
+Release Documentation
+=====================
+
+.. toctree::
+ :numbered:
+ :maxdepth: 1
+
+ configguide/index
+ release-notes/index
+ scenarios/index
+ userguide/index
diff --git a/docs/release/release-notes/releasenotes.rst b/docs/release/release-notes/releasenotes.rst
index 32953313..22ae740e 100644
--- a/docs/release/release-notes/releasenotes.rst
+++ b/docs/release/release-notes/releasenotes.rst
@@ -5,26 +5,24 @@
Abstract
========
-This document compiles the release notes for the Euphrates release of
+This document compiles the release notes for the Hunter release of
OPNFV SFC
Important notes
===============
These notes provide release information for the use of SFC with the
-Apex installer and xci tools for the Euphrates release of OPNFV.
+Apex installer and xci tool for the Hunter release of OPNFV.
Summary
=======
-The goal of the SFC Euphrates release is to integrate the OpenDaylight
-SFC project into an OPNFV environment, with either the Apex installer or
-xci tools. In subsequent releases, we expect Compass4NFV to integrate
-the SFC scenarios too.
+The goal of the SFC release is to integrate the OpenDaylight SFC project
+into an OPNFV environment, with either the Apex installer or xci tool.
More information about OpenDaylight and SFC can be found here.
-- `OpenDaylight <http://www.opendaylight.org/software>`_ version "Nitrogen SR1"
+- `OpenDaylight <http://www.opendaylight.org>`_ version "Fluorine SR1"
- `Service function chaining <https://wiki.opnfv.org/display/sfc/Service+Function+Chaining+Home>`_
@@ -33,11 +31,9 @@ More information about OpenDaylight and SFC can be found here.
- Overall OPNFV documentation
- - `Design document <http://docs.opnfv.org/en/stable-euphrates/submodules/sfc/docs/development/design/index.html>`_
-
- - `User Guide <http://docs.opnfv.org/en/stable-euphrates/submodules/sfc/docs/release/userguide/index.html>`_
-
- - `Installation Instructions <http://docs.opnfv.org/en/stable-euphrates/submodules/sfc/docs/release/configguide/index.html>`_
+ - :ref:`Design document <sfc-design>`
+ - :ref:`User Guide <sfc-userguide>`
+ - :ref:`Installation Instructions <sfc-configguide>`
- Release Notes (this document)
@@ -49,18 +45,16 @@ Release Data
| **Project** | sfc |
| | |
+--------------------------------------+--------------------------------------+
-| **Repo/tag** | euphrates 1.0 |
+| **Repo/tag** | opnfv-8.0.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release designation** | Euphrates base release |
+| **Release designation** | Hunter 8.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | 6th October 2017 |
+| **Release date** | May 10th, 2019 |
| | |
+--------------------------------------+--------------------------------------+
-| **Purpose of the delivery** | Integrate neutron networking-sfc |
-| | and use the latest tacker code. Move |
-| | to OpenStack ocata and ODL Nitrogen |
+| **Purpose of the delivery** | Project maintenance |
+--------------------------------------+--------------------------------------+
Version change
@@ -70,22 +64,20 @@ Module version changes
~~~~~~~~~~~~~~~~~~~~~~
This release of OPNFV sfc is based on following upstream versions:
-- OpenStack Ocata release
+- OpenStack Rocky release
-- OpenDaylight Nitrogen SR1 release
+- OpenDaylight Fluorine SR1 release
-- Open vSwitch 2.6.1 with Yi Yang NSH patch
+- Open vSwitch 2.9.2
Document changes
~~~~~~~~~~~~~~~~
-This is the first tracked version of OPNFV SFC Euphrates. It comes with
+This is the first tracked version of OPNFV SFC Hunter. It comes with
the following documentation:
-- `Design document <http://docs.opnfv.org/en/stable-euphrates/submodules/sfc/docs/development/design/index.html>`_
-
-- `User Guide <http://docs.opnfv.org/en/stable-euphrates/submodules/sfc/docs/release/userguide/index.html>`_
-
-- `Installation Instructions <http://docs.opnfv.org/en/stable-euphrates/submodules/sfc/docs/release/configguide/index.html>`_
+- :ref:`Design document <sfc-design>`
+- :ref:`User Guide <sfc-userguide>`
+- :ref:`Installation Instructions <sfc-configguide>`
- Release notes (This document)
@@ -95,51 +87,23 @@ Reason for version
Feature additions
~~~~~~~~~~~~~~~~~
-- `Integration with neutron networking-sfc`
-- `Moved to latest tacker code`
-- `Started using forwarding graphs as a way to configure SFC`
-- `Created compatibility with latest functest (based on Alpine containers)`
-
Bug corrections
~~~~~~~~~~~~~~~
-**JIRA TICKETS:**
-
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-103>`_
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-104>`_
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-105>`_
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-106>`_
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-107>`_
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-108>`_
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-109>`_
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-110>`_
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-111>`_
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-112>`_
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-113>`_
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-114>`_
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-116>`_
-`Bug-fixes <https://jira.opnfv.org/browse/SFC-117>`_
-
-Apart from the OPNFV bug fixes, there were some bugs in ODL and Tacker which
-were fixed as well.
-
Deliverables
------------
Software deliverables
~~~~~~~~~~~~~~~~~~~~~
-No specific deliverables are created, as SFC is included with Apex.
+No specific deliverables are created, as SFC is included with Apex
Documentation deliverables
~~~~~~~~~~~~~~~~~~~~~~~~~~
-- `Design document <http://docs.opnfv.org/en/stable-euphrates/submodules/sfc/docs/development/design/index.html>`_
-
-- `User Guide <http://docs.opnfv.org/en/stable-euphrates/submodules/sfc/docs/release/userguide/index.html>`_
-
-- `Installation Instructions <http://docs.opnfv.org/en/stable-euphrates/submodules/sfc/docs/release/configguide/index.html>`_
-
+- :ref:`Design document <sfc-design>`
+- :ref:`User Guide <sfc-userguide>`
+- :ref:`Installation Instructions <sfc-configguide>`
- Release notes (This document)
Known Limitations, Issues and Workarounds
@@ -148,75 +112,18 @@ Known Limitations, Issues and Workarounds
System Limitations
------------------
-The Euphrates 3.0 release has a few limitations:
-
-1 - The testcase sfc_two_chains_SSH_and_HTTP is disabled in this release due to
-bugs in ODL, Tacker and networking-sfc:
-
-https://bugs.opendaylight.org/show_bug.cgi?id=9221
-https://bugs.launchpad.net/tacker/+bug/1719839
-https://bugs.launchpad.net/tacker/+bug/1719876
-https://bugs.launchpad.net/networking-sfc/+bug/1719835
-https://bugs.launchpad.net/networking-sfc/+bug/1719883
-
-2 - The topology CLIENT_SERVER_SAME_HOST does not work due to a bug in the
-vxlan_tool. This tool is part of the ODL-SFC repo and provides support for
-non-NSH-aware SFs:
-
-https://bugs.opendaylight.org/show_bug.cgi?id=9219
-
-3 - The topologies CLIENT_SERVER_DIFFERENT_HOST_SPLIT_VNF and
-CLIENT_SERVER_SAME_HOST_SPLIT_VNF do not work because of a ODL bug:
-
-https://bugs.opendaylight.org/show_bug.cgi?id=9220
-
-
Known issues
------------
-1 - OpenDaylight SFC relies on a version of Open vSwitch (OVS) with
-Network Service Headers (NSH). A version of OVS with NSH currently
-exists, but it is in a branched version of OVS. Extensive upstream
-work has been done to merge the NSH patches into mainstream OVS,
-but the work is still not complete. More information about this
-can be found in the OPNFV SFC design document (link provided above).
-
-2 - Due to a bug in tacker:
-
-https://bugs.launchpad.net/tacker/+bug/1719841
-
-it is not possible to run the SFC scenarios in openstack environments
-which require SSL connections to public endpoints and use self-signed
-certificates
-
Workarounds
-----------
-There is a way to avoid the known issue number 2 when using xci. Once
-the deployment is successfully done, go to tacker server and modify
-line 242 of the file:
-
-/openstack/venvs/tacker-15.1.7/lib/python2.7/site-packages/keystoneauth1/session.py
-
-So that instead of having:
-
-self.verify = verify
-
-It has:
-
-self.verify = False
-
-Forcing tacker to not check the certificates
-
-
Test results
============
-The Euphrates release of SFC has undergone QA test runs
-with Functest tests on the Apex installer and xci utility
References
==========
-For more information on the OPNFV Euphrates release, please see:
+For more information on the OPNFV Hunter release, please see:
OPNFV
-----
@@ -225,12 +132,12 @@ OPNFV
2) `OPNFV documentation- and software downloads <https://www.opnfv.org/software/download>`_
-3) `OPNFV Danube release <http://wiki.opnfv.org/releases/euphrates>`_
+3) `OPNFV Hunter release <https://docs.opnfv.org/en/latest/index.html>`_
OpenStack
---------
-4) `OpenStack Newton Release artifacts <http://www.openstack.org/software/ocata>`_
+4) `OpenStack Rocky Release artifacts <http://www.openstack.org/software/rocky>`_
5) `OpenStack documentation <http://docs.openstack.org>`_
@@ -238,9 +145,3 @@ OpenDaylight
------------
6) `OpenDaylight artifacts <http://www.opendaylight.org/software/downloads>`_
-
-Open vSwitch with NSH
----------------------
-
-7) https://github.com/yyang13/ovs_nsh_patches
-
diff --git a/docs/release/scenarios/os-odl-sfc-ha/scenario.description.rst b/docs/release/scenarios/os-odl-sfc-ha/scenario.description.rst
index 11b41434..b9195466 100644
--- a/docs/release/scenarios/os-odl-sfc-ha/scenario.description.rst
+++ b/docs/release/scenarios/os-odl-sfc-ha/scenario.description.rst
@@ -8,7 +8,7 @@ Introduction
The os-odl-sfc-ha is intended to be used to install the OPNFV SFC project in a standard
OPNFV High Availability mode. The OPNFV SFC project integrates the OpenDaylight SFC project
-into the OPNFV environment. The OPNFV SFC Euphrates release uses the OpenDaylight Nitrogen SR1 release.
+into the OPNFV environment. The OPNFV SFC Gambia release uses the OpenDaylight Fluorine SR1 release.
Scenario components and composition
===================================
@@ -53,11 +53,14 @@ will automatically be installed.
The VNF Manager
---------------
-In order to create a VM for each Service Function, a VNF Manager is needed. The OPNFV
+In order to create a VM for each Service Function, a VNF Manager is recommended. The OPNFV
SFC project currently uses the Tacker OpenStack project as a VNF Manager. Tacker is
installed on the controller node and manages VNF life cycle, and coordinates VM creation
and SFC configuration with OpenStack and OpenDaylight SFC project.
+It is also possible to run tests without a VNF Manager, steering SFC through the
+networking-sfc project.
+
Scenario usage overview
=======================
.. Provide a brief overview on how to use the scenario and the features available to the
@@ -66,7 +69,7 @@ Scenario usage overview
Once this scenario is installed, it will be possible to create Service Chains and
classification entries to map tenant traffic to individual, pre-defined Service Chains.
-All configuration can be performed using the Tacker CLI.
+All configuration can be performed using the Tacker CLI or the networking-sfc CLI.
Limitations, Issues and Workarounds
===================================
@@ -74,23 +77,24 @@ Limitations, Issues and Workarounds
.. faults or bugs. If the system design only provide some expected functionality then provide
.. some insight at this point.
-The *client* virtual machine needs to be located in a compute node where at least
-one of the service functions (SFs) is placed. This is due to a limitation in OpenDaylight,
-Nitrogen, which only installs the traffic classifier in the compute nodes where the SFs are.
-
Specific version of OVS
-----------------------
-SFC needs changes in OVS to include the Network Service Headers (NSH) Service Chaining
-encapsulation. This OVS patch has been ongoing for quite a while (2 years+), and still
-has not been officially merged. Previously, SFC used NSH from a branched version of OVS
-based on 2.3.90, called the "Pritesh Patch". In the OpenDaylight Nitrogen SR1 release, SFC was
-changed to use a newer, branched version of OVS based on 2.6.1, called the "Yi Yang
-Patch".
+SFC needs OVS 2.9.2 or higher, since those versions include the Network Service
+Headers (NSH) Service Chaining encapsulation.
+
+How to deploy the scenario
+==========================
+
+There are three tools which can be used to deploy the scenario:
+
+- Apex - https://opnfv-apex.readthedocs.io/en/latest/release/installation/index.html#apex-installation
+- XCI tool - https://opnfv-releng-xci.readthedocs.io/en/stable/xci-user-guide.html#user-guide
+- Compass - https://opnfv-compass4nfv.readthedocs.io/en/stable-gambia/release/installation/index.html#compass4nfv-installation-instructions
+
+For more information about how to deploy the SFC scenario, check:
-The older version of OVS only supported VXLAN-GPE + NSH encapsulation, but the newer
-version supports both ETH + NSH and VXLAN-GPE + ETH + NSH. Currently SFC is only
-implemented with VXLAN-GPE + ETH + NSH.
+https://wiki.opnfv.org/display/sfc/Deploy+OPNFV+SFC+scenarios
References
==========
@@ -101,6 +105,6 @@ https://wiki.opnfv.org/display/sfc/Service+Function+Chaining+Home
https://wiki.opendaylight.org/view/Service_Function_Chaining:Main
-For more information on the OPNFV Euphrates release, please visit:
+For more information on the OPNFV Gambia release, please visit:
-http://www.opnfv.org/euphrates
+https://docs.opnfv.org/en/stable-gambia/index.html
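The hunks in this file now point users at the networking-sfc CLI as an alternative to Tacker. As a rough illustration of that path, the following is a hedged sketch of building one chain through the networking-sfc API; it assumes the sfc_* bindings that networking-sfc registers in python-neutronclient, and every UUID, credential and address below is a placeholder::

    # Hedged sketch, not the project's test code: one service chain via
    # the networking-sfc API, assuming the sfc_* methods that
    # networking-sfc adds to python-neutronclient.
    from keystoneauth1 import loading, session
    from neutronclient.v2_0 import client as neutron_client

    loader = loading.get_plugin_loader('password')
    auth = loader.load_from_options(
        auth_url='http://CONTROLLER_IP:5000/v3',
        username='admin', password='secret', project_name='admin',
        user_domain_name='Default', project_domain_name='Default')
    neutron = neutron_client.Client(session=session.Session(auth=auth))

    # One port pair per SF VM; ingress/egress are existing Neutron ports.
    pp = neutron.create_sfc_port_pair({'port_pair': {
        'name': 'SF1-pair',
        'ingress': 'INGRESS-PORT-UUID',
        'egress': 'EGRESS-PORT-UUID'}})
    ppg = neutron.create_sfc_port_pair_group({'port_pair_group': {
        'name': 'SF1-group',
        'port_pairs': [pp['port_pair']['id']]}})

    # The classifier decides which tenant traffic enters the chain (HTTP).
    fc = neutron.create_sfc_flow_classifier({'flow_classifier': {
        'name': 'http-traffic',
        'protocol': 'tcp',
        'destination_port_range_min': 80,
        'destination_port_range_max': 80,
        'logical_source_port': 'CLIENT-PORT-UUID'}})

    # The chain ties the classifier to the ordered port pair groups.
    neutron.create_sfc_port_chain({'port_chain': {
        'name': 'chain-1',
        'port_pair_groups': [ppg['port_pair_group']['id']],
        'flow_classifiers': [fc['flow_classifier']['id']]}})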
diff --git a/docs/release/scenarios/os-odl-sfc-noha/scenario.description.rst b/docs/release/scenarios/os-odl-sfc-noha/scenario.description.rst
index e74e47c4..11f787c5 100644
--- a/docs/release/scenarios/os-odl-sfc-noha/scenario.description.rst
+++ b/docs/release/scenarios/os-odl-sfc-noha/scenario.description.rst
@@ -7,9 +7,8 @@ Introduction
.. In this section explain the purpose of the scenario and the types of capabilities provided
The os-odl-sfc-noha is intended to be used to install the OPNFV SFC project in a standard
-OPNFV Non-High Availability mode. The OPNFV SFC project integrates the OpenDaylight SFC
-project into the OPNFV environment. The OPNFV SFC Euphrates release uses the OpenDaylight
-Nitrogen SR1 release.
+OPNFV Non-High Availability mode. The OPNFV SFC project integrates the OpenDaylight SFC project
+into the OPNFV environment. The OPNFV SFC Gambia release uses the OpenDaylight Fluorine SR1 release.
Scenario components and composition
===================================
@@ -18,9 +17,8 @@ Scenario components and composition
.. to communicate to the user the capabilities available in this scenario.
This scenario installs everything needed to use the SFC OpenDaylight project in an OPNFV
-environment. Since this scenario is Non-High Availability, then only one controller and
-one compute node will be deployed. The classifier used in this scenario is implemented
-by the Netvirt OpenDaylight project.
+environment. The classifier used in this scenario is implemented by the Netvirt OpenDaylight
+project.
Following is a detailed list of what is included with this scenario:
@@ -55,10 +53,13 @@ will automatically be installed.
The VNF Manager
---------------
-In order to create a VM for each Service Function, a VNF Manager is needed. The OPNFV
+In order to create a VM for each Service Function, a VNF Manager is recommended. The OPNFV
SFC project currently uses the Tacker OpenStack project as a VNF Manager. Tacker is
installed on the controller node and manages VNF life cycle, and coordinates VM creation
-with the OpenDaylight SFC project.
+and SFC configuration with OpenStack and OpenDaylight SFC project.
+
+It is also possible to run tests without a VNF Manager, steering SFC through the
+networking-sfc project.
Scenario usage overview
=======================
@@ -68,7 +69,7 @@ Scenario usage overview
Once this scenario is installed, it will be possible to create Service Chains and
classification entries to map tenant traffic to individual, pre-defined Service Chains.
-All configuration can be performed using the Tacker CLI.
+All configuration can be performed using the Tacker CLI or the networking-sfc CLI.
Limitations, Issues and Workarounds
===================================
@@ -76,23 +77,24 @@ Limitations, Issues and Workarounds
.. faults or bugs. If the system design only provide some expected functionality then provide
.. some insight at this point.
-The *client* virtual machine needs to be located in a compute node where at least
-one of the service functions (SFs) is placed. This is due to a limitation in OpenDaylight,
-Nitrogen, which only installs the traffic classifier in the compute nodes where the SFs are.
-
Specific version of OVS
-----------------------
-SFC needs changes in OVS to include the Network Service Headers (NSH) Service Chaining
-encapsulation. This OVS patch has been ongoing for quite a while (2 years+), and still
-has not been officially merged. Previously, SFC used NSH from a branched version of OVS
-based on 2.3.90, called the "Pritesh Patch". In the OpenDaylight Nitrogen SR1 release, SFC was
-changed to use a newer, branched version of OVS based on 2.6.1, called the "Yi Yang
-Patch".
+SFC needs OVS 2.9.2 or higher, since those versions include the Network Service
+Headers (NSH) Service Chaining encapsulation.
+
+How to deploy the scenario
+==========================
+
+There are three tools which can be used to deploy the scenario:
-The older version of OVS only supported VXLAN-GPE + NSH encapsulation, but the newer
-version supports both ETH + NSH and VXLAN-GPE + ETH + NSH. Currently SFC is only
-implemented with VXLAN-GPE + ETH + NSH.
+- Apex - https://opnfv-apex.readthedocs.io/en/latest/release/installation/index.html#apex-installation
+- XCI tool - https://opnfv-releng-xci.readthedocs.io/en/stable/xci-user-guide.html#user-guide
+- Compass - https://opnfv-compass4nfv.readthedocs.io/en/stable-gambia/release/installation/index.html#compass4nfv-installation-instructions
+
+For more information about how to deploy the SFC scenario, check:
+
+https://wiki.opnfv.org/display/sfc/Deploy+OPNFV+SFC+scenarios
References
==========
@@ -103,7 +105,6 @@ https://wiki.opnfv.org/display/sfc/Service+Function+Chaining+Home
https://wiki.opendaylight.org/view/Service_Function_Chaining:Main
-For more information on the OPNFV Euphrates release, please visit:
-
-http://www.opnfv.org/euphrates
+For more information on the OPNFV Gambia release, please visit:
+https://docs.opnfv.org/en/stable-gambia/index.html
diff --git a/docs/release/scenarios/os-odl-sfc_fdio-ha/index.rst b/docs/release/scenarios/os-odl-sfc_fdio-ha/index.rst
deleted file mode 100644
index 28413b2e..00000000
--- a/docs/release/scenarios/os-odl-sfc_fdio-ha/index.rst
+++ /dev/null
@@ -1,18 +0,0 @@
-.. _os-odl-sfc_fdio-ha:
-
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) <optionally add copywriters name>
-
-=========================================
-os-odl-sfc_fdio-ha overview and description
-=========================================
-.. This document will be used to provide a description of the scenario for an end user.
-.. You should explain the purpose of the scenario, the types of capabilities provided and
-.. the unique components that make up the scenario including how they are used.
-
-.. toctree::
- :maxdepth: 3
-
- ./scenario.description.rst
-
diff --git a/docs/release/scenarios/os-odl-sfc_fdio-ha/scenario.description.rst b/docs/release/scenarios/os-odl-sfc_fdio-ha/scenario.description.rst
deleted file mode 100644
index 39efcacd..00000000
--- a/docs/release/scenarios/os-odl-sfc_fdio-ha/scenario.description.rst
+++ /dev/null
@@ -1,97 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) <optionally add copywriters name>
-
-Introduction
-============
-.. In this section explain the purpose of the scenario and the types of capabilities provided
-
-The os-odl-sfc_fdio-ha is intended to be used to install the OPNFV SFC project in a standard
-OPNFV High Availability mode. The OPNFV SFC project integrates the OpenDaylight SFC project
-into the OPNFV environment. The OPNFV SFC Euphrates release uses the OpenDaylight Nitrogen SR1 release.
-
-Scenario components and composition
-===================================
-.. In this section describe the unique components that make up the scenario,
-.. what each component provides and why it has been included in order
-.. to communicate to the user the capabilities available in this scenario.
-
-This scenario installs everything needed to use the SFC OpenDaylight project in an OPNFV
-environment. The classifier used in this scenario is implemented by the Netvirt OpenDaylight
-project.
-
-Following is a detailed list of what is included with this scenario:
-
-OpenDaylight features installed
--------------------------------
-
-The OpenDaylight SDN controller is installed in the controller node.
-
-The following are the SFC features that get installed:
-
-- odl-sfc-model
-- odl-sfc-provider
-- odl-sfc-provider-rest
-- odl-sfc-ovs
-- odl-sfc-openflow-renderer
-
-The following are the Netvirt features that get installed:
-
-- odl-netvirt-openstack
-- odl-sfc-genius
-- odl-neutron-service
-- odl-neutron-northbound-api
-- odl-neutron-spi
-- odl-neutron-transcriber
-- odl-ovsdb-southbound-impl-api
-- odl-ovsdb-southbound-impl-impl
-- odl-ovsdb-library
-
-By simply installing the odl-netvirt-sfc feature, all the dependant features
-will automatically be installed.
-
-The VNF Manager
----------------
-
-In order to create a VM for each Service Function, a VNF Manager is needed. The OPNFV
-SFC project currently uses the Tacker OpenStack project as a VNF Manager. Tacker is
-installed on the controller node and manages VNF life cycle, and coordinates VM creation
-and SFC configuration with OpenStack and OpenDaylight SFC project.
-
-Scenario usage overview
-=======================
-.. Provide a brief overview on how to use the scenario and the features available to the
-.. user. This should be an "introduction" to the userguide document, and explicitly link to it,
-.. where the specifics of the features are covered including examples and API's
-
-Once this scenario is installed, it will be possible to create Service Chains and
-classification entries to map tenant traffic to individual, pre-defined Service Chains.
-All configuration can be performed using the Tacker CLI.
-
-Limitations, Issues and Workarounds
-===================================
-.. Explain scenario limitations here, this should be at a design level rather than discussing
-.. faults or bugs. If the system design only provide some expected functionality then provide
-.. some insight at this point.
-
-The *client* virtual machine needs to be located in a compute node where at least
-one of the service functions (SFs) is placed. This is due to a limitation in OpenDaylight,
-Nitrogen, which only installs the traffic classifier in the compute nodes where the SFs are.
-
-Specific version of FD.IO
------------------------
-
-TO BE ADDED
-
-References
-==========
-
-For more information about SFC, please visit:
-
-https://wiki.opnfv.org/display/sfc/Service+Function+Chaining+Home
-
-https://wiki.opendaylight.org/view/Service_Function_Chaining:Main
-
-For more information on the OPNFV Euphrates release, please visit:
-
-http://www.opnfv.org/euphrates
diff --git a/docs/release/scenarios/os-odl-sfc_fdio-noha/index.rst b/docs/release/scenarios/os-odl-sfc_fdio-noha/index.rst
deleted file mode 100644
index a77bc4c5..00000000
--- a/docs/release/scenarios/os-odl-sfc_fdio-noha/index.rst
+++ /dev/null
@@ -1,18 +0,0 @@
-.. _os-odl-sfc_fdio-noha:
-
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) <optionally add copywriters name>
-
-===========================================
-os-odl-sfc_fdio-noha overview and description
-===========================================
-.. This document will be used to provide a description of the scenario for an end user.
-.. You should explain the purpose of the scenario, the types of capabilities provided and
-.. the unique components that make up the scenario including how they are used.
-
-.. toctree::
- :maxdepth: 3
-
- ./scenario.description.rst
-
diff --git a/docs/release/scenarios/os-odl-sfc_fdio-noha/scenario.description.rst b/docs/release/scenarios/os-odl-sfc_fdio-noha/scenario.description.rst
deleted file mode 100644
index 6ef8c4ba..00000000
--- a/docs/release/scenarios/os-odl-sfc_fdio-noha/scenario.description.rst
+++ /dev/null
@@ -1,101 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) <optionally add copywriters name>
-
-Introduction
-============
-.. In this section explain the purpose of the scenario and the types of capabilities provided
-
-The os-odl-sfc_fdio-noha is intended to be used to install the OPNFV SFC project in a standard
-OPNFV Non-High Availability mode. The OPNFV SFC project integrates the OpenDaylight SFC
-project into the OPNFV environment. The OPNFV SFC Euphrates release uses the OpenDaylight
-Nitrogen SR1 release.
-
-Scenario components and composition
-===================================
-.. In this section describe the unique components that make up the scenario,
-.. what each component provides and why it has been included in order
-.. to communicate to the user the capabilities available in this scenario.
-
-This scenario installs everything needed to use the SFC OpenDaylight project in an OPNFV
-environment. Since this scenario is Non-High Availability, then only one controller and
-one compute node will be deployed. The classifier used in this scenario is implemented
-by the Netvirt OpenDaylight project.
-
-Following is a detailed list of what is included with this scenario:
-
-OpenDaylight features installed
--------------------------------
-
-The OpenDaylight SDN controller is installed in the controller node.
-
-The following are the SFC features that get installed:
-
-- odl-sfc-model
-- odl-sfc-provider
-- odl-sfc-provider-rest
-- odl-sfc-ovs
-- odl-sfc-openflow-renderer
-
-The following are the Netvirt features that get installed:
-
-- odl-netvirt-openstack
-- odl-sfc-genius
-- odl-neutron-service
-- odl-neutron-northbound-api
-- odl-neutron-spi
-- odl-neutron-transcriber
-- odl-ovsdb-southbound-impl-api
-- odl-ovsdb-southbound-impl-impl
-- odl-ovsdb-library
-
-By simply installing the odl-netvirt-sfc feature, all the dependant features
-will automatically be installed.
-
-The VNF Manager
----------------
-
-In order to create a VM for each Service Function, a VNF Manager is needed. The OPNFV
-SFC project currently uses the Tacker OpenStack project as a VNF Manager. Tacker is
-installed on the controller node and manages VNF life cycle, and coordinates VM creation
-with the OpenDaylight SFC project.
-
-Scenario usage overview
-=======================
-.. Provide a brief overview on how to use the scenario and the features available to the
-.. user. This should be an "introduction" to the userguide document, and explicitly link to it,
-.. where the specifics of the features are covered including examples and API's
-
-Once this scenario is installed, it will be possible to create Service Chains and
-classification entries to map tenant traffic to individual, pre-defined Service Chains.
-All configuration can be performed using the Tacker CLI.
-
-Limitations, Issues and Workarounds
-===================================
-.. Explain scenario limitations here, this should be at a design level rather than discussing
-.. faults or bugs. If the system design only provide some expected functionality then provide
-.. some insight at this point.
-
-The *client* virtual machine needs to be located in a compute node where at least
-one of the service functions (SFs) is placed. This is due to a limitation in OpenDaylight,
-Nitrogen, which only installs the traffic classifier in the compute nodes where the SFs are.
-
-Specific version of FD.IO
------------------------
-
-TO BE ADDED
-
-
-References
-==========
-
-For more information about SFC, please visit:
-
-https://wiki.opnfv.org/display/sfc/Service+Function+Chaining+Home
-
-https://wiki.opendaylight.org/view/Service_Function_Chaining:Main
-
-For more information on the OPNFV Euphrates release, please visit:
-
-http://www.opnfv.org/euphrates
-
diff --git a/docs/release/userguide/feature.userguide.rst b/docs/release/userguide/feature.userguide.rst
index 0e9ce2cf..050a0c86 100644
--- a/docs/release/userguide/feature.userguide.rst
+++ b/docs/release/userguide/feature.userguide.rst
@@ -36,6 +36,17 @@ SFC capabilities and usage
The OPNFV SFC feature can be deployed with either the "os-odl-sfc-ha" or the
"os-odl-sfc-noha" scenario. SFC usage for both of these scenarios is the same.
+Once the deployment has been completed, the SFC test cases retrieve the
+environment information they need (e.g. the installer IP, the controller IP,
+etc.) from the installer, so that the test cases can be executed properly.
+This is the default behavior.
+If there is no installer in place and the server for the SFC test execution
+has been prepared manually, installing all necessary components (e.g. OpenStack,
+OpenDaylight, etc.) by hand, the user should update the "pod.yaml" file with
+all the necessary details for each node which participates in the scenario.
+When the Dovetail project triggers the SFC test scenarios, the "pod.yaml" file
+will be prepared by Dovetail automatically.
+
As previously mentioned, Tacker is used as a VNF Manager and SFC Orchestrator. All
the configuration necessary to create working service chains and classifiers can
be performed using the Tacker command line. Refer to the `Tacker walkthrough <https://github.com/trozet/sfc-random/blob/master/tacker_sfc_apex_walkthrough.txt>`_
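For the manually prepared case described in the hunk above, the test run reads node details from "pod.yaml". A small sketch of how such a file can be consumed; the field names follow the usual OPNFV pod descriptor layout and are assumptions here, not the authoritative schema of pod.yaml.sample::

    # Hedged sketch: consuming a manually written pod.yaml. The field
    # names follow the common OPNFV pod descriptor layout and are
    # assumptions, not the exact schema of pod.yaml.sample.
    import yaml

    with open('pod.yaml') as f:
        pod = yaml.safe_load(f)

    for node in pod['nodes']:
        # Typical per-node entries: name, role, ip and SSH credentials.
        print('{role}: {name} reachable at {ip} as user {user}'.format(
            role=node.get('role'), name=node.get('name'),
            ip=node.get('ip'), user=node.get('user')))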
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 00000000..dcb7a594
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,2 @@
+lfdocs-conf
+sphinx-opnfv-theme
diff --git a/requirements.txt b/requirements.txt
index dce5e2e0..4d464973 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,14 +1,18 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-pbr!=2.1.0,>=2.0.0 # Apache-2.0
-paramiko>=2.0 # LGPLv2.1+
-python-glanceclient>=2.8.0 # Apache-2.0
-requests>=2.14.2 # Apache-2.0
-xmltodict>=0.10.1 # MIT
-python-keystoneclient>=3.8.0 # Apache-2.0
-python-novaclient>=9.0.0 # Apache-2.0
-python-tackerclient>=0.8.0 # Apache-2.0
-PyYAML>=3.10.0 # MIT
+pbr!=2.1.0 # Apache-2.0
+paramiko # LGPLv2.1+
+python-glanceclient # Apache-2.0
+requests!=2.20.0 # Apache-2.0
+xmltodict # MIT
+python-keystoneclient!=2.1.0 # Apache-2.0
+python-novaclient # Apache-2.0
+python-tackerclient # Apache-2.0
+python-neutronclient # Apache-2.0
+networking-sfc>=7.0.0
+PyYAML # MIT
opnfv
snaps
+xtesting # Apache-2.0
+functest
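The hunk above drops most minimum-version pins in favour of excluding known-bad releases (e.g. requests!=2.20.0). A short sketch of how such specifiers evaluate, using the packaging library that pip itself builds on; the version numbers are chosen only to illustrate::

    # Hedged sketch: how pip-style specifiers such as "!=2.20.0" (the new
    # requests pin) behave, evaluated with the 'packaging' library.
    from packaging.specifiers import SpecifierSet

    exclusion_only = SpecifierSet('!=2.20.0')
    print('2.20.0' in exclusion_only)   # False: the known-bad release
    print('2.21.0' in exclusion_only)   # True: any other version passes

    bounded = SpecifierSet('!=2.1.0,>=2.0.0')  # the old pbr-style pin
    print('1.9.0' in bounded)           # False: below the lower bound
    print('2.1.0' in bounded)           # False: explicitly excluded
    print('2.2.0' in bounded)           # True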
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/README b/scenarios/os-odl-sfc/role/os-odl-sfc/README
deleted file mode 100644
index 3cb8cb29..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/README
+++ /dev/null
@@ -1,62 +0,0 @@
-This is the role which deploys the os-odl-sfc scenarios in xci.
-
-This role currently works with:
-
-- OpenStack stable/pike
-- ODL Nitrogen RC1
-- OVS+NSH patch
-- OpenSUSE 42.3 or Ubuntu 16.04
-
-# PREPARATIONS #
-
-1 - If you don’t have a key already, generate an SSH key in $HOME/.ssh
-ssh-keygen -t rsa
-
-2 - Clone OPNFV releng-xci repository
-git clone https://gerrit.opnfv.org/gerrit/releng-xci.git
-
-3 - Change into directory where the sandbox script is located:
-cd releng-xci/xci
-
-4 - Use a version of releng-xci which we know works
-
-git checkout cf2cd4e4b87a5e392bc4ba49749a349925ba2f86
-
-Then, depending on the scenario which will be run:
-
-## os-odl-sfc-noha ##
-
-To run os-odl-sfc-noha you should export the following variables before
-running xci-deploy.sh. Note that you should change xxxx by the path where
-your releng-xci code is:
-
-export XCI_FLAVOR=noha
-export OPNFV_SCENARIO=os-odl-sfc
-export OPENSTACK_OSA_VERSION=stable/pike
-export VM_MEMORY_SIZE=16384
-export OPENSTACK_BIFROST_VERSION=bd7e99bf7a00e4c9ad7d03d752d7251e3caf8509
-
-## os-odl-sfc-ha ##
-
-To run os-odl-sfc-ha you should export the following variables before
-running xci-deploy.sh:
-
-export XCI_FLAVOR=ha
-export OPNFV_SCENARIO=os-odl-sfc
-export OPENSTACK_OSA_VERSION=stable/pike
-export VM_MEMORY_SIZE=20480
-export OPENSTACK_BIFROST_VERSION=bd7e99bf7a00e4c9ad7d03d752d7251e3caf8509
-
-
-# LIMITATIONS #
-
-1 - It is using a private branch for the os-neutron role. This is because
-there are several patches pending to be upstreamed. This is the branch we
-are using:
-
-https://github.com/manuelbuil/openstack-ansible-os_neutron/tree/testing-ovs-nsh2
-
-We will stop doing this as soon as the patches are merged upstream
-
-2 - It is using a private branch for tacker code because a bug does not
-allow SSL. We will stop doing this as soon as the bug is fixed
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ansible-role-requirements-master.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/ansible-role-requirements-master.yml
deleted file mode 100644
index a82e01ed..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ansible-role-requirements-master.yml
+++ /dev/null
@@ -1,227 +0,0 @@
----
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-# these versions are based on the osa commit 7b3aac28a0a87e5966527829f6b0abcbc2303cc7 on 2017-12-11
-# https://review.openstack.org/gitweb?p=openstack/openstack-ansible.git;a=commit;h=7b3aac28a0a87e5966527829f6b0abcbc2303cc7
-- name: ansible-hardening
- scm: git
- src: https://git.openstack.org/openstack/ansible-hardening
- version: 46a94c72518f83d27b25a5fa960dde7130956215
-- name: apt_package_pinning
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-apt_package_pinning
- version: eba07d7dd7962d90301c49fc088551f9b35f367a
-- name: pip_install
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-pip_install
- version: 32c27505c6e0ee00ea0fb4a1c62240c60f17a0e3
-- name: galera_client
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-galera_client
- version: 9a8302cbba24ea4e5907567e5f93e874d30d79df
-- name: galera_server
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-galera_server
- version: aa452989d7295111962f67a3f3a96d96bc408846
-- name: ceph_client
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-ceph_client
- version: 34a04f7b24c80297866bc5ab56618e2211b1d5f9
-- name: haproxy_server
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-haproxy_server
- version: 9966fd96fede46c3b00c9e069e402eae90c66f17
-- name: keepalived
- scm: git
- src: https://github.com/evrardjp/ansible-keepalived
- version: 5deafcab39de162ac1550c58246963974e8dcf4e
-- name: lxc_container_create
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-lxc_container_create
- version: 68f81c679be88577633f98e8b9252a62bdcef754
-- name: lxc_hosts
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-lxc_hosts
- version: 84ac3442e542aeedf1396c88e0387b4ea1548eb1
-- name: memcached_server
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-memcached_server
- version: ae6f721dc0342e1e7b45ff2448ab51f7539dc01f
-- name: openstack_hosts
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-openstack_hosts
- version: 05c7f09d181de1809fd596cc0d879c49e3f86bbf
-- name: os_keystone
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_keystone
- version: cd9d4ef7d8614d241fa40ba33c1c205fd2b47fa1
-- name: openstack_openrc
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-openstack_openrc
- version: d594c2debc249daa5b7f6f2890f546093efd1ee5
-- name: os_aodh
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_aodh
- version: ce871dee75511f94bfd24dde8f97e573cf6d3ead
-- name: os_barbican
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_barbican
- version: c3e191037d0978479e3cb95a59b2986adab28c69
-- name: os_ceilometer
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_ceilometer
- version: 55bb04eaad4dd5c7fdad742b3557dc30dc9d45bf
-- name: os_cinder
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_cinder
- version: 536dd3446e0fc7fc68ab42b982ac9affc4215787
-- name: os_designate
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_designate
- version: a65d7a3394aef340ff94587dd0bb48133ed00763
-- name: os_glance
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_glance
- version: 43aa00424f233a6125f7a9216cec42da1d8ca4c5
-- name: os_gnocchi
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_gnocchi
- version: b1f7574dc529f8298a983d8d0e09520e90b571a8
-- name: os_heat
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_heat
- version: 0b3eb9348d55d6b1cf077a2c45b297f9a1be730d
-- name: os_horizon
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_horizon
- version: da72526dc1757688ecec8914344e330aaa0be720
-- name: os_ironic
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_ironic
- version: a90558f7a216e5e661c5d1a4048dbe30559542d1
-- name: os_magnum
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_magnum
- version: 736d1707339cb99396578018a6bda7af9184fb02
-- name: os_molteniron
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_molteniron
- version: 9b4c104a252c453bcd798fec9dbae7224b3d8001
-- name: os_neutron
- scm: git
- src: https://github.com/manuelbuil/openstack-ansible-os_neutron
- version: testing-ovs-nsh2
-- name: os_nova
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_nova
- version: 53df001c9034f198b9349def3c9158f8bbe43ff3
-- name: os_octavia
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_octavia
- version: 02ad3c68802287a1ba54cf10de085dcd14c324d8
-- name: os_rally
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_rally
- version: bc9075dba204e64d11cb397017d32b0c2297eed0
-- name: os_sahara
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_sahara
- version: 3c45121050ba21bd284f054d7b82a338f347157f
-- name: os_swift
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_swift
- version: f31217bb097519f15755f2337165657d7eb6b014
-- name: os_tacker
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_tacker
- version: d95902891c4e6200510509c066006c921cfff8df
-- name: os_tempest
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_tempest
- version: 866dedbcba180ca82c3c93823cef3db2d3241d1b
-- name: os_trove
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_trove
- version: b425fa316999d0863a44126f239a33d8c3fec3a6
-- name: plugins
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-plugins
- version: d2f60237761646968a4b39b15185fb5c84e7386f
-- name: rabbitmq_server
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-rabbitmq_server
- version: 311f76890c8f99cb0b46958775d84de614609323
-- name: repo_build
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-repo_build
- version: 59a3f444c263235d8f0f584da8768656179fa02a
-- name: repo_server
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-repo_server
- version: 7889f37cdd2a90b4b98e8ef2e886f1fd4950fc0a
-- name: rsyslog_client
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_client
- version: 310cfe9506d3742be10790533ad0d16100d81498
-- name: rsyslog_server
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_server
- version: ba7bb699c0c874c7977add86ca308ca18be8f9a8
-- name: sshd
- scm: git
- src: https://github.com/willshersystems/ansible-sshd
- version: 537b9b2bc2fd7f23301222098344727f8161993c
-- name: bird
- scm: git
- src: https://github.com/logan2211/ansible-bird
- version: 5033c412398cf6f98097a9ac274a6f12810c807e
-- name: etcd
- scm: git
- src: https://github.com/logan2211/ansible-etcd
- version: 3933355dfe51477822db517d3c07ad561fb61318
-- name: unbound
- scm: git
- src: https://github.com/logan2211/ansible-unbound
- version: 7be67d6b60718896f0c17a7d4a14b912f72a59ae
-- name: resolvconf
- scm: git
- src: https://github.com/logan2211/ansible-resolvconf
- version: d48dd3eea22094b6ecc6aa6ea07279c8e68e28b5
-- name: ceph-defaults
- scm: git
- src: https://github.com/ceph/ansible-ceph-defaults
- version: 19884aaac1bc58921952af955c66602ccca89e93
-- name: ceph-common
- scm: git
- src: https://github.com/ceph/ansible-ceph-common
- version: 08804bd46dff42ebff64e7f27c86f2265fe4d6fc
-- name: ceph-config
- scm: git
- src: https://github.com/ceph/ansible-ceph-config
- version: e070537f443c3ae5d262835c8b0a7a992850283b
-- name: ceph-mon
- scm: git
- src: https://github.com/ceph/ansible-ceph-mon
- version: 309b7e339e057d56d9dd38bdd61998b900f45ba8
-- name: ceph-mgr
- scm: git
- src: https://github.com/ceph/ansible-ceph-mgr
- version: fe8f0864500b54cc7c9f897b871ba2cdf1d37096
-- name: ceph-osd
- scm: git
- src: https://github.com/ceph/ansible-ceph-osd
- version: e022d6773bc827e75ad051b429dec786a75d68f4
-- name: opendaylight
- scm: git
- src: https://github.com/opendaylight/integration-packaging-ansible-opendaylight
- version: ef1367ad15ad10ac8cc9416f6fd49fd8b350d377
-- name: haproxy_endpoints
- scm: git
- src: https://github.com/logan2211/ansible-haproxy-endpoints
- version: 49901861b16b8afaa9bccdbc649ac956610ff22b
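
Each entry in the role list above follows the same three-key pattern: a role name, a git source, and an exact commit (or tag) to check out, which keeps role installs reproducible. As a minimal sketch, assuming the standard /etc/ansible/roles path that the openstack-ansible bootstrap scripts normally populate, the file can also be consumed by hand:

    # Sketch: fetch every role at its pinned version with ansible-galaxy
    # (paths are assumptions; the bootstrap wrapper normally does this step).
    - hosts: localhost
      connection: local
      tasks:
        - name: Install the pinned OSA roles
          command: >-
            ansible-galaxy install
            --role-file=ansible-role-requirements-master.yml
            --roles-path=/etc/ansible/roles
            --force
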
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ansible-role-requirements-pike.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/ansible-role-requirements-pike.yml
deleted file mode 100644
index 4b0b9b8b..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ansible-role-requirements-pike.yml
+++ /dev/null
@@ -1,212 +0,0 @@
-- name: ansible-hardening
- scm: git
- src: https://git.openstack.org/openstack/ansible-hardening
- version: c05e36f48de66feb47046a0126d986fa03313f29
-- name: apt_package_pinning
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-apt_package_pinning
- version: 9403a36513aee54c15890ac96c1f8c455f9c083d
-- name: pip_install
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-pip_install
- version: df107891bf9fdfa7287bdfe43f3fa0120a80e5ad
-- name: galera_client
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-galera_client
- version: 52b374547648056b58c544532296599801d501d7
-- name: galera_server
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-galera_server
- version: b124e06872ebeca7d81cb22fb80ae97a995b07a8
-- name: ceph_client
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-ceph_client
- version: 5fcbc68fdbd3105d233fd3c03c887f13227b1c3d
-- name: haproxy_server
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-haproxy_server
- version: a905aaed8627f59d9dc10b9bc031589a7c65828f
-- name: keepalived
- scm: git
- src: https://github.com/evrardjp/ansible-keepalived
- version: 3.0.3
-- name: lxc_container_create
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-lxc_container_create
- version: c41d3b20da6be07d9bf5db7f7e6a1384c7cfb5eb
-- name: lxc_hosts
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-lxc_hosts
- version: d974c4db1696027899b28b2cb58800cae9a605e5
-- name: memcached_server
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-memcached_server
- version: 08c483f3c5d49c236194090534a015b67c8cded6
-- name: openstack_hosts
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-openstack_hosts
- version: a0d3b9c9756b6e95b0e034f3d0576fbb33607820
-- name: os_keystone
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_keystone
- version: ffc9c9b5e681748ff3e54e43f22c921e83342a51
-- name: openstack_openrc
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-openstack_openrc
- version: b27229ef168aed7f2febf6991b2d7459ec8883ee
-- name: os_aodh
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_aodh
- version: bcd77b1e10a7054e9365da6a20848b393153d025
-- name: os_barbican
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_barbican
- version: 0797e8bdadd2fcf4696b22f0e18340c8d9539b09
-- name: os_ceilometer
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_ceilometer
- version: 4b3e0589a0188de885659614ef4e076018af54f7
-- name: os_cinder
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_cinder
- version: 6f5ab34e5a0694f3fc84e63c912e00e86e3de280
-- name: os_designate
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_designate
- version: eac6d3c674397097d8adf722635252b1822c8f6c
-- name: os_glance
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_glance
- version: 47080919c937aace65fc7dc8e9670dbcfd910b88
-- name: os_gnocchi
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_gnocchi
- version: 5f8950f61ed6b61d1cc06ab73b3b02466bee0db1
-- name: os_heat
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_heat
- version: 4d1efae631026631fb2af4f43a9fe8ca210d643e
-- name: os_horizon
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_horizon
- version: 71aa69b1425f5b5b2bdc274357b62a9b4b57ae8f
-- name: os_ironic
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_ironic
- version: 34205b6b99fc3cfe54eddbcde0380e626976e425
-- name: os_magnum
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_magnum
- version: 0fdeea886ef4227e02d793f6dbfd54ccd9e6e088
-- name: os_molteniron
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_molteniron
- version: 58cff32e954ab817d07b8e0a136663c34d7f7b60
-- name: os_neutron
- scm: git
- src: https://github.com/manuelbuil/openstack-ansible-os_neutron
- version: pike-SFC-support
-- name: os_nova
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_nova
- version: 80e0d04822f7ddc5b8d574329e4eb8a76aea63ff
-- name: os_octavia
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_octavia
- version: 5fd1fbae703c17f928cfc00f60aeeed0500c6f2b
-- name: os_rally
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_rally
- version: b2658fb704fd3a1e8bce794b8bf87ac83931aa46
-- name: os_sahara
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_sahara
- version: e3e4f1bc8d72dd6fb7e26b8d0d364f9a60e16b0f
-- name: os_swift
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_swift
- version: 0bb5979de285305f652694cee139390a8102c134
-- name: os_tempest
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_tempest
- version: 0fb52fcd130bee25f40cd515da69948821d5b504
-- name: os_trove
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-os_trove
- version: 6596f6b28c88a88c89e293ea8f5f8551eb491fd1
-- name: plugins
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-plugins
- version: 11aed400f86951593bb60d1e853574b67894b0b3
-- name: rabbitmq_server
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-rabbitmq_server
- version: fa80dfc0f8129e02f3f3b34bb7205889d3e5696c
-- name: repo_build
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-repo_build
- version: d0079ff721b0f9c4682d57eccfadb36f365eea2b
-- name: repo_server
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-repo_server
- version: 8302adcb11cad4e6245fd6bd1bbb4db08d3b60e9
-- name: rsyslog_client
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_client
- version: f41638370114412b97c6523b4c626ca70f0337f4
-- name: rsyslog_server
- scm: git
- src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_server
- version: 61a3ab251f63c6156f2a6604ee1a822070e19c90
-- name: sshd
- scm: git
- src: https://github.com/willshersystems/ansible-sshd
- version: 0.5.1
-- name: bird
- scm: git
- src: https://github.com/logan2211/ansible-bird
- version: '1.4'
-- name: etcd
- scm: git
- src: https://github.com/logan2211/ansible-etcd
- version: '1.3'
-- name: unbound
- scm: git
- src: https://github.com/logan2211/ansible-unbound
- version: '1.5'
-- name: resolvconf
- scm: git
- src: https://github.com/logan2211/ansible-resolvconf
- version: '1.3'
-- name: ceph-defaults
- scm: git
- src: https://github.com/ceph/ansible-ceph-defaults
- version: v3.0.8
-- name: ceph-common
- scm: git
- src: https://github.com/ceph/ansible-ceph-common
- version: v3.0.8
-- name: ceph-config
- scm: git
- src: https://github.com/ceph/ansible-ceph-config
- version: v3.0.8
-- name: ceph-mon
- scm: git
- src: https://github.com/ceph/ansible-ceph-mon
- version: v3.0.8
-- name: ceph-mgr
- scm: git
- src: https://github.com/ceph/ansible-ceph-mgr
- version: v3.0.8
-- name: ceph-osd
- scm: git
- src: https://github.com/ceph/ansible-ceph-osd
- version: v3.0.8
-- name: os_tacker
- scm: git
- src: https://github.com/manuelbuil/openstack-ansible-os_tacker
- version: pike-suse-support
-- name: opendaylight
- scm: git
- src: https://git.opendaylight.org/gerrit/p/integration/packaging/ansible-opendaylight.git
- version: 2af197bd13f77d2a07878b160c00f8ceeebb3c34
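
The pike list differs from the master one above in two ways: several third-party roles are pinned to release tags (keepalived 3.0.3, sshd 0.5.1, the ceph roles at v3.0.8) instead of SHAs, and the SFC-specific os_neutron and os_tacker entries point at personal forks on moving branches. Once such patches merge upstream, a fork pin would normally be flipped back to the upstream repository; a sketch, with an assumed tag:

    # Hypothetical follow-up pin (tag name assumed) once pike-SFC-support merges:
    - name: os_neutron
      scm: git
      src: https://git.openstack.org/openstack/openstack-ansible-os_neutron
      version: 16.0.5   # assumed stable/pike tag; fork branch heads move
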
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/openstack_user_config.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/openstack_user_config.yml
new file mode 100644
index 00000000..899785dc
--- /dev/null
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/openstack_user_config.yml
@@ -0,0 +1,282 @@
+---
+cidr_networks:
+ container: 172.29.236.0/22
+ tunnel: 172.29.240.0/22
+ storage: 172.29.244.0/22
+
+used_ips:
+ - "172.29.236.1,172.29.236.50"
+ - "172.29.240.1,172.29.240.50"
+ - "172.29.244.1,172.29.244.50"
+ - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+ internal_lb_vip_address: 172.29.236.222
+ external_lb_vip_address: 192.168.122.220
+ barbican_keys_backend: true
+ tunnel_bridge: "br-vxlan"
+ management_bridge: "br-mgmt"
+ provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ type: "vlan"
+ range: "1:1"
+ net_name: "vlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.14
+ controller02:
+ ip: 172.29.236.15
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.14
+ controller02:
+ ip: 172.29.236.15
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.14
+ controller02:
+ ip: 172.29.236.15
+
+# rsyslog server
+# log_hosts:
+# log1:
+# ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.14
+ controller02:
+ ip: 172.29.236.15
+
+# barbican
+key-manager_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.14
+ controller02:
+ ip: 172.29.236.15
+
+# cinder api services
+storage-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.14
+ controller02:
+ ip: 172.29.236.15
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.12"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+ controller01:
+ ip: 172.29.236.14
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.12"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+ controller02:
+ ip: 172.29.236.15
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.12"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.14
+ controller02:
+ ip: 172.29.236.15
+
+# heat
+orchestration_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.14
+ controller02:
+ ip: 172.29.236.15
+
+# horizon
+dashboard_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.14
+ controller02:
+ ip: 172.29.236.15
+
+# tacker
+mano_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.14
+ controller02:
+ ip: 172.29.236.15
+
+# ceilometer
+metering-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.14
+ controller02:
+ ip: 172.29.236.15
+
+# neutron server, agents (L3, etc)
+network_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.14
+ controller02:
+ ip: 172.29.236.15
+
+# nova hypervisors
+compute_hosts:
+ compute00:
+ ip: 172.29.236.12
+ compute01:
+ ip: 172.29.236.13
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.12"
+ share: "/volumes"
+ controller01:
+ ip: 172.29.236.14
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.12"
+ share: "/volumes"
+ controller02:
+ ip: 172.29.236.15
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.12"
+ share: "/volumes"
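
The comment above image_hosts notes that the three identical glance_nfs_client blocks could be collapsed into a single global override; a sketch of that form, assuming the standard user_variables.yml override file of an openstack-ansible deployment:

    # Equivalent global setting (sketch), replacing the repeated container_vars:
    glance_nfs_client:
      - server: "172.29.244.12"
        remote_path: "/images"
        local_path: "/var/lib/glance/images"
        type: "nfs"
        options: "_netdev,auto"
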
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/user_sfc_scenarios_variables_pike.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/user_sfc_scenarios_variables_pike.yml
deleted file mode 100644
index 002db2b1..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/user_sfc_scenarios_variables_pike.yml
+++ /dev/null
@@ -1,103 +0,0 @@
----
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# ##
-# ## This file contains commonly used overrides for convenience. Please inspect
-# ## the defaults for each role to find additional override options.
-# ##
-
-# # Debug and Verbose options.
-debug: false
-
-haproxy_keepalived_external_vip_cidr: "192.168.122.220/32"
-haproxy_keepalived_internal_vip_cidr: "172.29.236.222/32"
-haproxy_keepalived_external_interface: br-vlan
-haproxy_keepalived_internal_interface: br-mgmt
-gnocchi_db_sync_options: ""
-
-ovs_nsh_support: true
-cluster: true
-
-# Ensure the openvswitch kernel module is loaded
-# openstack_host_specific_kernel_modules:
-# - name: "openvswitch"
-# pattern: "CONFIG_OPENVSWITCH"
-# group: "network_hosts"
-
-# Use OpenDaylight SDN Controller
-neutron_plugin_type: "ml2.opendaylight"
-neutron_opendaylight_conf_ini_overrides:
- ml2_odl:
- username: "admin"
- password: "admin"
- port_binding_controller: "pseudo-agentdb-binding"
- url: "http://{{ internal_lb_vip_address }}:8080/controller/nb/v2/neutron"
-neutron_plugin_base:
- - odl-router_v2
- - metering
- - networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin
- - networking_sfc.services.sfc.plugin.SfcPlugin
-provider_networks:
- - network:
- container_bridge: "br-mgmt"
- container_type: "veth"
- container_interface: "eth1"
- ip_from_q: "container"
- type: "raw"
- group_binds:
- - all_containers
- - hosts
- is_container_address: true
- is_ssh_address: true
- - network:
- container_bridge: "br-vxlan"
- container_type: "veth"
- container_interface: "eth10"
- ip_from_q: "tunnel"
- type: "vxlan"
- range: "1:1000"
- net_name: "vxlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth12"
- host_bind_override: "eth12"
- type: "flat"
- net_name: "flat"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth11"
- host_bind_override: "eth12"
- type: "vlan"
- range: "1:1"
- net_name: "vlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-storage"
- container_type: "veth"
- container_interface: "eth2"
- ip_from_q: "storage"
- type: "raw"
- group_binds:
- - glance_api
- - cinder_api
- - cinder_volume
- - nova_compute
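
The deleted HA variables file keeps the openvswitch kernel-module stanza commented out, presumably because the host images already ship the module. On an image that does not, enabling it is just the uncommented form of that block:

    # Uncommented form of the stanza above (sketch): load openvswitch on every
    # host in the network_hosts group at deploy time.
    openstack_host_specific_kernel_modules:
      - name: "openvswitch"
        pattern: "CONFIG_OPENVSWITCH"
        group: "network_hosts"
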
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/openstack_user_config.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/openstack_user_config.yml
new file mode 100644
index 00000000..4ae8a83f
--- /dev/null
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/openstack_user_config.yml
@@ -0,0 +1,186 @@
+---
+cidr_networks:
+ container: 172.29.236.0/22
+ tunnel: 172.29.240.0/22
+ storage: 172.29.244.0/22
+
+used_ips:
+ - "172.29.236.1,172.29.236.50"
+ - "172.29.240.1,172.29.240.50"
+ - "172.29.244.1,172.29.244.50"
+ - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+ internal_lb_vip_address: 172.29.236.11
+ external_lb_vip_address: 192.168.122.3
+ barbican_keys_backend: true
+ tunnel_bridge: "br-vxlan"
+ management_bridge: "br-mgmt"
+ provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ type: "vlan"
+ range: "1:1"
+ net_name: "vlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# rsyslog server
+# log_hosts:
+# log1:
+# ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# barbican
+key-manager_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# cinder api services
+storage-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.12"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# heat
+orchestration_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# horizon
+dashboard_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# tacker
+mano_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# ceilometer
+metering-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# neutron server, agents (L3, etc)
+network_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# nova hypervisors
+compute_hosts:
+ compute00:
+ ip: 172.29.236.12
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.12"
+ share: "/volumes"
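
The mini layout collapses every control-plane group onto controller00 and keeps a single hypervisor. Growing it toward the noha layout that follows in this diff is only a matter of appending hosts to the relevant groups, for example:

    # Sketch: a second hypervisor, mirroring the noha file below.
    compute_hosts:
      compute00:
        ip: 172.29.236.12
      compute01:
        ip: 172.29.236.13
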
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/user_sfc_scenarios_variables_pike.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/user_sfc_scenarios_variables_pike.yml
deleted file mode 100644
index 4ee48807..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/user_sfc_scenarios_variables_pike.yml
+++ /dev/null
@@ -1,102 +0,0 @@
----
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# ##
-# ## This file contains commonly used overrides for convenience. Please inspect
-# ## the defaults for each role to find additional override options.
-# ##
-
-# # Debug and Verbose options.
-debug: false
-
-haproxy_keepalived_external_vip_cidr: "192.168.122.3/32"
-haproxy_keepalived_internal_vip_cidr: "172.29.236.11/32"
-haproxy_keepalived_external_interface: br-vlan
-haproxy_keepalived_internal_interface: br-mgmt
-gnocchi_db_sync_options: ""
-
-ovs_nsh_support: true
-
-# Ensure the openvswitch kernel module is loaded
-# openstack_host_specific_kernel_modules:
-# - name: "openvswitch"
-# pattern: "CONFIG_OPENVSWITCH"
-# group: "network_hosts"
-
-# Use OpenDaylight SDN Controller
-neutron_plugin_type: "ml2.opendaylight"
-neutron_opendaylight_conf_ini_overrides:
- ml2_odl:
- username: "admin"
- password: "admin"
- port_binding_controller: "pseudo-agentdb-binding"
- url: "http://{{ hostvars[groups['neutron_server'][0]]['ansible_eth1']['ipv4']['address'] }}:8080/controller/nb/v2/neutron"
-neutron_plugin_base:
- - odl-router_v2
- - metering
- - networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin
- - networking_sfc.services.sfc.plugin.SfcPlugin
-provider_networks:
- - network:
- container_bridge: "br-mgmt"
- container_type: "veth"
- container_interface: "eth1"
- ip_from_q: "container"
- type: "raw"
- group_binds:
- - all_containers
- - hosts
- is_container_address: true
- is_ssh_address: true
- - network:
- container_bridge: "br-vxlan"
- container_type: "veth"
- container_interface: "eth10"
- ip_from_q: "tunnel"
- type: "vxlan"
- range: "1:1000"
- net_name: "vxlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth12"
- host_bind_override: "eth12"
- type: "flat"
- net_name: "flat"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth11"
- host_bind_override: "eth12"
- type: "vlan"
- range: "1:1"
- net_name: "vlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-storage"
- container_type: "veth"
- container_interface: "eth2"
- ip_from_q: "storage"
- type: "raw"
- group_binds:
- - glance_api
- - cinder_api
- - cinder_volume
- - nova_compute
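
Unlike the HA variant, the single-controller files point ml2_odl at the eth1 address of the first neutron_server host through a hostvars lookup rather than at a load-balanced VIP. A quick sketch (ad-hoc playbook; group and fact names as in the file) to confirm what that template resolves to:

    # Sketch: print the address the ml2_odl url above evaluates to.
    - hosts: neutron_server
      gather_facts: true
      tasks:
        - name: Show the resolved OpenDaylight northbound address
          debug:
            msg: "{{ hostvars[groups['neutron_server'][0]]['ansible_eth1']['ipv4']['address'] }}"
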
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/openstack_user_config.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/openstack_user_config.yml
new file mode 100644
index 00000000..ed8ff8f5
--- /dev/null
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/openstack_user_config.yml
@@ -0,0 +1,188 @@
+---
+cidr_networks:
+ container: 172.29.236.0/22
+ tunnel: 172.29.240.0/22
+ storage: 172.29.244.0/22
+
+used_ips:
+ - "172.29.236.1,172.29.236.50"
+ - "172.29.240.1,172.29.240.50"
+ - "172.29.244.1,172.29.244.50"
+ - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+ internal_lb_vip_address: 172.29.236.11
+ external_lb_vip_address: 192.168.122.3
+ barbican_keys_backend: true
+ tunnel_bridge: "br-vxlan"
+ management_bridge: "br-mgmt"
+ provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ type: "vlan"
+ range: "1:1"
+ net_name: "vlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# rsyslog server
+# log_hosts:
+# log1:
+# ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# barbican
+key-manager_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# cinder api services
+storage-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.12"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# heat
+orchestration_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# horizon
+dashboard_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# tacker
+mano_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# ceilometer
+metering-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# neutron server, agents (L3, etc)
+network_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# nova hypervisors
+compute_hosts:
+ compute00:
+ ip: 172.29.236.12
+ compute01:
+ ip: 172.29.236.13
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.12"
+ share: "/volumes"
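
The noha file is the mini layout plus a second hypervisor. Taken literally, its haproxy comment ("Ideally the load balancer should not use the Infrastructure hosts") would look like the sketch below; the host name and address are hypothetical, chosen inside the 172.29.236.1-50 block the file already reserves in used_ips:

    # Hypothetical dedicated load-balancer host:
    haproxy_hosts:
      loadbalancer00:
        ip: 172.29.236.20
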
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/user_sfc_scenarios_variables_pike.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/user_sfc_scenarios_variables_pike.yml
deleted file mode 100644
index 4ee48807..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/user_sfc_scenarios_variables_pike.yml
+++ /dev/null
@@ -1,102 +0,0 @@
----
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# ##
-# ## This file contains commonly used overrides for convenience. Please inspect
-# ## the defaults for each role to find additional override options.
-# ##
-
-# # Debug and Verbose options.
-debug: false
-
-haproxy_keepalived_external_vip_cidr: "192.168.122.3/32"
-haproxy_keepalived_internal_vip_cidr: "172.29.236.11/32"
-haproxy_keepalived_external_interface: br-vlan
-haproxy_keepalived_internal_interface: br-mgmt
-gnocchi_db_sync_options: ""
-
-ovs_nsh_support: true
-
-# Ensure the openvswitch kernel module is loaded
-# openstack_host_specific_kernel_modules:
-# - name: "openvswitch"
-# pattern: "CONFIG_OPENVSWITCH"
-# group: "network_hosts"
-
-# Use OpenDaylight SDN Controller
-neutron_plugin_type: "ml2.opendaylight"
-neutron_opendaylight_conf_ini_overrides:
- ml2_odl:
- username: "admin"
- password: "admin"
- port_binding_controller: "pseudo-agentdb-binding"
- url: "http://{{ hostvars[groups['neutron_server'][0]]['ansible_eth1']['ipv4']['address'] }}:8080/controller/nb/v2/neutron"
-neutron_plugin_base:
- - odl-router_v2
- - metering
- - networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin
- - networking_sfc.services.sfc.plugin.SfcPlugin
-provider_networks:
- - network:
- container_bridge: "br-mgmt"
- container_type: "veth"
- container_interface: "eth1"
- ip_from_q: "container"
- type: "raw"
- group_binds:
- - all_containers
- - hosts
- is_container_address: true
- is_ssh_address: true
- - network:
- container_bridge: "br-vxlan"
- container_type: "veth"
- container_interface: "eth10"
- ip_from_q: "tunnel"
- type: "vxlan"
- range: "1:1000"
- net_name: "vxlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth12"
- host_bind_override: "eth12"
- type: "flat"
- net_name: "flat"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth11"
- host_bind_override: "eth12"
- type: "vlan"
- range: "1:1"
- net_name: "vlan"
- group_binds:
- - neutron_openvswitch_agent
- - network:
- container_bridge: "br-storage"
- container_type: "veth"
- container_interface: "eth2"
- ip_from_q: "storage"
- type: "raw"
- group_binds:
- - glance_api
- - cinder_api
- - cinder_volume
- - nova_compute
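
Every key under neutron_opendaylight_conf_ini_overrides ends up in the [ml2_odl] section of the rendered networking-odl configuration, so further ml2_odl options can be injected the same way. A sketch, assuming networking-odl's timeout option and an arbitrary value; the existing keys are repeated because the whole dictionary is redefined:

    # Sketch: one extra [ml2_odl] knob added through the same overrides dict.
    neutron_opendaylight_conf_ini_overrides:
      ml2_odl:
        username: "admin"
        password: "admin"
        port_binding_controller: "pseudo-agentdb-binding"
        url: "http://{{ hostvars[groups['neutron_server'][0]]['ansible_eth1']['ipv4']['address'] }}:8080/controller/nb/v2/neutron"
        timeout: 30   # assumed networking-odl option; value arbitrary
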
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/defaults/repo_packages/opendaylight-master.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/defaults/repo_packages/opendaylight-master.yml
deleted file mode 100644
index f0743fc0..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/defaults/repo_packages/opendaylight-master.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-# Copyright 2017, Ericsson AB
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-networking_odl_git_repo: https://git.openstack.org/openstack/networking-odl
-networking_odl_git_install_branch: 7a3c5fee7deb01d9237f5d1cc43a17931999af02 # HEAD of "master" as of 24.11.2017
-networking_odl_project_group: neutron_all
-
-networking_sfc_git_repo: https://git.openstack.org/openstack/networking-sfc
-networking_sfc_git_install_branch: 899038b4d48c469af9f8c4982898478f32ba14a8 # HEAD of "master" as of 24.10.2017
-networking_sfc_project_group: neutron_all
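
The master pins above follow openstack-ansible's convention of freezing a SHA and dating it in a trailing comment, which keeps rebuilds reproducible. The branch-pinned alternative, which the pike file below applies with stable/pike, would be:

    # Sketch: branch pins instead of dated SHAs; reproducibility is lost
    # because the branch head moves.
    networking_odl_git_install_branch: master
    networking_sfc_git_install_branch: master
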
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/defaults/repo_packages/opendaylight-pike.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/defaults/repo_packages/opendaylight-pike.yml
deleted file mode 100644
index d4a0b931..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/defaults/repo_packages/opendaylight-pike.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-# Copyright 2017, Ericsson AB
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-networking_odl_git_repo: https://git.openstack.org/openstack/networking-odl
-networking_odl_git_install_branch: stable/pike
-networking_odl_project_group: neutron_all
-
-networking_sfc_git_repo: https://git.openstack.org/openstack/networking-sfc
-networking_sfc_git_install_branch: stable/pike
-networking_sfc_project_group: neutron_all
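
stable/pike is itself a moving target; pinning to release tags would freeze it completely. The tag names below are assumptions about the Pike releases of each project, not values taken from this change:

    # Hypothetical tag pins (tag names assumed):
    networking_odl_git_install_branch: 11.0.0   # assumed networking-odl Pike tag
    networking_sfc_git_install_branch: 5.0.0    # assumed networking-sfc Pike tag
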
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/inventory_odl/env.d/neutron.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/inventory_odl/env.d/neutron.yml
deleted file mode 100644
index fd74d8ac..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/inventory_odl/env.d/neutron.yml
+++ /dev/null
@@ -1,97 +0,0 @@
----
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-component_skel:
- neutron_agent:
- belongs_to:
- - neutron_all
- neutron_dhcp_agent:
- belongs_to:
- - neutron_all
- neutron_linuxbridge_agent:
- belongs_to:
- - neutron_all
- neutron_openvswitch_agent:
- belongs_to:
- - neutron_all
- neutron_metering_agent:
- belongs_to:
- - neutron_all
- neutron_l3_agent:
- belongs_to:
- - neutron_all
- neutron_lbaas_agent:
- belongs_to:
- - neutron_all
- neutron_bgp_dragent:
- belongs_to:
- - neutron_all
- neutron_metadata_agent:
- belongs_to:
- - neutron_all
- neutron_sriov_nic_agent:
- belongs_to:
- - neutron_all
- neutron_server:
- belongs_to:
- - neutron_all
- opendaylight:
- belongs_to:
- - neutron_all
- openvswitch_nsh:
- belongs_to:
- - neutron_all
-
-container_skel:
- neutron_agents_container:
- belongs_to:
- - network_containers
- contains:
- - neutron_agent
- - neutron_metadata_agent
- - neutron_metering_agent
- - neutron_linuxbridge_agent
- - neutron_openvswitch_agent
- - openvswitch_nsh
- - neutron_l3_agent
- - neutron_dhcp_agent
- - neutron_lbaas_agent
- - neutron_bgp_dragent
- properties:
- service_name: neutron
- neutron_server_container:
- belongs_to:
- - network_containers
- contains:
- - neutron_server
- - opendaylight
- properties:
- service_name: neutron
- neutron_networking_container:
- belongs_to:
- - network_containers
- contains:
- - openvswitch_nsh
- properties:
- is_metal: true
-
-
-physical_skel:
- network_containers:
- belongs_to:
- - all_containers
- network_hosts:
- belongs_to:
- - hosts
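
This env.d skeleton wires the two SFC-specific components (opendaylight and openvswitch_nsh) into openstack-ansible's dynamic inventory at three levels: component_skel assigns services to groups, container_skel schedules them into containers (or onto the host via is_metal), and physical_skel binds container groups to host groups. Adding a further component repeats the same pattern; a sketch with a hypothetical agent name:

    # Hypothetical extra component, following the file's three-level pattern
    # (physical_skel is untouched; network_containers already exists).
    component_skel:
      neutron_example_agent:
        belongs_to:
          - neutron_all
    container_skel:
      neutron_agents_container:
        contains:
          - neutron_example_agent
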
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/inventory_odl/env.d/nova.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/inventory_odl/env.d/nova.yml
deleted file mode 100644
index 1aee092e..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/openstack-ansible/playbooks/inventory_odl/env.d/nova.yml
+++ /dev/null
@@ -1,115 +0,0 @@
----
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-component_skel:
- nova_api_metadata:
- belongs_to:
- - nova_all
- nova_api_os_compute:
- belongs_to:
- - nova_all
- nova_api_placement:
- belongs_to:
- - nova_all
- nova_compute:
- belongs_to:
- - nova_all
- nova_conductor:
- belongs_to:
- - nova_all
- nova_scheduler:
- belongs_to:
- - nova_all
- nova_console:
- belongs_to:
- - nova_all
-
-
-container_skel:
- nova_api_metadata_container:
- belongs_to:
- - compute-infra_containers
- - os-infra_containers
- contains:
- - nova_api_metadata
- properties:
- service_name: nova
- nova_api_os_compute_container:
- belongs_to:
- - compute-infra_containers
- - os-infra_containers
- contains:
- - nova_api_os_compute
- properties:
- service_name: nova
- nova_api_placement_container:
- belongs_to:
- - compute-infra_containers
- - os-infra_containers
- contains:
- - nova_api_placement
- properties:
- service_name: nova
- nova_compute_container:
- belongs_to:
- - compute_containers
- contains:
- - neutron_linuxbridge_agent
- - neutron_openvswitch_agent
- - openvswitch_nsh
- - neutron_sriov_nic_agent
- - nova_compute
- properties:
- is_metal: true
- service_name: nova
- nova_conductor_container:
- belongs_to:
- - compute-infra_containers
- - os-infra_containers
- contains:
- - nova_conductor
- properties:
- service_name: nova
- nova_scheduler_container:
- belongs_to:
- - compute-infra_containers
- - os-infra_containers
- contains:
- - nova_scheduler
- properties:
- service_name: nova
- nova_console_container:
- belongs_to:
- - compute-infra_containers
- - os-infra_containers
- contains:
- - nova_console
- properties:
- service_name: nova
-
-
-physical_skel:
- compute-infra_containers:
- belongs_to:
- - all_containers
- compute-infra_hosts:
- belongs_to:
- - hosts
- compute_containers:
- belongs_to:
- - all_containers
- compute_hosts:
- belongs_to:
- - hosts
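
The SFC-relevant change in this nova skeleton is nova_compute_container: it pulls the openvswitch_nsh and OVS agent components onto the hypervisors and marks the group is_metal, so those services run on the host itself instead of in an LXC container. Isolated as a sketch, the property block doing that is:

    # is_metal pins the "container" to the host; without it the inventory
    # would schedule nova_compute into an LXC container.
    nova_compute_container:
      properties:
        is_metal: true
        service_name: nova
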
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/all_tacker.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/all_tacker.yml
deleted file mode 100644
index 0d6b15ec..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/all_tacker.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-# Tacker
-tacker_service_user_name: tacker
-tacker_service_tenant_name: service
-
-tacker_rabbitmq_userid: tacker
-tacker_rabbitmq_vhost: /tacker
-tacker_rabbitmq_port: "{{ rabbitmq_port }}"
-tacker_rabbitmq_use_ssl: "{{ rabbitmq_use_ssl }}"
-tacker_rabbitmq_servers: "{{ rabbitmq_servers }}"
-tacker_rabbitmq_host_group: "{{ rabbitmq_host_group }}"
-
-tacker_service_publicuri: "{{ openstack_service_publicuri_proto|default(tacker_service_proto) }}://{{ external_lb_vip_address }}:{{ tacker_service_port }}"
-tacker_service_adminurl: "{{ tacker_service_adminuri }}/"
-
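
These tacker defaults mirror what openstack-ansible defines for every service: a keystone user, a dedicated rabbitmq vhost, and endpoint URIs assembled from the load-balancer VIPs. With the HA VIPs from earlier in this diff and tacker's port 9890 (see the haproxy entry later in this diff), the two URI variables would expand roughly as below; the protocol and concrete values are illustrative assumptions:

    # Sketch of the expanded endpoints (assumed http, HA VIPs, port 9890):
    tacker_service_publicuri: "http://192.168.122.220:9890"
    tacker_service_adminurl: "http://172.29.236.222:9890/"
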
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/haproxy_config.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/haproxy_config.yml
deleted file mode 100644
index 49b58360..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/haproxy_config.yml
+++ /dev/null
@@ -1,286 +0,0 @@
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-haproxy_default_services:
- - service:
- haproxy_service_name: galera
- haproxy_backend_nodes: "{{ [groups['galera_all'][0]] | default([]) }}" # list expected
- haproxy_backup_nodes: "{{ groups['galera_all'][1:] | default([]) }}"
- haproxy_bind: "{{ [internal_lb_vip_address] }}"
- haproxy_port: 3306
- haproxy_balance_type: tcp
- haproxy_timeout_client: 5000s
- haproxy_timeout_server: 5000s
- haproxy_backend_options:
- - "mysql-check user {{ galera_monitoring_user }}"
- haproxy_whitelist_networks: "{{ haproxy_galera_whitelist_networks }}"
- - service:
- haproxy_service_name: repo_git
- haproxy_backend_nodes: "{{ groups['repo_all'] | default([]) }}"
- haproxy_bind: "{{ [internal_lb_vip_address] }}"
- haproxy_port: 9418
- haproxy_balance_type: tcp
- haproxy_backend_options:
- - tcp-check
- haproxy_whitelist_networks: "{{ haproxy_repo_git_whitelist_networks }}"
- - service:
- haproxy_service_name: repo_all
- haproxy_backend_nodes: "{{ groups['repo_all'] | default([]) }}"
- haproxy_bind: "{{ [internal_lb_vip_address] }}"
- haproxy_port: 8181
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk HEAD /"
- - service:
- haproxy_service_name: repo_cache
- haproxy_backend_nodes: "{{ [groups['repo_all'][0]] | default([]) }}" # list expected
- haproxy_backup_nodes: "{{ groups['repo_all'][1:] | default([]) }}"
- haproxy_bind: "{{ [internal_lb_vip_address] }}"
- haproxy_port: "{{ repo_pkg_cache_port }}"
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk HEAD /acng-report.html"
- haproxy_whitelist_networks: "{{ haproxy_repo_cache_whitelist_networks }}"
- - service:
- haproxy_service_name: glance_api
- haproxy_backend_nodes: "{{ groups['glance_api'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: 9292
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk /healthcheck"
- - service:
- haproxy_service_name: glance_registry
- haproxy_backend_nodes: "{{ groups['glance_registry'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: 9191
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk /healthcheck"
- haproxy_whitelist_networks: "{{ haproxy_glance_registry_whitelist_networks }}"
- - service:
- haproxy_service_name: gnocchi
- haproxy_backend_nodes: "{{ groups['gnocchi_all'] | default([]) }}"
- haproxy_port: 8041
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk /healthcheck"
- - service:
- haproxy_service_name: heat_api_cfn
- haproxy_backend_nodes: "{{ groups['heat_api_cfn'] | default([]) }}"
- haproxy_port: 8000
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk HEAD /"
- - service:
- haproxy_service_name: heat_api_cloudwatch
- haproxy_backend_nodes: "{{ groups['heat_api_cloudwatch'] | default([]) }}"
- haproxy_port: 8003
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk HEAD /"
- - service:
- haproxy_service_name: heat_api
- haproxy_backend_nodes: "{{ groups['heat_api'] | default([]) }}"
- haproxy_port: 8004
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk HEAD /"
- - service:
- haproxy_service_name: keystone_service
- haproxy_backend_nodes: "{{ groups['keystone_all'] | default([]) }}"
- haproxy_port: 5000
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_balance_type: "http"
- haproxy_backend_options:
- - "httpchk HEAD /"
- - service:
- haproxy_service_name: keystone_admin
- haproxy_backend_nodes: "{{ groups['keystone_all'] | default([]) }}"
- haproxy_port: 35357
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_balance_type: "http"
- haproxy_backend_options:
- - "httpchk HEAD /"
- haproxy_whitelist_networks: "{{ haproxy_keystone_admin_whitelist_networks }}"
- - service:
- haproxy_service_name: neutron_server
- haproxy_backend_nodes: "{{ groups['neutron_server'] | default([]) }}"
- haproxy_port: 9696
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk GET /"
- - service:
- haproxy_service_name: nova_api_metadata
- haproxy_backend_nodes: "{{ groups['nova_api_metadata'] | default([]) }}"
- haproxy_port: 8775
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk HEAD /"
- haproxy_whitelist_networks: "{{ haproxy_nova_metadata_whitelist_networks }}"
- - service:
- haproxy_service_name: nova_api_os_compute
- haproxy_backend_nodes: "{{ groups['nova_api_os_compute'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: 8774
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk HEAD /"
- - service:
- haproxy_service_name: nova_api_placement
- haproxy_backend_nodes: "{{ groups['nova_api_placement'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: 8780
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk HEAD /"
- haproxy_backend_httpcheck_options:
- - "expect status 401"
- - service:
- haproxy_service_name: nova_console
- haproxy_backend_nodes: "{{ groups['nova_console'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: "{{ nova_console_port }}"
- haproxy_balance_type: http
- haproxy_timeout_client: 60m
- haproxy_timeout_server: 60m
- haproxy_balance_alg: source
- haproxy_backend_options:
- - "httpchk HEAD /"
- haproxy_backend_httpcheck_options:
- - "expect status 404"
- - service:
- haproxy_service_name: cinder_api
- haproxy_backend_nodes: "{{ groups['cinder_api'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: 8776
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk HEAD /"
- - service:
- haproxy_service_name: horizon
- haproxy_backend_nodes: "{{ groups['horizon_all'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_ssl_all_vips: true
- haproxy_port: "{{ haproxy_ssl | ternary(443,80) }}"
- haproxy_backend_port: 80
- haproxy_redirect_http_port: 80
- haproxy_balance_type: http
- haproxy_balance_alg: source
- haproxy_backend_options:
- - "httpchk HEAD /"
- - service:
- haproxy_service_name: sahara_api
- haproxy_backend_nodes: "{{ groups['sahara_api'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_balance_alg: source
- haproxy_port: 8386
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk /healthcheck"
- - service:
- haproxy_service_name: swift_proxy
- haproxy_backend_nodes: "{{ groups['swift_proxy'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_balance_alg: source
- haproxy_port: 8080
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk /healthcheck"
- - service:
- haproxy_service_name: aodh_api
- haproxy_backend_nodes: "{{ groups['aodh_api'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: 8042
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk HEAD /"
- haproxy_backend_httpcheck_options:
- - "expect status 401"
- - service:
- haproxy_service_name: ironic_api
- haproxy_backend_nodes: "{{ groups['ironic_api'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: 6385
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk GET /"
- - service:
- haproxy_service_name: rabbitmq_mgmt
- haproxy_backend_nodes: "{{ groups['rabbitmq'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: 15672
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk HEAD /"
- haproxy_whitelist_networks: "{{ haproxy_rabbitmq_management_whitelist_networks }}"
- - service:
- haproxy_service_name: magnum
- haproxy_backend_nodes: "{{ groups['magnum_all'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: 9511
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk GET /"
- - service:
- haproxy_service_name: trove
- haproxy_backend_nodes: "{{ groups['trove_api'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: 8779
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk HEAD /"
- - service:
- haproxy_service_name: barbican
- haproxy_backend_nodes: "{{ groups['barbican_api'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: 9311
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk GET /"
- - service:
- haproxy_service_name: designate_api
- haproxy_backend_nodes: "{{ groups['designate_api'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: 9001
- haproxy_balance_type: http
- haproxy_backend_options:
- - "forwardfor"
- - "httpchk /versions"
- - "httplog"
- - service:
- haproxy_service_name: octavia
- haproxy_backend_nodes: "{{ groups['octavia_all'] | default([]) }}"
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_port: 9876
- haproxy_balance_type: http
- haproxy_backend_options:
- - "httpchk GET /"
- haproxy_whitelist_networks: "{{ haproxy_octavia_whitelist_networks }}"
- - service:
- haproxy_service_name: tacker
- haproxy_backend_nodes: "{{ groups['tacker_all'] | default([]) }}"
- haproxy_port: 9890
- haproxy_ssl: "{{ haproxy_ssl }}"
- haproxy_balance_type: http
- haproxy_backend_options:
- - "forwardfor"
- - "httpchk"
- - "httplog"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/openstack_services_master.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/openstack_services_master.yml
deleted file mode 100644
index 86501634..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/openstack_services_master.yml
+++ /dev/null
@@ -1,222 +0,0 @@
----
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-## NOTICE on items in this file:
-## * If you use anything in the *._git_install_branch field that is not a TAG
-## make sure to leave an in-line comment as to "why".
-
-## For the sake of anyone else editing this file:
-## * If you add services to this file please do so in alphabetical order.
-## * Every entry should be namespaced with the name of the client followed by an "_"
-## * All items within this file should be separated by `name_`; note that the name of the
-##   package should be one long name with no additional `_` separating it.
-
-
-### Before this is shipped all of these services should have a tag set as the branch,
-### or have a comment / reason attached to them as to why a tag can not work.
-
-
-## Global Requirements
-requirements_git_repo: https://git.openstack.org/openstack/requirements
-requirements_git_install_branch: 691711c0effddd9cbaaadba3d494c15bc422fdd5 # HEAD of "master" as of 24.11.2017
-
-
-## Aodh service
-aodh_git_repo: https://git.openstack.org/openstack/aodh
-aodh_git_install_branch: 359043dc774be847cb539d18d13e336d40453e72 # HEAD of "master" as of 24.11.2017
-aodh_git_project_group: aodh_all
-
-
-## Barbican service
-barbican_git_repo: https://git.openstack.org/openstack/barbican
-barbican_git_install_branch: 5617d605f2e12840933e4a9d6417912cdbb811d5 # HEAD of "master" as of 24.11.2017
-barbican_git_project_group: barbican_all
-
-
-## Ceilometer service
-ceilometer_git_repo: https://git.openstack.org/openstack/ceilometer
-ceilometer_git_install_branch: bd464f1f572ba150f52e284de430d13045dc6c18 # HEAD of "master" as of 24.11.2017
-ceilometer_git_project_group: ceilometer_all
-
-
-## Cinder service
-cinder_git_repo: https://git.openstack.org/openstack/cinder
-cinder_git_install_branch: 80558687d0fa55f2adf699e7369ebe3dbc3591bf # HEAD of "master" as of 24.11.2017
-cinder_git_project_group: cinder_all
-
-
-## Designate service
-designate_git_repo: https://git.openstack.org/openstack/designate
-designate_git_install_branch: 2f75586379e8d611f37e06d385e79d0bc2c84ca1 # HEAD of "master" as of 24.11.2017
-designate_git_project_group: designate_all
-
-
-## Horizon Designate dashboard plugin
-designate_dashboard_git_repo: https://git.openstack.org/openstack/designate-dashboard
-designate_dashboard_git_install_branch: 571e127e5f853aa4dbdd377d831e32f8ff81eafe # HEAD of "master" as of 24.11.2017
-designate_dashboard_git_project_group: horizon_all
-
-
-## Dragonflow service
-dragonflow_git_repo: https://git.openstack.org/openstack/dragonflow
-dragonflow_git_install_branch: 7bf00cf315659252f03f6c65f6159a924da6f978 # HEAD of "master" as of 24.11.2017
-dragonflow_git_project_group: neutron_all
-
-
-## Glance service
-glance_git_repo: https://git.openstack.org/openstack/glance
-glance_git_install_branch: d88bd2ca8ef95810441dae640d3c6b9e79eca353 # HEAD of "master" as of 24.11.2017
-glance_git_project_group: glance_all
-
-
-## Heat service
-heat_git_repo: https://git.openstack.org/openstack/heat
-heat_git_install_branch: f4a06c2a92a361dbb401107b4ea1ab60972f473e # HEAD of "master" as of 24.11.2017
-heat_git_project_group: heat_all
-
-
-## Horizon service
-horizon_git_repo: https://git.openstack.org/openstack/horizon
-horizon_git_install_branch: 846d269d90e01e463b510474040e0ad984a5679f # HEAD of "master" as of 24.11.2017
-horizon_git_project_group: horizon_all
-
-## Horizon Ironic dashboard plugin
-ironic_dashboard_git_repo: https://git.openstack.org/openstack/ironic-ui
-ironic_dashboard_git_install_branch: d6199d51171e6c8700663b0b0618ee0adf033b4d # HEAD of "master" as of 24.11.2017
-ironic_dashboard_git_project_group: horizon_all
-
-## Horizon Magnum dashboard plugin
-magnum_dashboard_git_repo: https://git.openstack.org/openstack/magnum-ui
-magnum_dashboard_git_install_branch: 6160d903fae9c652b459c93c218e0ea75924a85d # HEAD of "master" as of 24.11.2017
-magnum_dashboard_git_project_group: horizon_all
-
-## Horizon LBaaS dashboard plugin
-neutron_lbaas_dashboard_git_repo: https://git.openstack.org/openstack/neutron-lbaas-dashboard
-neutron_lbaas_dashboard_git_install_branch: ef650294bcc7447d441e6a710c39d64e384e1b27 # HEAD of "master" as of 24.11.2017
-neutron_lbaas_dashboard_git_project_group: horizon_all
-
-## Horizon FWaaS dashboard plugin
-neutron_fwaas_dashboard_git_repo: https://git.openstack.org/openstack/neutron-fwaas-dashboard
-neutron_fwaas_dashboard_git_install_branch: 6de122d4753a6db24d2dc4c22a71e702ed980e82 # HEAD of "master" as of 24.11.2017
-neutron_fwaas_dashboard_git_project_group: horizon_all
-
-## Horizon Sahara dashboard plugin
-sahara_dashboard_git_repo: https://git.openstack.org/openstack/sahara-dashboard
-sahara_dashboard_git_install_branch: 3e5c59e6229dac8b303029058fcee9d61200ebc8 # HEAD of "master" as of 24.11.2017
-sahara_dashboard_git_project_group: horizon_all
-
-
-## Keystone service
-keystone_git_repo: https://git.openstack.org/openstack/keystone
-keystone_git_install_branch: 70fe4ec09b55def21361a32c8fa7f12e7c891ab1 # HEAD of "master" as of 24.11.2017
-keystone_git_project_group: keystone_all
-
-
-## Neutron service
-neutron_git_repo: https://git.openstack.org/openstack/neutron
-neutron_git_install_branch: d1277c1630570ca45b490c48371e3f7e97be78c3 # HEAD of "master" as of 24.11.2017
-neutron_git_project_group: neutron_all
-
-neutron_lbaas_git_repo: https://git.openstack.org/openstack/neutron-lbaas
-neutron_lbaas_git_install_branch: b1123e7a759248dfa63afdf8b86aafd692572ebd # HEAD of "master" as of 24.11.2017
-neutron_lbaas_git_project_group: neutron_all
-
-neutron_vpnaas_git_repo: https://git.openstack.org/openstack/neutron-vpnaas
-neutron_vpnaas_git_install_branch: 79e4eb81dd05588bcf68b92d46c62f0d26153542 # HEAD of "master" as of 24.11.2017
-neutron_vpnaas_git_project_group: neutron_all
-
-neutron_fwaas_git_repo: https://git.openstack.org/openstack/neutron-fwaas
-neutron_fwaas_git_install_branch: 74eac2ca2980e6162d9c88ee6bd48830386c392a # HEAD of "master" as of 24.11.2017
-neutron_fwaas_git_project_group: neutron_all
-
-neutron_dynamic_routing_git_repo: https://git.openstack.org/openstack/neutron-dynamic-routing
-neutron_dynamic_routing_git_install_branch: 183c3fa4840d22be1974534eb9e1b28b552f4a42 # HEAD of "master" as of 24.11.2017
-neutron_dynamic_routing_git_project_group: neutron_all
-
-networking_calico_git_repo: https://git.openstack.org/openstack/networking-calico
-networking_calico_git_install_branch: 9688df1a3d1d8b3fd9ba367e82fe6b0559416728 # HEAD of "master" as of 24.11.2017
-networking_calico_git_project_group: neutron_all
-
-## Nova service
-nova_git_repo: https://git.openstack.org/openstack/nova
-nova_git_install_branch: 22a790ef45b0523e8cf2ed97d14e050431c90fd9 # HEAD of "master" as of 24.11.2017
-nova_git_project_group: nova_all
-
-
-## PowerVM Virt Driver
-nova_powervm_git_repo: https://git.openstack.org/openstack/nova-powervm
-nova_powervm_git_install_branch: f2de4441e39b0f66cf31f854b228e9e7037f04de # HEAD of "master" as of 24.11.2017
-nova_powervm_git_project_group: nova_all
-
-
-## LXD Virt Driver
-nova_lxd_git_repo: https://git.openstack.org/openstack/nova-lxd
-nova_lxd_git_install_branch: e498de603b31c189fd32a6067d45a36575b96b0a # HEAD of "master" as of 24.11.2017
-nova_lxd_git_project_group: nova_all
-
-
-## Sahara service
-sahara_git_repo: https://git.openstack.org/openstack/sahara
-sahara_git_install_branch: 395856c513b1efad82db8fa78fb1cbfe0f3a6749 # HEAD of "master" as of 24.11.2017
-sahara_git_project_group: sahara_all
-
-
-## Swift service
-swift_git_repo: https://git.openstack.org/openstack/swift
-swift_git_install_branch: 3135878d2fe9909f49fcadeeb9cc6c6933d06127 # HEAD of "master" as of 24.11.2017
-swift_git_project_group: swift_all
-
-
-## Swift3 middleware
-swift_swift3_git_repo: https://git.openstack.org/openstack/swift3
-swift_swift3_git_install_branch: 1fb6a30ee59a16cd4b6c49bab963ff9e3f974580 # HEAD of "master" as of 24.11.2017
-swift_swift3_git_project_group: swift_all
-
-
-## Ironic service
-ironic_git_repo: https://git.openstack.org/openstack/ironic
-ironic_git_install_branch: 27ce77142bfb9ac56e85db37e0923a0eb47f2f7a # HEAD of "master" as of 24.11.2017
-ironic_git_project_group: ironic_all
-
-## Magnum service
-magnum_git_repo: https://git.openstack.org/openstack/magnum
-magnum_git_install_branch: 4bf3b3263870a4ec81cf372713cacec446b3ee84 # HEAD of "master" as of 24.11.2017
-magnum_git_project_group: magnum_all
-
-## Trove service
-trove_git_repo: https://git.openstack.org/openstack/trove
-trove_git_install_branch: b09d0eb3135047891a369d3c0eb2c6e9ae649f5b # HEAD of "master" as of 24.11.2017
-trove_git_project_group: trove_all
-
-## Horizon Trove dashboard plugin
-trove_dashboard_git_repo: https://git.openstack.org/openstack/trove-dashboard
-trove_dashboard_git_install_branch: 14a4609606d42cae827b8fc6b44453caea258976 # HEAD of "master" as of 24.11.2017
-trove_dashboard_git_project_group: horizon_all
-
-## Octavia service
-octavia_git_repo: https://git.openstack.org/openstack/octavia
-octavia_git_install_branch: bb9bb2d05b268cff9846e0a09ad3940be5fe5a80 # HEAD of "master" as of 24.11.2017
-octavia_git_project_group: octavia_all
-
-## Molteniron service
-molteniron_git_repo: https://git.openstack.org/openstack/molteniron
-molteniron_git_install_branch: 094276cda77d814d07ad885e7d63de8d1243750a # HEAD of "master" as of 24.11.2017
-molteniron_git_project_group: molteniron_all
-
-## Tacker service
-tacker_git_repo: https://git.openstack.org/openstack/tacker
-tacker_git_install_branch: cc03b5d952527b8cad2e2e309a97d55afb1ca559 # HEAD of "master" as of 24.11.2017
-tacker_git_project_group: tacker_all
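
The *_git_install_branch values above pin every project to a commit SHA rather than a mutable branch name, as the NOTICE requires. As a hedged sketch (assuming network access to git.openstack.org), a pin such as the requirements one can be refreshed by resolving the branch head explicitly:

    # Print the current HEAD of master for openstack/requirements; the first
    # column of the output is the SHA to paste into
    # requirements_git_install_branch, together with a dated in-line comment.
    git ls-remote https://git.openstack.org/openstack/requirements refs/heads/master
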
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/openstack_services_pike.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/openstack_services_pike.yml
deleted file mode 100644
index cecd7db1..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/openstack_services_pike.yml
+++ /dev/null
@@ -1,217 +0,0 @@
----
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-## NOTICE on items in this file:
-## * If you use anything in the *_git_install_branch field that is not a TAG
-## make sure to leave an in-line comment as to "why".
-
-## For the sake of anyone else editing this file:
-## * If you add services to this file please do so in alphabetical order.
-## * Every entry should be namespaced with the name of the client followed by an "_"
-## * All items within this file should be separated by `name_`; note that the name of the
-## package should be one long name with no additional `_` separating it.
-
-
-### Before this is shipped all of these services should have a tag set as the branch,
-### or have a comment / reason attached to them as to why a tag can not work.
-
-
-## Global Requirements
-requirements_git_repo: https://git.openstack.org/openstack/requirements
-requirements_git_install_branch: 732861162db604622ac23ad65c070e3f69d0b44e # HEAD of "stable/pike" as of 10.11.2017
-
-
-## Aodh service
-aodh_git_repo: https://git.openstack.org/openstack/aodh
-aodh_git_install_branch: ed3ce41fa0ae0173601b683748265e502b84553b # HEAD of "stable/pike" as of 10.11.2017
-aodh_git_project_group: aodh_all
-
-
-## Barbican service
-barbican_git_repo: https://git.openstack.org/openstack/barbican
-barbican_git_install_branch: ec47f0358a17fde8fa1253253e21af07f72b7fa3 # HEAD of "stable/pike" as of 10.11.2017
-barbican_git_project_group: barbican_all
-
-
-## Ceilometer service
-ceilometer_git_repo: https://git.openstack.org/openstack/ceilometer
-ceilometer_git_install_branch: 8f10d547a4c4eeac0af2a5c833881dbe48c26464 # HEAD of "stable/pike" as of 10.11.2017
-ceilometer_git_project_group: ceilometer_all
-
-
-## Cinder service
-cinder_git_repo: https://git.openstack.org/openstack/cinder
-cinder_git_install_branch: 34928801b06e1162895a64c4e95c2f2692303a50 # HEAD of "stable/pike" as of 10.11.2017
-cinder_git_project_group: cinder_all
-
-
-## Designate service
-designate_git_repo: https://git.openstack.org/openstack/designate
-designate_git_install_branch: 6beba54a71510525d5bbc4956d20d27bffa982e5 # HEAD of "stable/pike" as of 10.11.2017
-designate_git_project_group: designate_all
-
-
-## Horizon Designate dashboard plugin
-designate_dashboard_git_repo: https://git.openstack.org/openstack/designate-dashboard
-designate_dashboard_git_install_branch: bc128a7c29a427933fc4ca94a7510ef8c97e5206 # HEAD of "stable/pike" as of 10.11.2017
-designate_dashboard_git_project_group: horizon_all
-
-
-## Dragonflow service
-dragonflow_git_repo: https://git.openstack.org/openstack/dragonflow
-dragonflow_git_install_branch: 84f1a26ff8e976b753593dc445e09a4c1a675a21 # Frozen HEAD of "master" as of 14.10.2017 (no stable/pike branch)
-dragonflow_git_project_group: neutron_all
-
-
-## Glance service
-glance_git_repo: https://git.openstack.org/openstack/glance
-glance_git_install_branch: 06af2eb5abe0332f7035a7d7c2fbfd19fbc4dae7 # HEAD of "stable/pike" as of 10.11.2017
-glance_git_project_group: glance_all
-
-
-## Heat service
-heat_git_repo: https://git.openstack.org/openstack/heat
-heat_git_install_branch: 31175a5641035abeec58c3f135ad09d3f231ac41 # HEAD of "stable/pike" as of 10.11.2017
-heat_git_project_group: heat_all
-
-
-## Horizon service
-horizon_git_repo: https://git.openstack.org/openstack/horizon
-horizon_git_install_branch: 246ff9f81248a00a434e66d18fad70519ba811cc # HEAD of "stable/pike" as of 10.11.2017
-horizon_git_project_group: horizon_all
-
-## Horizon Ironic dashboard plugin
-ironic_dashboard_git_repo: https://git.openstack.org/openstack/ironic-ui
-ironic_dashboard_git_install_branch: e2cba8ed8745b8ffcaa60d26ab69fd93f61582ad # HEAD of "stable/pike" as of 10.11.2017
-ironic_dashboard_git_project_group: horizon_all
-
-## Horizon Magnum dashboard plugin
-magnum_dashboard_git_repo: https://git.openstack.org/openstack/magnum-ui
-magnum_dashboard_git_install_branch: 0b9fc50aada1a3e214acaad1204b48c96a549e5f # HEAD of "stable/pike" as of 10.11.2017
-magnum_dashboard_git_project_group: horizon_all
-
-## Horizon LBaaS dashboard plugin
-neutron_lbaas_dashboard_git_repo: https://git.openstack.org/openstack/neutron-lbaas-dashboard
-neutron_lbaas_dashboard_git_install_branch: a5a05a27e7cab99dc379774f1d01c0076818e539 # HEAD of "stable/pike" as of 10.11.2017
-neutron_lbaas_dashboard_git_project_group: horizon_all
-
-## Horizon Sahara dashboard plugin
-sahara_dashboard_git_repo: https://git.openstack.org/openstack/sahara-dashboard
-sahara_dashboard_git_install_branch: 00c241d97bd3a116513580cfe8006480723d7c17 # HEAD of "stable/pike" as of 10.11.2017
-sahara_dashboard_git_project_group: horizon_all
-
-
-## Keystone service
-keystone_git_repo: https://git.openstack.org/openstack/keystone
-keystone_git_install_branch: d07677aba54362a4a3aa2d165b155105ffe30d73 # HEAD of "stable/pike" as of 10.11.2017
-keystone_git_project_group: keystone_all
-
-
-## Neutron service
-neutron_git_repo: https://git.openstack.org/openstack/neutron
-neutron_git_install_branch: bd64409bbb9465143ea6df9db4d53a7679599b69 # HEAD of "stable/pike" as of 10.11.2017
-neutron_git_project_group: neutron_all
-
-neutron_lbaas_git_repo: https://git.openstack.org/openstack/neutron-lbaas
-neutron_lbaas_git_install_branch: f0b6a85877ba9c31c41fc6c8b96ffd2b63e6afb9 # HEAD of "stable/pike" as of 10.11.2017
-neutron_lbaas_git_project_group: neutron_all
-
-neutron_vpnaas_git_repo: https://git.openstack.org/openstack/neutron-vpnaas
-neutron_vpnaas_git_install_branch: 60e4e7113b5fbbf28e97ebce2f40b7f1675200e6 # HEAD of "stable/pike" as of 10.11.2017
-neutron_vpnaas_git_project_group: neutron_all
-
-neutron_fwaas_git_repo: https://git.openstack.org/openstack/neutron-fwaas
-neutron_fwaas_git_install_branch: c2bafa999f7ea45687d5a3d42739e465564e99d1 # HEAD of "stable/pike" as of 10.11.2017
-neutron_fwaas_git_project_group: neutron_all
-
-neutron_dynamic_routing_git_repo: https://git.openstack.org/openstack/neutron-dynamic-routing
-neutron_dynamic_routing_git_install_branch: 9098d4447581117e857d2f86fb4a0508b5ffbb6a # HEAD of "stable/pike" as of 10.11.2017
-neutron_dynamic_routing_git_project_group: neutron_all
-
-networking_calico_git_repo: https://git.openstack.org/openstack/networking-calico
-networking_calico_git_install_branch: 9688df1a3d1d8b3fd9ba367e82fe6b0559416728 # HEAD of "master" as of 10.11.2017
-networking_calico_git_project_group: neutron_all
-
-## Nova service
-nova_git_repo: https://git.openstack.org/openstack/nova
-nova_git_install_branch: 8fdb1372138f8371a4d414deb38b86e9197b8649 # HEAD of "stable/pike" as of 10.11.2017
-nova_git_project_group: nova_all
-
-
-## PowerVM Virt Driver
-nova_powervm_git_repo: https://git.openstack.org/openstack/nova-powervm
-nova_powervm_git_install_branch: e0b516ca36fa5dfd38ae6f7ea97afd9a52f313ed # HEAD of "stable/pike" as of 10.11.2017
-nova_powervm_git_project_group: nova_all
-
-
-## LXD Virt Driver
-nova_lxd_git_repo: https://git.openstack.org/openstack/nova-lxd
-nova_lxd_git_install_branch: 9747c274138d9ef40512d5015e9e581f6bbec5d9 # HEAD of "stable/pike" as of 10.11.2017
-nova_lxd_git_project_group: nova_all
-
-
-## Sahara service
-sahara_git_repo: https://git.openstack.org/openstack/sahara
-sahara_git_install_branch: 3ee0da5ea09904125c44e1f9d1a9b83554b1a1cd # HEAD of "stable/pike" as of 10.11.2017
-sahara_git_project_group: sahara_all
-
-
-## Swift service
-swift_git_repo: https://git.openstack.org/openstack/swift
-swift_git_install_branch: 0344d6eb5afc723adc7bacf4b4e2aaf04da47548 # HEAD of "stable/pike" as of 10.11.2017
-swift_git_project_group: swift_all
-
-
-## Swift3 middleware
-swift_swift3_git_repo: https://git.openstack.org/openstack/swift3
-swift_swift3_git_install_branch: 1fb6a30ee59a16cd4b6c49bab963ff9e3f974580 # HEAD of "master" as of 10.11.2017
-swift_swift3_git_project_group: swift_all
-
-
-## Ironic service
-ironic_git_repo: https://git.openstack.org/openstack/ironic
-ironic_git_install_branch: c163e78629eac4e696ae62dc9a29a0fc77ca463f # HEAD of "stable/pike" as of 10.11.2017
-ironic_git_project_group: ironic_all
-
-## Magnum service
-magnum_git_repo: https://git.openstack.org/openstack/magnum
-magnum_git_install_branch: 839884593e6f6dabaebe401b013465c836fefc84 # HEAD of "stable/pike" as of 10.11.2017
-magnum_git_project_group: magnum_all
-
-## Trove service
-trove_git_repo: https://git.openstack.org/openstack/trove
-trove_git_install_branch: e6d4b4b3fe1768348c9df815940b97cecb5e7ee2 # HEAD of "stable/pike" as of 10.11.2017
-trove_git_project_group: trove_all
-
-## Horizon Trove dashboard plugin
-trove_dashboard_git_repo: https://git.openstack.org/openstack/trove-dashboard
-trove_dashboard_git_install_branch: 387c3358555ee539f7abbbf4875497497e12c265 # HEAD of "stable/pike" as of 10.11.2017
-trove_dashboard_git_project_group: horizon_all
-
-## Octavia service
-octavia_git_repo: https://git.openstack.org/openstack/octavia
-octavia_git_install_branch: 534e1f932cff19e6a54e256c56b7e3479755760d # HEAD of "stable/pike" as of 10.11.2017
-octavia_git_project_group: octavia_all
-
-## Molteniron service
-molteniron_git_repo: https://git.openstack.org/openstack/molteniron
-molteniron_git_install_branch: 094276cda77d814d07ad885e7d63de8d1243750a # HEAD of "master" as of 10.11.2017
-molteniron_git_project_group: molteniron_all
-
-## Tacker service
-tacker_git_repo: https://github.com/manuelbuil/tacker
-tacker_git_install_branch: pike-insecured-bug-fixed
-tacker_git_project_group: tacker_all
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/os-tacker-install.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/os-tacker-install.yml
deleted file mode 100644
index dd965951..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/os-tacker-install.yml
+++ /dev/null
@@ -1,63 +0,0 @@
----
-# Copyright 2017, SUSE LINUX GmbH.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-- name: Install the tacker components
- hosts: tacker_all
- gather_facts: "{{ gather_facts | default(True) }}"
- max_fail_percentage: 20
- user: root
- pre_tasks:
- - include: common-tasks/os-lxc-container-setup.yml
- - include: common-tasks/rabbitmq-vhost-user.yml
- static: no
- vars:
- user: "{{ tacker_rabbitmq_userid }}"
- password: "{{ tacker_rabbitmq_password }}"
- vhost: "{{ tacker_rabbitmq_vhost }}"
- _rabbitmq_host_group: "{{ tacker_rabbitmq_host_group }}"
- when:
- - inventory_hostname == groups['tacker_all'][0]
- - groups[tacker_rabbitmq_host_group] | length > 0
- - include: common-tasks/os-log-dir-setup.yml
- vars:
- log_dirs:
- - src: "/openstack/log/{{ inventory_hostname }}-tacker"
- dest: "/var/log/tacker"
- - include: common-tasks/mysql-db-user.yml
- static: no
- vars:
- user_name: "{{ tacker_galera_user }}"
- password: "{{ tacker_container_mysql_password }}"
- login_host: "{{ tacker_galera_address }}"
- db_name: "{{ tacker_galera_database }}"
- when: inventory_hostname == groups['tacker_all'][0]
- - include: common-tasks/package-cache-proxy.yml
- roles:
- - role: "os_tacker"
- - role: "openstack_openrc"
- tags:
- - openrc
- - role: "rsyslog_client"
- rsyslog_client_log_rotate_file: tacker_log_rotate
- rsyslog_client_log_dir: "/var/log/tacker"
- rsyslog_client_config_name: "99-tacker-rsyslog-client.conf"
- tags:
- - rsyslog
- vars:
- is_metal: "{{ properties.is_metal|default(false) }}"
- tacker_galera_address: "{{ internal_lb_vip_address }}"
- environment: "{{ deployment_environment_variables | default({}) }}"
- tags:
- - tacker
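
For context, an OSA playbook like the deleted os-tacker-install.yml above is normally run through the openstack-ansible wrapper from the playbooks directory; a hedged usage sketch, with the checkout path assumed:

    # Run only the tacker installation play against the tacker hosts group
    cd /opt/openstack-ansible/playbooks
    openstack-ansible os-tacker-install.yml --limit tacker_all
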
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/setup-openstack.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/setup-openstack.yml
deleted file mode 100644
index 94bb5291..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/setup-openstack.yml
+++ /dev/null
@@ -1,45 +0,0 @@
----
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-- include: os-keystone-install.yml
-- include: os-barbican-install.yml
-- include: os-glance-install.yml
-- include: os-cinder-install.yml
-- include: os-nova-install.yml
-- include: os-neutron-install.yml
-- include: os-heat-install.yml
-- include: os-horizon-install.yml
-- include: os-ceilometer-install.yml
-- include: os-aodh-install.yml
-- include: os-designate-install.yml
-#NOTE(stevelle) Ensure Gnocchi identities exist before Swift
-- include: os-gnocchi-install.yml
- when:
- - gnocchi_storage_driver is defined
- - gnocchi_storage_driver == 'swift'
- vars:
- gnocchi_identity_only: True
-- include: os-swift-install.yml
-- include: os-gnocchi-install.yml
-- include: os-tacker-install.yml
-- include: os-ironic-install.yml
-- include: os-magnum-install.yml
-- include: os-trove-install.yml
-- include: os-sahara-install.yml
-- include: os-molteniron-install.yml
-- include: os-octavia-install.yml
-- include: os-tempest-install.yml
- when: (tempest_install | default(False)) | bool or (tempest_run | default(False)) | bool
-
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/tacker.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/tacker.yml
deleted file mode 100644
index 9ceabbc2..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/tacker.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-# Copyright 2017, SUSE Linux GmbH
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-component_skel:
- tacker_server:
- belongs_to:
- - tacker_all
-
-
-container_skel:
- tacker_container:
- belongs_to:
- - mano_containers
- contains:
- - tacker_server
-
-
-physical_skel:
- mano_containers:
- belongs_to:
- - all_containers
- mano_hosts:
- belongs_to:
- - hosts
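
The skeleton above wires a tacker_server component into mano_containers, which in turn live on mano_hosts; OSA's dynamic inventory materializes those groups on its next run. A hedged way to verify the result, with the script path assumed from a standard openstack-ansible checkout:

    # List the generated inventory after dropping tacker.yml into env.d
    cd /opt/openstack-ansible
    ./scripts/inventory-manage.py -l | grep tacker
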
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/tacker_all.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/tacker_all.yml
deleted file mode 100644
index 2a01a160..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/tacker_all.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-# Copyright 2017, SUSE LINUX GmbH
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-tacker_service_region: "{{ service_region }}"
-tacker_service_in_ldap: "{{ service_ldap_backend_enabled }}"
-
-tacker_aodh_enabled: "{{ groups['aodh_all'] is defined and groups['aodh_all'] | length > 0 }}"
-tacker_gnocchi_enabled: "{{ groups['gnocchi_all'] is defined and groups['gnocchi_all'] | length > 0 }}"
-
-# NOTE: these and their swift_all.yml counterpart should be moved back to all.yml once swift with tacker gets proper SSL support
-# swift_rabbitmq_telemetry_port: "{{ rabbitmq_port }}"
-# swift_rabbitmq_telemetry_use_ssl: "{{ rabbitmq_use_ssl }}"
-
-# Ensure that the package state matches the global setting
-tacker_package_state: "{{ package_state }}"
-
-# venv fetch configuration
-tacker_venv_tag: "{{ venv_tag }}"
-tacker_venv_download_url: "{{ venv_base_download_url }}/tacker-{{ openstack_release }}-{{ ansible_architecture | lower }}.tgz"
-
-# locations for fetching the default files from the git source
-tacker_git_config_lookup_location: "{{ openstack_repo_url }}/openstackgit/tacker"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/user_secrets.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/user_secrets.yml
deleted file mode 100644
index 50c7c0e8..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/user_secrets.yml
+++ /dev/null
@@ -1,163 +0,0 @@
----
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-############################# WARNING ########################################
-# The playbooks do not currently manage changing passwords in an existing
-# environment. Changing passwords and re-running the playbooks will fail
-# and may break your OpenStack environment.
-############################# WARNING ########################################
-
-
-## Rabbitmq Options
-rabbitmq_cookie_token:
-rabbitmq_monitoring_password:
-
-## Tokens
-memcached_encryption_key:
-
-## Galera Options
-galera_root_password:
-
-## Keystone Options
-keystone_container_mysql_password:
-keystone_auth_admin_password:
-keystone_service_password:
-keystone_rabbitmq_password:
-
-## Ceilometer Options:
-ceilometer_container_db_password:
-ceilometer_service_password:
-ceilometer_telemetry_secret:
-ceilometer_rabbitmq_password:
-
-## Aodh Options:
-aodh_container_db_password:
-aodh_service_password:
-aodh_rabbitmq_password:
-
-## Cinder Options
-cinder_container_mysql_password:
-cinder_service_password:
-cinder_profiler_hmac_key:
-cinder_rabbitmq_password:
-
-## Ceph/rbd: a UUID to be used by libvirt to refer to the client.cinder user
-cinder_ceph_client_uuid:
-
-## Glance Options
-glance_container_mysql_password:
-glance_service_password:
-glance_profiler_hmac_key:
-glance_rabbitmq_password:
-
-## Gnocchi Options:
-gnocchi_container_mysql_password:
-gnocchi_service_password:
-
-## Heat Options
-heat_stack_domain_admin_password:
-heat_container_mysql_password:
-### THE HEAT AUTH KEY NEEDS TO BE 32 CHARACTERS LONG ##
-heat_auth_encryption_key:
-### THE HEAT AUTH KEY NEEDS TO BE 32 CHARACTERS LONG ##
-heat_service_password:
-heat_rabbitmq_password:
-
-## Ironic options
-ironic_rabbitmq_password:
-ironic_container_mysql_password:
-ironic_service_password:
-ironic_swift_temp_url_secret_key:
-
-## Horizon Options
-horizon_container_mysql_password:
-horizon_secret_key:
-
-## Neutron Options
-neutron_container_mysql_password:
-neutron_service_password:
-neutron_rabbitmq_password:
-neutron_ha_vrrp_auth_password:
-
-## Nova Options
-nova_container_mysql_password:
-nova_api_container_mysql_password:
-nova_metadata_proxy_secret:
-nova_service_password:
-nova_rabbitmq_password:
-nova_placement_service_password:
-nova_placement_container_mysql_password:
-
-# LXD Options for nova compute
-lxd_trust_password:
-
-## Octavia Options
-octavia_container_mysql_password:
-octavia_service_password:
-octavia_health_hmac_key:
-octavia_rabbitmq_password:
-
-## Sahara Options
-sahara_container_mysql_password:
-sahara_rabbitmq_password:
-sahara_service_password:
-
-## Swift Options:
-swift_service_password:
-swift_dispersion_password:
-### Once the swift cluster has been setup DO NOT change these hash values!
-swift_hash_path_suffix:
-swift_hash_path_prefix:
-# Swift needs a telemetry password when using ceilometer
-swift_rabbitmq_telemetry_password:
-
-## haproxy stats password
-haproxy_stats_password:
-haproxy_keepalived_authentication_password:
-
-## Magnum Options
-magnum_service_password:
-magnum_galera_password:
-magnum_rabbitmq_password:
-magnum_trustee_password:
-
-## Rally Options:
-rally_galera_password:
-
-## Trove Options
-trove_galera_password:
-trove_rabbitmq_password:
-trove_service_password:
-trove_admin_user_password:
-trove_taskmanager_rpc_encr_key:
-trove_inst_rpc_key_encr_key:
-
-## Barbican Options
-barbican_galera_password:
-barbican_rabbitmq_password:
-barbican_service_password:
-
-## Designate Options
-designate_galera_password:
-designate_rabbitmq_password:
-designate_service_password:
-
-## Molteniron Options:
-molteniron_container_mysql_password:
-
-## Tacker options
-tacker_rabbitmq_password:
-tacker_service_password:
-tacker_container_mysql_password:
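
Every key in the deleted file above is intentionally left empty; the values are generated before deployment. A hedged sketch using the password generator shipped with openstack-ansible (script location assumed from that release):

    # Fill every empty secret in-place with a random value
    cd /opt/openstack-ansible
    python scripts/pw-token-gen.py --file /etc/openstack_deploy/user_secrets.yml
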
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/add-sfc-repos-and-inventory-master.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/add-sfc-repos-and-inventory-master.yml
deleted file mode 100644
index 1cffdf8e..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/add-sfc-repos-and-inventory-master.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-
-- name: Add networking-odl and networking-sfc repos
- copy:
- src: openstack-ansible/playbooks/defaults/repo_packages/opendaylight-master.yml
- dest: "{{OPENSTACK_OSA_PATH}}/playbooks/defaults/repo_packages/opendaylight.yml"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/add-sfc-repos-and-inventory-pike.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/add-sfc-repos-and-inventory-pike.yml
deleted file mode 100644
index 3396b83e..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/add-sfc-repos-and-inventory-pike.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-
-- name: Add networking-odl and networking-sfc repos
- copy:
- src: openstack-ansible/playbooks/defaults/repo_packages/opendaylight-pike.yml
- dest: "{{OPENSTACK_OSA_PATH}}/playbooks/defaults/repo_packages/opendaylight.yml"
-
-- name: Provide nova inventory which adds OVS-NSH hosts
- copy:
- src: openstack-ansible/playbooks/inventory_odl/env.d/nova.yml
- dest: "{{OPENSTACK_OSA_PATH}}/playbooks/inventory/env.d/nova.yml"
-
-- name: Provide neutron inventory which adds ODL hosts
- copy:
- src: openstack-ansible/playbooks/inventory_odl/env.d/neutron.yml
- dest: "{{OPENSTACK_OSA_PATH}}/playbooks/inventory/env.d/neutron.yml"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-config-files.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-config-files.yml
new file mode 100644
index 00000000..5d677d1c
--- /dev/null
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-config-files.yml
@@ -0,0 +1,20 @@
+---
+#
+
+- name: copy user_sfc_scenarios_variables.yml (SUSE)
+ template:
+ src: "{{xci_flavor}}/user_sfc_scenarios_variables_suse.yml.j2"
+ dest: "{{openstack_osa_etc_path}}/user_sfc_scenarios_variables.yml"
+ when: ansible_pkg_mgr == 'zypper'
+
+- name: copy user_sfc_scenarios_variables.yml (Ubuntu)
+ template:
+ src: "{{xci_flavor}}/user_sfc_scenarios_variables_ubuntu.yml.j2"
+ dest: "{{openstack_osa_etc_path}}/user_sfc_scenarios_variables.yml"
+ when: ansible_pkg_mgr == 'apt'
+
+# To get the mano_hosts & metering-infra_hosts variables for the inventory
+- name: copy openstack_user_config.yml
+ copy:
+ src: "{{xci_flavor}}/openstack_user_config.yml"
+ dest: "{{openstack_osa_etc_path}}/openstack_user_config.yml"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-files-master.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-files-master.yml
deleted file mode 100644
index f58de4c2..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-files-master.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-#
-
-- name: copy user_sfc_scenarios_variables.yml (SUSE)
- copy:
- src: "{{XCI_FLAVOR}}/user_sfc_scenarios_variables_suse.yml"
- dest: "{{OPENSTACK_OSA_ETC_PATH}}/user_sfc_scenarios_variables.yml"
- when: ansible_pkg_mgr == 'zypper'
-
-- name: copy user_sfc_scenarios_variables.yml (Ubuntu)
- copy:
- src: "{{XCI_FLAVOR}}/user_sfc_scenarios_variables_ubuntu.yml"
- dest: "{{OPENSTACK_OSA_ETC_PATH}}/user_sfc_scenarios_variables.yml"
- when: ansible_pkg_mgr == 'apt'
-
-- name: copy OPNFV role requirements
- copy:
- src: "ansible-role-requirements-master.yml"
- dest: "{{OPENSTACK_OSA_PATH}}/ansible-role-requirements.yml"
-
-- name: copy openstack_services.yml with tacker
- copy:
- src: "tacker_files/openstack_services_master.yml"
- dest: "{{OPENSTACK_OSA_PATH}}/playbooks/defaults/repo_packages/openstack_services.yml"
-
-# To get the mano_host variable (can only be defined here for the inventory)
-- name: copy openstack_user_config.yml
- copy:
- src: "tacker_files/{{XCI_FLAVOR}}/openstack_user_config.yml"
- dest: "{{OPENSTACK_OSA_ETC_PATH}}/openstack_user_config.yml"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-files-pike.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-files-pike.yml
deleted file mode 100644
index 5459dfed..00000000
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/copy-OSA-files-pike.yml
+++ /dev/null
@@ -1,57 +0,0 @@
----
-#
-
-- name: copy user_sfc_scenarios_variables.yml
- copy:
- src: "{{XCI_FLAVOR}}/user_sfc_scenarios_variables_pike.yml"
- dest: "{{OPENSTACK_OSA_ETC_PATH}}/user_sfc_scenarios_variables.yml"
-
-- name: copy OPNFV role requirements
- copy:
- src: "ansible-role-requirements-pike.yml"
- dest: "{{OPENSTACK_OSA_PATH}}/ansible-role-requirements.yml"
-
-- name: copy openstack_user_config.yml
- copy:
- src: "tacker_files/{{XCI_FLAVOR}}/openstack_user_config.yml"
- dest: "{{OPENSTACK_OSA_ETC_PATH}}/openstack_user_config.yml"
-
-- name: copy tacker inventory file
- copy:
- src: "tacker_files/tacker.yml"
- dest: "{{OPENSTACK_OSA_ETC_PATH}}/env.d/tacker.yml"
-
-- name: copy user_secrets.yml for tacker
- copy:
- src: "tacker_files/user_secrets.yml"
- dest: "{{OPENSTACK_OSA_ETC_PATH}}/user_secrets.yml"
-
-- name: copy haproxy_config.yml for tacker
- copy:
- src: "tacker_files/haproxy_config.yml"
- dest: "{{OPENSTACK_OSA_PATH}}/group_vars/all/haproxy_config.yml"
-
-- name: copy openstack_services.yml with tacker
- copy:
- src: "tacker_files/openstack_services_pike.yml"
- dest: "{{OPENSTACK_OSA_PATH}}/playbooks/defaults/repo_packages/openstack_services.yml"
-
-- name: copy all/tacker.yml
- copy:
- src: "tacker_files/all_tacker.yml"
- dest: "{{OPENSTACK_OSA_PATH}}/group_vars/all/tacker.yml"
-
-- name: copy tacker_all.yml
- copy:
- src: "tacker_files/tacker_all.yml"
- dest: "{{OPENSTACK_OSA_PATH}}/group_vars/tacker_all.yml"
-
-- name: copy setup-openstack.yml
- copy:
- src: "tacker_files/setup-openstack.yml"
- dest: "{{OPENSTACK_OSA_PATH}}/playbooks/setup-openstack.yml"
-
-- name: copy os-tacker-install.yml
- copy:
- src: "tacker_files/os-tacker-install.yml"
- dest: "{{OPENSTACK_OSA_PATH}}/playbooks/os-tacker-install.yml"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/main.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/main.yml
index 819ef203..f3b4e736 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/main.yml
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/main.yml
@@ -8,18 +8,5 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-- name: Add SFC repos and inventory for Pike
- include: add-sfc-repos-and-inventory-pike.yml
- when: OPENSTACK_OSA_VERSION == "stable/pike"
-
-- name: Add SFC repos and inventory for master
- include: add-sfc-repos-and-inventory-master.yml
- when: OPENSTACK_OSA_VERSION != "stable/pike"
-
-- name: Copy the OSA not-yet-upstreamed files for Pike
- include: copy-OSA-files-pike.yml
- when: OPENSTACK_OSA_VERSION == "stable/pike"
-
-- name: Copy the OSA not-yet-upstreamed files for master
- include: copy-OSA-files-master.yml
- when: OPENSTACK_OSA_VERSION != "stable/pike"
+- name: Copy the OSA config files
+ include: copy-OSA-config-files.yml
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/post-deployment.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/post-deployment.yml
new file mode 100644
index 00000000..837a8ee3
--- /dev/null
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/tasks/post-deployment.yml
@@ -0,0 +1,17 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE Linux GmbH
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: "Fetch the ip of the neutron server container"
+ shell: 'grep controller00_neutron_server_container -n1 /etc/openstack_deploy/openstack_inventory.json | grep ansible_host | cut -d":" -f2 | cut -d "\"" -f2'
+ register: ip
+ changed_when: False
+
+- name: Fetch the ml2_conf.ini to process ODL variables
+ command: "scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no {{ ip.stdout }}:/etc/neutron/plugins/ml2/ml2_conf.ini /tmp/ml2_conf.ini"
+ changed_when: False
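
The grep pipeline above scrapes the container address out of openstack_inventory.json by line position, which is fragile. A hedged alternative (assuming the file follows the standard Ansible dynamic-inventory layout with a _meta.hostvars section) queries the JSON structurally:

    # Extract ansible_host for the controller00 neutron server container
    jq -r '._meta.hostvars | to_entries[]
           | select(.key | test("controller00_neutron_server_container"))
           | .value.ansible_host' /etc/openstack_deploy/openstack_inventory.json
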
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/user_sfc_scenarios_variables_suse.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/ha/user_sfc_scenarios_variables_suse.yml.j2
index 0af72600..8cec75c3 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/user_sfc_scenarios_variables_suse.yml
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/ha/user_sfc_scenarios_variables_suse.yml.j2
@@ -18,6 +18,7 @@
# ## the defaults for each role to find additional override options.
# ##
+{% raw %}
# # Debug and Verbose options.
debug: false
@@ -43,7 +44,7 @@ neutron_opendaylight_conf_ini_overrides:
username: "admin"
password: "admin"
port_binding_controller: "pseudo-agentdb-binding"
- url: "http://{{ hostvars[groups['neutron_server'][0]]['ansible_eth1']['ipv4']['address'] }}:8080/controller/nb/v2/neutron"
+ url: "http://{{ hostvars[groups['neutron_server'][0]]['ansible_eth1']['ipv4']['address'] }}:8180/controller/nb/v2/neutron"
neutron_plugin_base:
- odl-router_v2
- metering
@@ -115,3 +116,11 @@ openstack_host_specific_kernel_modules:
- name: openvswitch
openstack_host_metal_distro_packages: "{{ ovs_nsh_required_metal_packages + _openstack_host_metal_distro_packages }}"
+
+# To avoid functest complaining because cirros vm gets stuck trying to contact the metadata server
+neutron_dnsmasq_force_metadata: True
+{% endraw %}
+
+{% if odl_repo_version is defined %}
+odl_version: "{{ odl_repo_version }}"
+{% endif %}
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/templates/ha/user_sfc_scenarios_variables_ubuntu.yml.j2 b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/ha/user_sfc_scenarios_variables_ubuntu.yml.j2
new file mode 100644
index 00000000..c5b1f19b
--- /dev/null
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/ha/user_sfc_scenarios_variables_ubuntu.yml.j2
@@ -0,0 +1,131 @@
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ##
+# ## This file contains commonly used overrides for convenience. Please inspect
+# ## the defaults for each role to find additional override options.
+# ##
+
+{% raw %}
+# # Debug and Verbose options.
+debug: false
+
+haproxy_keepalived_external_vip_cidr: "192.168.122.3/32"
+haproxy_keepalived_internal_vip_cidr: "172.29.236.11/32"
+haproxy_keepalived_external_interface: br-vlan
+haproxy_keepalived_internal_interface: br-mgmt
+gnocchi_db_sync_options: ""
+
+ovs_nsh_support: true
+cluster: true
+
+# Ensure the openvswitch kernel module is loaded
+# openstack_host_specific_kernel_modules:
+# - name: "openvswitch"
+# pattern: "CONFIG_OPENVSWITCH"
+# group: "network_hosts"
+
+# Use OpenDaylight SDN Controller
+neutron_plugin_type: "ml2.opendaylight"
+neutron_opendaylight_conf_ini_overrides:
+ ml2_odl:
+ username: "admin"
+ password: "admin"
+ port_binding_controller: "pseudo-agentdb-binding"
+ url: "http://{{ hostvars[groups['neutron_server'][0]]['ansible_eth1']['ipv4']['address'] }}:8180/controller/nb/v2/neutron"
+neutron_plugin_base:
+ - odl-router_v2
+ - metering
+ - networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin
+ - networking_sfc.services.sfc.plugin.SfcPlugin
+provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ host_bind_override: "eth12"
+ type: "vlan"
+ range: "1:1"
+ net_name: "vlan"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# This repo is used for OVS 2.9.2
+user_external_repos_list:
+ - repo: 'deb http://ppa.launchpad.net/mardim/mardim-ppa/ubuntu xenial main'
+
+user_external_repo_keys_list:
+ - id: 6E2EEDF1A3925D9D727EB1176FAD8BA42AAAEB9F
+ keyserver: keyserver.ubuntu.com
+
+openstack_host_specific_kernel_modules:
+ - name: openvswitch
+
+ovs_nsh_required_metal_packages:
+ - python-six
+ - python3-six
+ - linux-headers-{{ ansible_kernel }}
+ - openvswitch-datapath-dkms
+
+openstack_host_metal_distro_packages: "{{ ovs_nsh_required_metal_packages + _openstack_host_metal_distro_packages }}"
+
+# To avoid functest failures when the cirros VM gets stuck trying to contact the metadata server
+neutron_dnsmasq_force_metadata: True
+{% endraw %}
+
+{% if odl_repo_version is defined %}
+odl_version: "{{ odl_repo_version }}"
+{% endif %}
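
The mardim PPA referenced above provides the NSH-capable OVS 2.9.2 packages, including openvswitch-datapath-dkms rebuilt against the running kernel. A hedged post-deployment check that apt resolves the package from that PPA and that the module actually loaded:

    # Confirm the DKMS datapath package and its origin
    apt-cache policy openvswitch-datapath-dkms
    # Check the loaded openvswitch module version
    modinfo openvswitch | grep -i '^version'
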
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/user_sfc_scenarios_variables_suse.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/mini/user_sfc_scenarios_variables_suse.yml.j2
index 0962df7e..6c46b963 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/user_sfc_scenarios_variables_suse.yml
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/mini/user_sfc_scenarios_variables_suse.yml.j2
@@ -18,6 +18,7 @@
# ## the defaults for each role to find additional override options.
# ##
+{% raw %}
# # Debug and Verbose options.
debug: false
@@ -42,7 +43,7 @@ neutron_opendaylight_conf_ini_overrides:
username: "admin"
password: "admin"
port_binding_controller: "pseudo-agentdb-binding"
- url: "http://{{ hostvars[groups['neutron_server'][0]]['ansible_eth1']['ipv4']['address'] }}:8080/controller/nb/v2/neutron"
+ url: "http://{{ hostvars[groups['neutron_server'][0]]['ansible_eth1']['ipv4']['address'] }}:8180/controller/nb/v2/neutron"
neutron_plugin_base:
- odl-router_v2
- metering
@@ -114,3 +115,11 @@ openstack_host_specific_kernel_modules:
- name: openvswitch
openstack_host_metal_distro_packages: "{{ ovs_nsh_required_metal_packages + _openstack_host_metal_distro_packages }}"
+
+# To avoid functest failures when the cirros VM gets stuck trying to contact the metadata server
+neutron_dnsmasq_force_metadata: True
+{% endraw %}
+
+{% if odl_repo_version is defined %}
+odl_version: "{{ odl_repo_version }}"
+{% endif %}
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/user_sfc_scenarios_variables_ubuntu.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/mini/user_sfc_scenarios_variables_ubuntu.yml.j2
index 86529562..0194456e 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/user_sfc_scenarios_variables_ubuntu.yml
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/mini/user_sfc_scenarios_variables_ubuntu.yml.j2
@@ -18,6 +18,7 @@
# ## the defaults for each role to find additional override options.
# ##
+{% raw %}
# # Debug and Verbose options.
debug: false
@@ -42,7 +43,7 @@ neutron_opendaylight_conf_ini_overrides:
username: "admin"
password: "admin"
port_binding_controller: "pseudo-agentdb-binding"
- url: "http://{{ hostvars[groups['neutron_server'][0]]['ansible_eth1']['ipv4']['address'] }}:8080/controller/nb/v2/neutron"
+ url: "http://{{ hostvars[groups['neutron_server'][0]]['ansible_eth1']['ipv4']['address'] }}:8180/controller/nb/v2/neutron"
neutron_plugin_base:
- odl-router_v2
- metering
@@ -101,14 +102,29 @@ provider_networks:
- cinder_volume
- nova_compute
+# This repo is for OVS 2.9.2
user_external_repos_list:
- - repo: 'ppa:mardim/mardim-ppa'
+ - repo: 'deb http://ppa.launchpad.net/mardim/mardim-ppa/ubuntu xenial main'
+
+user_external_repo_keys_list:
+ - id: 6E2EEDF1A3925D9D727EB1176FAD8BA42AAAEB9F
+ keyserver: keyserver.ubuntu.com
openstack_host_specific_kernel_modules:
- name: openvswitch
ovs_nsh_required_metal_packages:
+ - python-six
+ - python3-six
- linux-headers-{{ ansible_kernel }}
- openvswitch-datapath-dkms
openstack_host_metal_distro_packages: "{{ ovs_nsh_required_metal_packages + _openstack_host_metal_distro_packages }}"
+
+# To avoid functest failures when the cirros VM gets stuck trying to contact the metadata server
+neutron_dnsmasq_force_metadata: True
+{% endraw %}
+
+{% if odl_repo_version is defined %}
+odl_version: "{{ odl_repo_version }}"
+{% endif %}
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/user_sfc_scenarios_variables_suse.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/noha/user_sfc_scenarios_variables_suse.yml.j2
index 0962df7e..6c46b963 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/noha/user_sfc_scenarios_variables_suse.yml
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/noha/user_sfc_scenarios_variables_suse.yml.j2
@@ -18,6 +18,7 @@
# ## the defaults for each role to find additional override options.
# ##
+{% raw %}
# # Debug and Verbose options.
debug: false
@@ -42,7 +43,7 @@ neutron_opendaylight_conf_ini_overrides:
username: "admin"
password: "admin"
port_binding_controller: "pseudo-agentdb-binding"
- url: "http://{{ hostvars[groups['neutron_server'][0]]['ansible_eth1']['ipv4']['address'] }}:8080/controller/nb/v2/neutron"
+ url: "http://{{ hostvars[groups['neutron_server'][0]]['ansible_eth1']['ipv4']['address'] }}:8180/controller/nb/v2/neutron"
neutron_plugin_base:
- odl-router_v2
- metering
@@ -114,3 +115,11 @@ openstack_host_specific_kernel_modules:
- name: openvswitch
openstack_host_metal_distro_packages: "{{ ovs_nsh_required_metal_packages + _openstack_host_metal_distro_packages }}"
+
+# To avoid functest failures when the cirros VM gets stuck trying to contact the metadata server
+neutron_dnsmasq_force_metadata: True
+{% endraw %}
+
+{% if odl_repo_version is defined %}
+odl_version: "{{ odl_repo_version }}"
+{% endif %}
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/user_sfc_scenarios_variables_ubuntu.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/noha/user_sfc_scenarios_variables_ubuntu.yml.j2
index 86529562..1ec821d5 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/mini/user_sfc_scenarios_variables_ubuntu.yml
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/templates/noha/user_sfc_scenarios_variables_ubuntu.yml.j2
@@ -18,6 +18,7 @@
# ## the defaults for each role to find additional override options.
# ##
+{% raw %}
# # Debug and Verbose options.
debug: false
@@ -42,7 +43,7 @@ neutron_opendaylight_conf_ini_overrides:
username: "admin"
password: "admin"
port_binding_controller: "pseudo-agentdb-binding"
- url: "http://{{ hostvars[groups['neutron_server'][0]]['ansible_eth1']['ipv4']['address'] }}:8080/controller/nb/v2/neutron"
+ url: "http://{{ hostvars[groups['neutron_server'][0]]['ansible_eth1']['ipv4']['address'] }}:8180/controller/nb/v2/neutron"
neutron_plugin_base:
- odl-router_v2
- metering
@@ -101,14 +102,29 @@ provider_networks:
- cinder_volume
- nova_compute
+# This repo is used for OVS 2.9.2
user_external_repos_list:
- - repo: 'ppa:mardim/mardim-ppa'
+ - repo: 'deb http://ppa.launchpad.net/mardim/mardim-ppa/ubuntu xenial main'
+
+user_external_repo_keys_list:
+ - id: 6E2EEDF1A3925D9D727EB1176FAD8BA42AAAEB9F
+ keyserver: keyserver.ubuntu.com
openstack_host_specific_kernel_modules:
- name: openvswitch
ovs_nsh_required_metal_packages:
+ - python-six
+ - python3-six
- linux-headers-{{ ansible_kernel }}
- openvswitch-datapath-dkms
openstack_host_metal_distro_packages: "{{ ovs_nsh_required_metal_packages + _openstack_host_metal_distro_packages }}"
+
+# To avoid functest failures when the cirros VM gets stuck trying to contact the metadata server
+neutron_dnsmasq_force_metadata: True
+{% endraw %}
+
+{% if odl_repo_version is defined %}
+odl_version: "{{ odl_repo_version }}"
+{% endif %}
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/vars/main.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/vars/main.yml
new file mode 100644
index 00000000..629b50c7
--- /dev/null
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/vars/main.yml
@@ -0,0 +1,2 @@
+---
+odl_repo_version: "{{ lookup('env','ODL_VERSION') }}"
diff --git a/scenarios/os-odl-sfc/xci_overrides b/scenarios/os-odl-sfc/xci_overrides
index 0f8f7436..ecbff0ee 100644
--- a/scenarios/os-odl-sfc/xci_overrides
+++ b/scenarios/os-odl-sfc/xci_overrides
@@ -3,3 +3,8 @@ if [[ $XCI_FLAVOR == "ha" ]]; then
else
export VM_MEMORY_SIZE=16384
fi
+
+# Until the Ansible feature below is implemented, ODL_VERSION must be initialized:
+# https://github.com/ansible/ansible/issues/17329
+# otherwise the lookup in vars/main.yml returns an empty string when it is not defined
+export ODL_VERSION=${ODL_VERSION:-latest_release}
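
Since vars/main.yml resolves odl_repo_version from the environment, the ODL version can be overridden per deployment; a hedged usage sketch, with the deploy entry point and version string assumed from the XCI workflow:

    # Deploy the scenario against a specific ODL version instead of the default
    export ODL_VERSION=fluorine
    ./xci-deploy.sh
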
diff --git a/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/README b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/README
new file mode 100644
index 00000000..b65c1d52
--- /dev/null
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/README
@@ -0,0 +1,12 @@
+This is the role which deploys the os-odl-sfc_osm scenario in xci.
+
+This role currently works with:
+
+- OpenStack stable/rocky
+- ODL Fluorine
+- OVS 2.9.2
+- OSM master
+- Ubuntu 16.04
+
+Follow this link:
+https://wiki.opnfv.org/display/sfc/Deploy+OPNFV+SFC+scenarios
diff --git a/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/defaults/main.yml b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/defaults/main.yml
new file mode 100644
index 00000000..3e9829cc
--- /dev/null
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/defaults/main.yml
@@ -0,0 +1,22 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+lxd_bridge: "lxdbr0"
+lxd_domain: "lxd"
+lxd_ipv4_addr: "10.0.8.1"
+lxd_ipv4_netmask: "255.255.255.0"
+lxd_ipv4_network: "10.0.8.1/24"
+lxd_ipv4_dhcp_range: "10.0.8.2,10.0.8.254"
+lxd_ipv4_dhcp_max: "250"
+lxd_ipv4_nat: "true"
+lxd_ipv6_addr: ""
+lxd_ipv6_mask: ""
+lxd_ipv6_network: ""
+lxd_ipv6_nat: "false"
+lxd_ipv6_proxy: "false"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/ha/openstack_user_config.yml b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/files/ha/openstack_user_config.yml
index 6d2b490a..f36f6502 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/ha/openstack_user_config.yml
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/files/ha/openstack_user_config.yml
@@ -13,6 +13,7 @@ used_ips:
global_overrides:
internal_lb_vip_address: 172.29.236.222
external_lb_vip_address: 192.168.122.220
+ barbican_keys_backend: true
tunnel_bridge: "br-vxlan"
management_bridge: "br-mgmt"
provider_networks:
@@ -76,18 +77,18 @@ shared-infra_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# repository (apt cache, python packages, etc)
repo-infra_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# load balancer
# Ideally the load balancer should not use the Infrastructure hosts.
@@ -96,9 +97,9 @@ haproxy_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# rsyslog server
# log_hosts:
@@ -114,18 +115,27 @@ identity_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
+
+# barbican
+key-manager_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.14
+ controller02:
+ ip: 172.29.236.15
# cinder api services
storage-infra_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# glance
# The settings here are repeated for each infra host.
@@ -138,27 +148,27 @@ image_hosts:
container_vars:
limit_container_types: glance
glance_nfs_client:
- - server: "172.29.244.14"
+ - server: "172.29.244.12"
remote_path: "/images"
local_path: "/var/lib/glance/images"
type: "nfs"
options: "_netdev,auto"
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
container_vars:
limit_container_types: glance
glance_nfs_client:
- - server: "172.29.244.14"
+ - server: "172.29.244.12"
remote_path: "/images"
local_path: "/var/lib/glance/images"
type: "nfs"
options: "_netdev,auto"
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
container_vars:
limit_container_types: glance
glance_nfs_client:
- - server: "172.29.244.14"
+ - server: "172.29.244.12"
remote_path: "/images"
local_path: "/var/lib/glance/images"
type: "nfs"
@@ -169,52 +179,52 @@ compute-infra_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# heat
orchestration_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# horizon
dashboard_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
-# tacker
-mano_hosts:
+# ceilometer
+metering-infra_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# neutron server, agents (L3, etc)
network_hosts:
controller00:
ip: 172.29.236.11
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
# nova hypervisors
compute_hosts:
compute00:
- ip: 172.29.236.14
+ ip: 172.29.236.12
compute01:
- ip: 172.29.236.15
+ ip: 172.29.236.13
# cinder volume hosts (NFS-backed)
# The settings here are repeated for each infra host.
@@ -233,10 +243,10 @@ storage_hosts:
nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
nfs_shares_config: /etc/cinder/nfs_shares
shares:
- - ip: "172.29.244.14"
+ - ip: "172.29.244.12"
share: "/volumes"
controller01:
- ip: 172.29.236.12
+ ip: 172.29.236.14
container_vars:
cinder_backends:
limit_container_types: cinder_volume
@@ -246,10 +256,10 @@ storage_hosts:
nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
nfs_shares_config: /etc/cinder/nfs_shares
shares:
- - ip: "172.29.244.14"
+ - ip: "172.29.244.12"
share: "/volumes"
controller02:
- ip: 172.29.236.13
+ ip: 172.29.236.15
container_vars:
cinder_backends:
limit_container_types: cinder_volume
@@ -259,5 +269,5 @@ storage_hosts:
nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
nfs_shares_config: /etc/cinder/nfs_shares
shares:
- - ip: "172.29.244.14"
+ - ip: "172.29.244.12"
share: "/volumes"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/mini/openstack_user_config.yml b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/files/mini/openstack_user_config.yml
index ac17d89d..09d6aa37 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/mini/openstack_user_config.yml
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/files/mini/openstack_user_config.yml
@@ -13,6 +13,7 @@ used_ips:
global_overrides:
internal_lb_vip_address: 172.29.236.11
external_lb_vip_address: 192.168.122.3
+ barbican_keys_backend: true
tunnel_bridge: "br-vxlan"
management_bridge: "br-mgmt"
provider_networks:
@@ -102,6 +103,11 @@ identity_hosts:
controller00:
ip: 172.29.236.11
+# barbican
+key-manager_hosts:
+ controller00:
+ ip: 172.29.236.11
+
# cinder api services
storage-infra_hosts:
controller00:
@@ -139,8 +145,8 @@ dashboard_hosts:
controller00:
ip: 172.29.236.11
-# tacker
-mano_hosts:
+# ceilometer
+metering-infra_hosts:
controller00:
ip: 172.29.236.11
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/noha/openstack_user_config.yml b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/files/noha/openstack_user_config.yml
index ee8889d2..d914991e 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/noha/openstack_user_config.yml
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/files/noha/openstack_user_config.yml
@@ -13,6 +13,7 @@ used_ips:
global_overrides:
internal_lb_vip_address: 172.29.236.11
external_lb_vip_address: 192.168.122.3
+ barbican_keys_backend: true
tunnel_bridge: "br-vxlan"
management_bridge: "br-mgmt"
provider_networks:
@@ -102,6 +103,11 @@ identity_hosts:
controller00:
ip: 172.29.236.11
+# barbican
+key-manager_hosts:
+ controller00:
+ ip: 172.29.236.11
+
# cinder api services
storage-infra_hosts:
controller00:
@@ -139,8 +145,8 @@ dashboard_hosts:
controller00:
ip: 172.29.236.11
-# tacker
-mano_hosts:
+# ceilometer
+metering-infra_hosts:
controller00:
ip: 172.29.236.11
diff --git a/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/configure-opnfvhost.yml b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/configure-opnfvhost.yml
new file mode 100644
index 00000000..3a0226b0
--- /dev/null
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/configure-opnfvhost.yml
@@ -0,0 +1,74 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: install OSM required packages
+ package:
+ name: "{{ osm_required_packages[ansible_pkg_mgr] }}"
+ state: present
+
+- name: initialize LXD
+ command: "{{ item }}"
+ with_items:
+ - lxd init --auto
+ - lxd waitready
+ changed_when: False
+- name: stop lxd-bridge service
+ systemd:
+ name: lxd-bridge
+ state: stopped
+ daemon_reload: yes
+- name: create lxd-bridge configuration
+ template:
+ src: lxd-bridge.j2
+ dest: /etc/default/lxd-bridge
+ mode: 0755
+
+- name: ensure dnsmasq service is stopped before attempting to start lxd-bridge
+ service:
+ name: dnsmasq
+ state: stopped
+
+- name: ensure dnsmasq uses interface br-vlan for lxd-bridge
+ lineinfile:
+ path: /etc/dnsmasq.conf
+ regexp: '^interface='
+ line: 'interface=br-vlan'
+
+- name: ensure docker and lxd-bridge services are started and enabled
+ service:
+ name: "{{ item }}"
+ state: started
+ enabled: yes
+ with_items:
+ - docker
+ - lxd-bridge
+
+- name: get default interface
+ shell: route -n | awk '$1~/^0.0.0.0/ {print $8}'
+ register: default_interface
+ ignore_errors: False
+ changed_when: False
+
+- name: get mtu of the default interface {{ default_interface.stdout }}
+ shell: ip addr show {{ default_interface.stdout }} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}'
+ register: default_interface_mtu
+ ignore_errors: False
+ changed_when: False
+
+- name: set lxdbr0 mtu to {{ default_interface_mtu.stdout }}
+ command: ifconfig lxdbr0 mtu {{ default_interface_mtu.stdout }}
+ ignore_errors: False
+ changed_when: False
+
+- name: add devuser to lxd and docker groups
+ user:
+ name: devuser
+ groups: lxd, docker
+ append: yes
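The last three shell tasks above derive the default-route interface, read its MTU, and copy that MTU onto lxdbr0 so the LXD containers do not fragment tunnelled traffic. The same discovery can be done without the route/perl pipeline; a sketch, assuming a Linux /proc and /sys layout:

    # mtu_probe.py -- illustrative equivalent of the route/perl tasks above.
    def default_interface():
        """Return the interface that carries the 0.0.0.0/0 route."""
        with open('/proc/net/route') as f:
            for line in f.readlines()[1:]:
                fields = line.split()
                if fields[1] == '00000000':   # destination 0.0.0.0
                    return fields[0]

    def interface_mtu(iface):
        with open('/sys/class/net/{0}/mtu'.format(iface)) as f:
            return int(f.read().strip())

    iface = default_interface()
    print('{0} mtu {1}'.format(iface, interface_mtu(iface)))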
diff --git a/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/copy-OSA-config-files.yml b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/copy-OSA-config-files.yml
new file mode 100644
index 00000000..96592051
--- /dev/null
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/copy-OSA-config-files.yml
@@ -0,0 +1,20 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Venkata Harshavardhan Reddy Allu and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: copy user_sfc_scenarios_variables.yml (Ubuntu)
+ template:
+ src: "{{xci_flavor}}/user_sfc_scenarios_variables_ubuntu.yml.j2"
+ dest: "{{openstack_osa_etc_path}}/user_sfc_scenarios_variables.yml"
+ when: ansible_pkg_mgr == 'apt'
+
+- name: copy openstack_user_config.yml
+ copy:
+ src: "{{xci_flavor}}/openstack_user_config.yml"
+ dest: "{{openstack_osa_etc_path}}/openstack_user_config.yml"
diff --git a/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/install-osm.yml b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/install-osm.yml
new file mode 100644
index 00000000..5c12e333
--- /dev/null
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/install-osm.yml
@@ -0,0 +1,32 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: clone OSM devops repo and checkout version {{ osm_devops_version }}
+ become_user: "{{ osm_install_user }}"
+ become: yes
+ git:
+ repo: "{{ osm_devops_git_url }}"
+ dest: "{{ osm_devops_clone_location }}"
+ version: "{{ osm_devops_version }}"
+
+- name: install OSM
+ become_user: "{{ osm_install_user }}"
+ become: yes
+ command: "/bin/bash ./full_install_osm.sh --test -b {{ osm_devops_version }} --nolxd -y"
+ args:
+ chdir: "{{ osm_devops_clone_location }}/installers"
+ creates: "/usr/bin/osm"
+
+- name: create osmrc file
+ copy:
+ dest: "{{ osmrc_file_dest }}"
+ content: |
+ export OSM_HOSTNAME=127.0.0.1
+ export OSM_OL005=True
diff --git a/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/main.yml b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/main.yml
new file mode 100644
index 00000000..e8a3ea7f
--- /dev/null
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 SUSE Linux GmbH and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: Copy the OSA config files
+ include: copy-OSA-config-files.yml
diff --git a/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/post-deployment.yml b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/post-deployment.yml
new file mode 100644
index 00000000..a181ce77
--- /dev/null
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/post-deployment.yml
@@ -0,0 +1,27 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 SUSE Linux GmbH and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: Configure opnfv host
+ include: configure-opnfvhost.yml
+
+- name: Install OSM
+ include: install-osm.yml
+
+- name: Register OpenStack as VIM
+ include: register-vim.yml
+
+# fetch ODL variables for functest
+- name: Fetch the ip of the neutron server container
+ shell: 'grep controller00_neutron_server_container -n1 /etc/openstack_deploy/openstack_inventory.json | grep ansible_host | cut -d":" -f2 | cut -d "\"" -f2'
+ register: ip
+ changed_when: False
+- name: Fetch the ml2_conf.ini to process ODL variables
+ command: "scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no {{ ip.stdout }}:/etc/neutron/plugins/ml2/ml2_conf.ini /tmp/ml2_conf.ini"
+ changed_when: False
diff --git a/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/register-vim.yml b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/register-vim.yml
new file mode 100644
index 00000000..07e044bf
--- /dev/null
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/tasks/register-vim.yml
@@ -0,0 +1,30 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Venkata Harshavardhan Reddy Allu and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# This is a simple fix to wait for the OSM services in
+# the docker containers to start functioning as expected.
+# TODO: Once healthchecks are added to the OSM
+# container stack, use them to identify the status
+# of the containers and modify this task.
+- name: Wait till the OSM services are ready
+ wait_for: timeout=120
+ delegate_to: localhost
+
+- name: Register OpenStack as VIM
+ shell: ". {{ osmrc_file_dest }} ;
+ osm vim-create \
+ --name openstack-site \
+ --user admin \
+ --password {{ openrc_os_password }} \
+ --tenant admin \
+ --account_type openstack \
+ --auth_url {{ openrc_os_auth_url }} \
+ --config='{insecure: true}'"
+ changed_when: False
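As the comment above says, the fixed wait_for of 120 seconds is a stopgap until the OSM containers ship healthchecks. A readiness poll is the likely replacement; a sketch, assuming the OSM NBI answers on 127.0.0.1:9999 (an assumed endpoint, not a value taken from this change):

    # wait_for_osm.py -- illustrative readiness poll, not part of this change.
    import socket
    import time

    def wait_for_port(host='127.0.0.1', port=9999, timeout=120, interval=3):
        """Return True once a TCP connect succeeds, False on timeout."""
        deadline = time.time() + timeout
        while time.time() < deadline:
            try:
                sock = socket.create_connection((host, port), timeout=interval)
                sock.close()
                return True
            except socket.error:
                time.sleep(interval)
        return False

    if __name__ == '__main__':
        print('OSM ready' if wait_for_port() else 'timed out waiting for OSM')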
diff --git a/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/ha/user_sfc_scenarios_variables_ubuntu.yml.j2 b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/ha/user_sfc_scenarios_variables_ubuntu.yml.j2
new file mode 100644
index 00000000..c5b1f19b
--- /dev/null
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/ha/user_sfc_scenarios_variables_ubuntu.yml.j2
@@ -0,0 +1,131 @@
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ##
+# ## This file contains commonly used overrides for convenience. Please inspect
+# ## the defaults for each role to find additional override options.
+# ##
+
+{% raw %}
+# # Debug and Verbose options.
+debug: false
+
+haproxy_keepalived_external_vip_cidr: "192.168.122.3/32"
+haproxy_keepalived_internal_vip_cidr: "172.29.236.11/32"
+haproxy_keepalived_external_interface: br-vlan
+haproxy_keepalived_internal_interface: br-mgmt
+gnocchi_db_sync_options: ""
+
+ovs_nsh_support: true
+cluster: true
+
+# Ensure the openvswitch kernel module is loaded
+# openstack_host_specific_kernel_modules:
+# - name: "openvswitch"
+# pattern: "CONFIG_OPENVSWITCH"
+# group: "network_hosts"
+
+# Use OpenDaylight SDN Controller
+neutron_plugin_type: "ml2.opendaylight"
+neutron_opendaylight_conf_ini_overrides:
+ ml2_odl:
+ username: "admin"
+ password: "admin"
+ port_binding_controller: "pseudo-agentdb-binding"
+ url: "http://{{ hostvars[groups['neutron_server'][0]]['ansible_eth1']['ipv4']['address'] }}:8180/controller/nb/v2/neutron"
+neutron_plugin_base:
+ - odl-router_v2
+ - metering
+ - networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin
+ - networking_sfc.services.sfc.plugin.SfcPlugin
+provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ host_bind_override: "eth12"
+ type: "vlan"
+ range: "1:1"
+ net_name: "vlan"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# This repo is used for OVS 2.9.2
+user_external_repos_list:
+ - repo: 'deb http://ppa.launchpad.net/mardim/mardim-ppa/ubuntu xenial main'
+
+user_external_repo_keys_list:
+ - id: 6E2EEDF1A3925D9D727EB1176FAD8BA42AAAEB9F
+ keyserver: keyserver.ubuntu.com
+
+openstack_host_specific_kernel_modules:
+ - name: openvswitch
+
+ovs_nsh_required_metal_packages:
+ - python-six
+ - python3-six
+ - linux-headers-{{ ansible_kernel }}
+ - openvswitch-datapath-dkms
+
+openstack_host_metal_distro_packages: "{{ ovs_nsh_required_metal_packages + _openstack_host_metal_distro_packages }}"
+
+# Avoid functest failures caused by the cirros VM getting stuck trying to reach the metadata server
+neutron_dnsmasq_force_metadata: True
+{% endraw %}
+
+{% if odl_repo_version is defined %}
+odl_version: "{{ odl_repo_version }}"
+{% endif %}
diff --git a/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/lxd-bridge.j2 b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/lxd-bridge.j2
new file mode 100644
index 00000000..707cc465
--- /dev/null
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/lxd-bridge.j2
@@ -0,0 +1,16 @@
+USE_LXD_BRIDGE="true"
+LXD_BRIDGE="{{ lxd_bridge }}"
+UPDATE_PROFILE="true"
+LXD_CONFILE=""
+LXD_DOMAIN="{{ lxd_domain }}"
+LXD_IPV4_ADDR="{{ lxd_ipv4_addr }}"
+LXD_IPV4_NETMASK="{{ lxd_ipv4_netmask }}"
+LXD_IPV4_NETWORK="{{ lxd_ipv4_network }}"
+LXD_IPV4_DHCP_RANGE="{{ lxd_ipv4_dhcp_range }}"
+LXD_IPV4_DHCP_MAX="{{ lxd_ipv4_dhcp_max }}"
+LXD_IPV4_NAT="{{ lxd_ipv4_nat }}"
+LXD_IPV6_ADDR="{{ lxd_ipv6_addr }}"
+LXD_IPV6_MASK="{{ lxd_ipv6_mask }}"
+LXD_IPV6_NETWORK="{{ lxd_ipv6_network }}"
+LXD_IPV6_NAT="{{ lxd_ipv6_nat }}"
+LXD_IPV6_PROXY="{{ lxd_ipv6_proxy }}"
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/user_sfc_scenarios_variables_ubuntu.yml b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/mini/user_sfc_scenarios_variables_ubuntu.yml.j2
index f58a1794..0194456e 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/files/ha/user_sfc_scenarios_variables_ubuntu.yml
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/mini/user_sfc_scenarios_variables_ubuntu.yml.j2
@@ -18,6 +18,7 @@
# ## the defaults for each role to find additional override options.
# ##
+{% raw %}
# # Debug and Verbose options.
debug: false
@@ -28,7 +29,6 @@ haproxy_keepalived_internal_interface: br-mgmt
gnocchi_db_sync_options: ""
ovs_nsh_support: true
-cluster: true
# Ensure the openvswitch kernel module is loaded
# openstack_host_specific_kernel_modules:
@@ -43,7 +43,7 @@ neutron_opendaylight_conf_ini_overrides:
username: "admin"
password: "admin"
port_binding_controller: "pseudo-agentdb-binding"
- url: "http://{{ hostvars[groups['neutron_server'][0]]['ansible_eth1']['ipv4']['address'] }}:8080/controller/nb/v2/neutron"
+ url: "http://{{ hostvars[groups['neutron_server'][0]]['ansible_eth1']['ipv4']['address'] }}:8180/controller/nb/v2/neutron"
neutron_plugin_base:
- odl-router_v2
- metering
@@ -102,14 +102,29 @@ provider_networks:
- cinder_volume
- nova_compute
+# This repo is used for OVS 2.9.2
user_external_repos_list:
- - repo: 'ppa:mardim/mardim-ppa'
+ - repo: 'deb http://ppa.launchpad.net/mardim/mardim-ppa/ubuntu xenial main'
+
+user_external_repo_keys_list:
+ - id: 6E2EEDF1A3925D9D727EB1176FAD8BA42AAAEB9F
+ keyserver: keyserver.ubuntu.com
openstack_host_specific_kernel_modules:
- name: openvswitch
ovs_nsh_required_metal_packages:
+ - python-six
+ - python3-six
- linux-headers-{{ ansible_kernel }}
- openvswitch-datapath-dkms
openstack_host_metal_distro_packages: "{{ ovs_nsh_required_metal_packages + _openstack_host_metal_distro_packages }}"
+
+# Avoid functest failures caused by the cirros VM getting stuck trying to reach the metadata server
+neutron_dnsmasq_force_metadata: True
+{% endraw %}
+
+{% if odl_repo_version is defined %}
+odl_version: "{{ odl_repo_version }}"
+{% endif %}
diff --git a/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/noha/user_sfc_scenarios_variables_ubuntu.yml.j2 b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/noha/user_sfc_scenarios_variables_ubuntu.yml.j2
new file mode 100644
index 00000000..1ec821d5
--- /dev/null
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/templates/noha/user_sfc_scenarios_variables_ubuntu.yml.j2
@@ -0,0 +1,130 @@
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ##
+# ## This file contains commonly used overrides for convenience. Please inspect
+# ## the defaults for each role to find additional override options.
+# ##
+
+{% raw %}
+# # Debug and Verbose options.
+debug: false
+
+haproxy_keepalived_external_vip_cidr: "192.168.122.3/32"
+haproxy_keepalived_internal_vip_cidr: "172.29.236.11/32"
+haproxy_keepalived_external_interface: br-vlan
+haproxy_keepalived_internal_interface: br-mgmt
+gnocchi_db_sync_options: ""
+
+ovs_nsh_support: true
+
+# Ensure the openvswitch kernel module is loaded
+# openstack_host_specific_kernel_modules:
+# - name: "openvswitch"
+# pattern: "CONFIG_OPENVSWITCH"
+# group: "network_hosts"
+
+# Use OpenDaylight SDN Controller
+neutron_plugin_type: "ml2.opendaylight"
+neutron_opendaylight_conf_ini_overrides:
+ ml2_odl:
+ username: "admin"
+ password: "admin"
+ port_binding_controller: "pseudo-agentdb-binding"
+ url: "http://{{ hostvars[groups['neutron_server'][0]]['ansible_eth1']['ipv4']['address'] }}:8180/controller/nb/v2/neutron"
+neutron_plugin_base:
+ - odl-router_v2
+ - metering
+ - networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin
+ - networking_sfc.services.sfc.plugin.SfcPlugin
+provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ host_bind_override: "eth12"
+ type: "vlan"
+ range: "1:1"
+ net_name: "vlan"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# This repo is used for OVS 2.9.2
+user_external_repos_list:
+ - repo: 'deb http://ppa.launchpad.net/mardim/mardim-ppa/ubuntu xenial main'
+
+user_external_repo_keys_list:
+ - id: 6E2EEDF1A3925D9D727EB1176FAD8BA42AAAEB9F
+ keyserver: keyserver.ubuntu.com
+
+openstack_host_specific_kernel_modules:
+ - name: openvswitch
+
+ovs_nsh_required_metal_packages:
+ - python-six
+ - python3-six
+ - linux-headers-{{ ansible_kernel }}
+ - openvswitch-datapath-dkms
+
+openstack_host_metal_distro_packages: "{{ ovs_nsh_required_metal_packages + _openstack_host_metal_distro_packages }}"
+
+# Avoid functest failures caused by the cirros VM getting stuck trying to reach the metadata server
+neutron_dnsmasq_force_metadata: True
+{% endraw %}
+
+{% if odl_repo_version is defined %}
+odl_version: "{{ odl_repo_version }}"
+{% endif %}
diff --git a/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/vars/main.yml b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/vars/main.yml
new file mode 100644
index 00000000..41051830
--- /dev/null
+++ b/scenarios/os-odl-sfc_osm/role/os-odl-sfc_osm/vars/main.yml
@@ -0,0 +1,27 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+odl_repo_version: "{{ lookup('env','ODL_VERSION') }}"
+
+osm_required_packages:
+ apt:
+ - apt-transport-https
+ - ca-certificates
+ - software-properties-common
+ - docker.io
+ - snapd
+ - lxd
+
+osm_devops_version: "master"
+osm_devops_git_url: "https://osm.etsi.org/gerrit/osm/devops.git"
+osm_devops_clone_location: "/home/{{ osm_install_user }}/osm-devops"
+osm_install_user: "devuser"
+
+osmrc_file_dest: "/root/osmrc"
diff --git a/scenarios/os-odl-sfc_osm/xci_overrides b/scenarios/os-odl-sfc_osm/xci_overrides
new file mode 100644
index 00000000..ecbff0ee
--- /dev/null
+++ b/scenarios/os-odl-sfc_osm/xci_overrides
@@ -0,0 +1,10 @@
+if [[ $XCI_FLAVOR == "ha" ]]; then
+ export VM_MEMORY_SIZE=20480
+else
+ export VM_MEMORY_SIZE=16384
+fi
+
+# Until this feature is developed, ODL_VERSION must be initialized:
+# https://github.com/ansible/ansible/issues/17329
+# otherwise the lookup in vars/main.yml returns an empty string when it is not defined
+export ODL_VERSION=${ODL_VERSION:-latest_release}
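The shell-style ${ODL_VERSION:-latest_release} fallback is needed because Ansible's env lookup yields an empty string, not an error, when the variable is unset, so the role cannot tell "unset" apart from "empty". The same pitfall, illustrated in Python:

    import os

    # .get() only falls back when the variable is missing entirely;
    # an exported-but-empty ODL_VERSION still yields ''.
    version = os.environ.get('ODL_VERSION', 'latest_release')

    # 'or' also treats the empty string as unset, mirroring ${VAR:-default}.
    version = os.environ.get('ODL_VERSION') or 'latest_release'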
diff --git a/setup.cfg b/setup.cfg
index 8e5f6230..d1d320cc 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,9 +1,12 @@
[metadata]
name = sfc
-version = 5
home-page = https://wiki.opnfv.org/display/sfc/Service+Function+Chaining+Home
[files]
packages = sfc
-scripts =
- sfc/tests/functest/run_sfc_tests.py
+
+[entry_points]
+console_scripts =
+ run_sfc_tests = sfc.tests.functest.run_sfc_tests:main
+xtesting.testcase =
+ functest-odl-sfc = sfc.tests.functest.run_sfc_tests:SfcFunctest
diff --git a/sfc/lib/cleanup.py b/sfc/lib/cleanup.py
index 32835fa8..e97034ad 100644
--- a/sfc/lib/cleanup.py
+++ b/sfc/lib/cleanup.py
@@ -1,8 +1,9 @@
+import logging
import sys
import time
-import logging
import sfc.lib.openstack_utils as os_sfc_utils
import sfc.lib.odl_utils as odl_utils
+from openstack import connection
logger = logging.getLogger(__name__)
@@ -73,42 +74,88 @@ def delete_vims():
os_sfc_utils.delete_vim(t, vim_id=vim)
-# Creators is a list full of SNAPs objects
-def delete_openstack_objects(creators):
- for creator in reversed(creators):
- try:
- creator.clean()
- except Exception as e:
- logger.error('Unexpected error cleaning - %s', e)
+# Networking-odl generates a new security group when creating a router
+# which is not tracked by SNAPs
+def delete_untracked_security_groups():
+ openstack_sfc = os_sfc_utils.OpenStackSFC()
+ openstack_sfc.delete_all_security_groups()
def cleanup_odl(odl_ip, odl_port):
delete_odl_resources(odl_ip, odl_port, 'service-function-forwarder')
- delete_odl_resources(odl_ip, odl_port, 'service-function-chain')
- delete_odl_resources(odl_ip, odl_port, 'service-function-path')
- delete_odl_resources(odl_ip, odl_port, 'service-function')
+ # delete_odl_resources(odl_ip, odl_port, 'service-function-chain')
+ # delete_odl_resources(odl_ip, odl_port, 'service-function-path')
+ # delete_odl_resources(odl_ip, odl_port, 'service-function')
delete_odl_ietf_access_lists(odl_ip, odl_port)
-def cleanup(creators, odl_ip=None, odl_port=None):
+def cleanup_nsfc_objects():
+ '''
+ cleanup the networking-sfc objects created for the test
+ '''
+ # TODO Add n-sfc to snaps so that it can be removed through
+ # delete_openstack_objects
+ openstack_sfc = os_sfc_utils.OpenStackSFC()
+ openstack_sfc.delete_chain()
+ openstack_sfc.delete_port_groups()
+
+
+def cleanup_tacker_objects():
+ '''
+ cleanup the tacker objects created for the test
+ '''
delete_vnffgs()
delete_vnffgds()
delete_vnfs()
time.sleep(20)
delete_vnfds()
delete_vims()
- delete_openstack_objects(creators)
+
+
+def cleanup_mano_objects(mano):
+ '''
+ Cleanup the mano objects (chains, classifiers, etc)
+ '''
+ if mano == 'tacker':
+ cleanup_tacker_objects()
+ elif mano == 'no-mano':
+ cleanup_nsfc_objects()
+
+
+def delete_openstack_objects(testcase_config, creators):
+ conn = connection.from_config(verify=False)
+ for creator in creators:
+ if creator.name == testcase_config.subnet_name:
+ subnet_obj = creator
+
+ for creator in reversed(creators):
+ try:
+ logger.info("Deleting " + creator.name)
+ if creator.name == testcase_config.router_name:
+ logger.info("Removing subnet from router")
+ conn.network.remove_interface_from_router(
+ creator.id, subnet_obj.id)
+ time.sleep(2)
+ logger.info("Deleting router")
+ conn.network.delete_router(creator)
+ else:
+ creator.delete(conn.session)
+ time.sleep(2)
+ creators.remove(creator)
+ except Exception as e:
+ logger.error('Unexpected error cleaning - %s', e)
+
+
+def cleanup(testcase_config, creators, mano, odl_ip=None, odl_port=None):
+ cleanup_mano_objects(mano)
+ delete_openstack_objects(testcase_config, creators)
+ delete_untracked_security_groups()
if odl_ip is not None and odl_port is not None:
cleanup_odl(odl_ip, odl_port)
-def cleanup_from_bash(odl_ip=None, odl_port=None):
- delete_vnffgs()
- delete_vnffgds()
- delete_vnfs()
- time.sleep(20)
- delete_vnfds()
- delete_vims()
+def cleanup_from_bash(odl_ip=None, odl_port=None, mano='no-mano'):
+ cleanup_mano_objects(mano=mano)
if odl_ip is not None and odl_port is not None:
cleanup_odl(odl_ip, odl_port)
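With this change cleanup() needs the testcase configuration (to find the router and subnet among the creators), the creators themselves, and the MANO component in use, and it always sweeps the security groups that networking-odl creates behind SNAPs' back. A hedged call-site sketch; testcase_config, creators, odl_ip and odl_port are assumed to come from the surrounding test run:

    # Illustrative call sites; the arguments are assumed to come from the
    # surrounding test harness.
    import sfc.lib.cleanup as sfc_cleanup

    # Tacker-managed run: VNFFGs/VNFs/VIMs are deleted through tacker first.
    sfc_cleanup.cleanup(testcase_config, creators, mano='tacker',
                        odl_ip=odl_ip, odl_port=odl_port)

    # MANO-less run: the networking-sfc chain and port groups go instead.
    sfc_cleanup.cleanup(testcase_config, creators, mano='no-mano',
                        odl_ip=odl_ip, odl_port=odl_port)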
diff --git a/sfc/lib/config.py b/sfc/lib/config.py
index bc955d8b..bf9864a5 100644
--- a/sfc/lib/config.py
+++ b/sfc/lib/config.py
@@ -8,17 +8,19 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
+
+import logging
import os
-import yaml
import sfc
+import yaml
import functest
-
-import sfc.lib.test_utils as test_utils
-from functest.utils.constants import CONST
-import logging
import functest.utils.functest_utils as ft_utils
+import sfc.lib.test_utils as test_utils
+from functest.utils import config
+from functest.utils import env
+
logger = logging.getLogger(__name__)
@@ -28,8 +30,7 @@ class CommonConfig(object):
"""
def __init__(self):
- self.line_length = 30
- self.test_db = ft_utils.get_functest_config("results.test_db_url")
+ self.line_length = 35
self.functest_repo_path = os.path.dirname(functest.__file__)
self.functest_logging_api = os.path.join(self.functest_repo_path,
"ci", "logging.ini")
@@ -41,37 +42,59 @@ class CommonConfig(object):
self.sfc_test_dir, "vnfd-default-params-file")
self.vnffgd_dir = os.path.join(self.sfc_test_dir, "vnffgd-templates")
self.functest_results_dir = os.path.join(
- CONST.dir_results, "odl-sfc")
- self.config_file = os.path.join(self.sfc_test_dir, "config.yaml")
+ getattr(config.CONF, 'dir_results'), "odl-sfc")
+ self.config_file = os.path.join(self.sfc_test_dir, "config.yaml")
self.vim_file = os.path.join(self.sfc_test_dir, "register-vim.json")
- self.installer_type = CONST.__getattribute__('INSTALLER_TYPE')
-
- self.installer_fields = test_utils.fill_installer_dict(
- self.installer_type)
-
- self.installer_ip = CONST.__getattribute__('INSTALLER_IP')
-
- self.installer_user = ft_utils.get_parameter_from_yaml(
- self.installer_fields['user'], self.config_file)
-
- try:
- self.installer_password = ft_utils.get_parameter_from_yaml(
- self.installer_fields['password'], self.config_file)
- except:
- self.installer_password = None
+ pod_yaml_exists = os.path.isfile(self.sfc_test_dir + "/pod.yaml")
- try:
- self.installer_key_file = ft_utils.get_parameter_from_yaml(
- self.installer_fields['pkey_file'], self.config_file)
- except:
- self.installer_key_file = None
+ if pod_yaml_exists:
+ self.pod_file = os.path.join(self.sfc_test_dir, "pod.yaml")
+ self.nodes_pod = ft_utils.get_parameter_from_yaml(
+ "nodes", self.pod_file)
+ self.host_ip = self.nodes_pod[0]['ip']
+ self.host_user = self.nodes_pod[0]['user']
- try:
- self.installer_cluster = ft_utils.get_parameter_from_yaml(
- self.installer_fields['cluster'], self.config_file)
- except:
+ self.installer_type = 'configByUser'
+ self.installer_ip = self.host_ip
+ self.installer_user = self.host_user
self.installer_cluster = None
+ try:
+ self.installer_password = self.nodes_pod[0]['password']
+ except Exception:
+ self.installer_password = None
+
+ try:
+ self.installer_key_file = self.nodes_pod[0]['key_filename']
+ except Exception:
+ self.installer_key_file = None
+ else:
+ self.nodes_pod = None
+ self.host_ip = None
+ self.installer_type = env.get('INSTALLER_TYPE')
+ self.installer_fields = test_utils.fill_installer_dict(
+ self.installer_type)
+ self.installer_ip = env.get('INSTALLER_IP')
+ self.installer_user = ft_utils.get_parameter_from_yaml(
+ self.installer_fields['user'], self.config_file)
+
+ try:
+ self.installer_password = ft_utils.get_parameter_from_yaml(
+ self.installer_fields['password'], self.config_file)
+ except Exception:
+ self.installer_password = None
+
+ try:
+ self.installer_key_file = ft_utils.get_parameter_from_yaml(
+ self.installer_fields['pkey_file'], self.config_file)
+ except Exception:
+ self.installer_key_file = None
+
+ try:
+ self.installer_cluster = ft_utils.get_parameter_from_yaml(
+ self.installer_fields['cluster'], self.config_file)
+ except Exception:
+ self.installer_cluster = None
self.flavor = ft_utils.get_parameter_from_yaml(
"defaults.flavor", self.config_file)
@@ -87,8 +110,22 @@ class CommonConfig(object):
"defaults.image_format", self.config_file)
self.image_url = ft_utils.get_parameter_from_yaml(
"defaults.image_url", self.config_file)
- self.dir_functest_data = ft_utils.get_functest_config(
- "general.dir.functest_data")
+ self.mano_component = ft_utils.get_parameter_from_yaml(
+ "defaults.mano_component", self.config_file)
+ try:
+ self.vnf_image_name = ft_utils.get_parameter_from_yaml(
+ "defaults.vnf_image_name", self.config_file)
+ self.vnf_image_url = ft_utils.get_parameter_from_yaml(
+ "defaults.vnf_image_url", self.config_file)
+ self.vnf_image_format = ft_utils.get_parameter_from_yaml(
+ "defaults.vnf_image_format", self.config_file)
+ except ValueError:
+ # If the parameter does not exist we use the default
+ self.vnf_image_name = self.image_name
+ self.vnf_image_url = self.image_url
+ self.vnf_image_format = self.image_format
+
+ self.dir_functest_data = getattr(config.CONF, 'dir_functest_data')
class TestcaseConfig(object):
@@ -104,7 +141,8 @@ class TestcaseConfig(object):
testcases_yaml = yaml.safe_load(f)
test_config = testcases_yaml['testcases'].get(testcase, None)
if test_config is None:
- logger.error('Test {0} configuration is not present in {1}'
- .format(testcase, common_config.config_file))
+ logger.error(
+ 'Test %s configuration is not present in %s',
+ testcase, common_config.config_file)
# Update class fields with configuration variables dynamically
self.__dict__.update(**test_config)
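The new branch in CommonConfig sidesteps the installer autodetection whenever a pod.yaml sits next to the tests. The fields it reads are 'nodes' with per-node 'ip', 'user' and optional 'password'/'key_filename'; get_odl_ip_port_no_installer() additionally expects a 'role' per node. A minimal pod.yaml consistent with that, parsed the same way (all values are placeholders):

    # pod_yaml_sketch.py -- illustrative only; field names follow the code
    # above, all values are placeholders.
    import yaml

    POD_YAML = """
    nodes:
      - role: Controller
        ip: 192.168.122.10
        user: root
        key_filename: /root/.ssh/id_rsa
      - role: Compute
        ip: 192.168.122.11
        user: root
    """

    nodes = yaml.safe_load(POD_YAML)['nodes']
    print('installer ip/user: {0}/{1}'.format(nodes[0]['ip'], nodes[0]['user']))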
diff --git a/sfc/lib/odl_utils.py b/sfc/lib/odl_utils.py
index 45937263..2c657a13 100644
--- a/sfc/lib/odl_utils.py
+++ b/sfc/lib/odl_utils.py
@@ -1,130 +1,199 @@
import ConfigParser
+import functools
+import json
+import logging
import os
+import re
import requests
import time
-import json
-import re
-import logging
-import functest.utils.functest_utils as ft_utils
-import sfc.lib.openstack_utils as os_sfc_utils
+import sfc.lib.openstack_utils as os_sfc_utils
logger = logging.getLogger(__name__)
+odl_username = 'admin'
+odl_password = 'admin'
+
+ODL_MODULE_EXCEPTIONS = {
+ "service-function-path-state": "service-function-path"
+}
+
+ODL_PLURAL_EXCEPTIONS = {
+ "service-function-path-state": "service-function-paths-state"
+}
def actual_rsps_in_compute(ovs_logger, compute_ssh):
'''
Example flows that match the regex (line wrapped because of flake8)
- table=101, n_packets=7, n_bytes=595, priority=500,tcp,in_port=2,tp_dst=80
- actions=push_nsh,load:0x1->NXM_NX_NSH_MDTYPE[],load:0x3->NXM_NX_NSH_NP[],
- load:0x27->NXM_NX_NSP[0..23],load:0xff->NXM_NX_NSI[],
- load:0xffffff->NXM_NX_NSH_C1[],load:0->NXM_NX_NSH_C2[],resubmit(,17)
+ cookie=0xf005ba1100000002, duration=5.843s, table=101, n_packets=0,
+ n_bytes=0, priority=500,tcp,in_port=48,tp_dst=80
+ actions=load:0x169->NXM_NX_REG2[8..31],load:0xff->NXM_NX_REG2[0..7],
+ resubmit(,17)', u' cookie=0xf005ba1100000002, duration=5.825s, table=101,
+ n_packets=2, n_bytes=684, priority=10 actions=resubmit(,17)
'''
- match_rsp = re.compile(
- r'.+tp_dst=([0-9]+).+load:(0x[0-9a-f]+)->NXM_NX_NSP\[0\.\.23\].+')
+ match_rsp = re.compile(r'.+'
+ r'(tp_(?:src|dst)=[0-9]+)'
+ r'.+'
+ r'actions=load:(0x[0-9a-f]+)->NXM_NX_REG2'
+ r'.+')
# First line is OFPST_FLOW reply (OF1.3) (xid=0x2):
# This is not a flow so ignore
flows = (ovs_logger.ofctl_dump_flows(compute_ssh, 'br-int', '101')
.strip().split('\n')[1:])
matching_flows = [match_rsp.match(f) for f in flows]
- # group(1) = 22 (tp_dst value) | group(2) = 0xff (rsp value)
- rsps_in_compute = ['{0}_{1}'.format(mf.group(2), mf.group(1))
+ # group(1) = tp_dst=22 | group(2) = 0xff (rsp value)
+ rsps_in_compute = ['{0}|{1}'.format(mf.group(2), mf.group(1))
for mf in matching_flows if mf is not None]
return rsps_in_compute
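The new regex and token format can be checked offline against the sample flow in the docstring: the flow yields '0x169|tp_dst=80', which is exactly the token promised_rsps_in_compute() later builds from the ODL side (path-id 361 plus the ACL's tcp match), so the promised/actual set comparison lines up. A self-contained sketch:

    import re

    match_rsp = re.compile(r'.+'
                           r'(tp_(?:src|dst)=[0-9]+)'
                           r'.+'
                           r'actions=load:(0x[0-9a-f]+)->NXM_NX_REG2'
                           r'.+')

    flow = ('cookie=0xf005ba1100000002, duration=5.843s, table=101,'
            ' n_packets=0, n_bytes=0, priority=500,tcp,in_port=48,tp_dst=80'
            ' actions=load:0x169->NXM_NX_REG2[8..31],'
            'load:0xff->NXM_NX_REG2[0..7],resubmit(,17)')

    mf = match_rsp.match(flow)
    actual = '{0}|{1}'.format(mf.group(2), mf.group(1))
    assert actual == '0x169|tp_dst=80'

    # ODL side: hex(path-id) plus the ACL's tcp match gives the same token.
    assert '{0}|{1}'.format(hex(361), 'tp_dst=80') == actual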
-def get_active_rsps(odl_ip, odl_port):
+def get_active_rsps_on_ports(odl_ip, odl_port, neutron_ports):
'''
Queries operational datastore and returns the RSPs for which we have
- created a classifier (ACL). These are considered as active RSPs
- for which classification rules should exist in the compute nodes
+ created a classifier (ACL) on the specified neutron ports. These are
+ considered as active RSPs on those ports for which classification rules
+ should exist in the compute node on which such ports are located.
- This function enhances the returned dictionary with the
- destination port of the ACL.
+ This function enhances each returned RSP with the openflow matches on
+ the tcp ports that classify traffic into that RSP.
'''
+ port_ids = [port.id for port in neutron_ports]
acls = get_odl_acl_list(odl_ip, odl_port)
- rsps = []
+ rsps = {}
for acl in acls['access-lists']['acl']:
try:
# We get the first ace. ODL creates a new ACL
# with one ace for each classifier
ace = acl['access-list-entries']['ace'][0]
- except:
+ except Exception:
logger.warn('ACL {0} does not have an ACE'.format(
acl['acl-name']))
continue
- if not ('netvirt-sfc-acl:rsp-name' in ace['actions']):
+ matches = ace['matches']
+
+ # We are just interested in the destination-port-range matches
+ # that we use throughout the tests
+ if matches.get('destination-port-range') is None:
+ continue
+ tcp_port = matches['destination-port-range']['lower-port']
+
+ # A single ace may classify traffic into a forward path
+ # and optionally into a reverse path if destination port is set
+ src_port = matches.get('netvirt-sfc-acl:source-port-uuid')
+ dst_port = matches.get('netvirt-sfc-acl:destination-port-uuid')
+ forward_of_match = None
+ reverse_of_match = None
+ if src_port in port_ids:
+ forward_of_match = 'tp_dst=' + str(tcp_port)
+ if dst_port in port_ids:
+ # For classification to the reverse path
+ # the openflow match inverts
+ reverse_of_match = 'tp_src=' + str(tcp_port)
+
+ # This ACL does not apply to any of the given ports
+ if not forward_of_match and not reverse_of_match:
continue
- rsp_name = ace['actions']['netvirt-sfc-acl:rsp-name']
- rsp = get_odl_resource_elem(odl_ip,
- odl_port,
- 'rendered-service-path',
- rsp_name,
- datastore='operational')
- '''
- Rsps are returned in the format:
- {
- "rendered-service-path": [
- {
- "name": "Path-red-Path-83",
- "path-id": 83,
- ...
- "rendered-service-path-hop": [
- {
- ...
- "service-function-name": "testVNF1",
- "service-index": 255
- ...
- 'rendered-service-path' Is returned as a list with one
- element (we select by name and the names are unique)
- '''
- rsp_port = rsp['rendered-service-path'][0]
- rsp_port['dst-port'] = (ace['matches']
- ['destination-port-range']['lower-port'])
- rsps.append(rsp_port)
- return rsps
-
-
-def promised_rsps_in_computes(odl_ip, odl_port):
+ actions = ace['actions']
+ rsp_names = get_rsps_from_netvirt_acl_actions(odl_ip,
+ odl_port,
+ actions)
+
+ for rsp_name in rsp_names:
+ rsp = rsps.get(rsp_name)
+ if not rsp:
+ rsp = get_rsp(odl_ip, odl_port, rsp_name)
+ of_matches = rsp.get('of-matches', [])
+ if reverse_of_match and rsp.get('reverse-path'):
+ of_matches.append(reverse_of_match)
+ elif forward_of_match and not rsp.get('reverse-path'):
+ of_matches.append(forward_of_match)
+ rsp['of-matches'] = of_matches
+ rsps[rsp_name] = rsp
+
+ return rsps.values()
+
+
+def get_rsps_from_netvirt_acl_actions(odl_ip, odl_port, netvirt_acl_actions):
'''
- Return a list of rsp_port which represents the rsp id and the destination
- port configured in ODL
+ Return the list of RSPs referenced from the netvirt sfc redirect action
'''
- rsps = get_active_rsps(odl_ip, odl_port)
- rsps_in_computes = ['{0}_{1}'.format(hex(rsp['path-id']), rsp['dst-port'])
- for rsp in rsps]
+ rsp_names = []
+
+ if 'netvirt-sfc-acl:rsp-name' in netvirt_acl_actions:
+ rsp_names.append(netvirt_acl_actions['netvirt-sfc-acl:rsp-name'])
+
+ if 'netvirt-sfc-acl:sfp-name' in netvirt_acl_actions:
+ # If the acl redirect action is a sfp instead of rsp
+ # we need to get the rsps associated to that sfp
+ sfp_name = netvirt_acl_actions['netvirt-sfc-acl:sfp-name']
+ sfp_state = get_odl_resource_elem(odl_ip,
+ odl_port,
+ 'service-function-path-state',
+ sfp_name,
+ datastore='operational')
+ sfp_rsps = sfp_state.get('sfp-rendered-service-path', [])
+ sfp_rsp_names = [rsp['name'] for rsp in sfp_rsps if 'name' in rsp]
+ rsp_names.extend(sfp_rsp_names)
+
+ return rsp_names
+
+
+def get_rsp(odl_ip, odl_port, rsp_name):
+ rsp = get_odl_resource_elem(odl_ip,
+ odl_port,
+ 'rendered-service-path',
+ rsp_name,
+ datastore='operational')
+ return rsp
+
+
+def promised_rsps_in_compute(odl_ip, odl_port, compute_ports):
+ '''
+ Return a list of 'rsp|of_match' strings representing the RSPs and the
+ openflow matches on the source/destination tcp port that classify
+ traffic into each RSP, as configured in the ODL ACLs
+ '''
+ rsps = get_active_rsps_on_ports(odl_ip, odl_port, compute_ports)
+ rsps_in_computes = ['{0}|{1}'.format(hex(rsp['path-id']), of_match)
+ for rsp in rsps
+ for of_match in rsp['of-matches']]
return rsps_in_computes
-@ft_utils.timethis
+def timethis(func):
+ """Measure the time it takes for a function to complete"""
+ @functools.wraps(func)
+ def timed(*args, **kwargs):
+ ts = time.time()
+ result = func(*args, **kwargs)
+ te = time.time()
+ elapsed = '{0}'.format(te - ts)
+ logger.info('{f}(*{a}, **{kw}) took: {t} sec'.format(
+ f=func.__name__, a=args, kw=kwargs, t=elapsed))
+ return result, elapsed
+ return timed
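This local timethis replaces the dropped ft_utils.timethis; note that, unlike a transparent decorator, it returns a (result, elapsed) tuple, so callers have to unpack two values. A quick illustration, assuming the decorator above is in scope:

    @timethis
    def add(a, b):
        return a + b

    result, elapsed = add(2, 3)
    assert result == 5               # the wrapped function's return value
    assert float(elapsed) >= 0.0     # elapsed seconds, formatted as a string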
+
+
+@timethis
def wait_for_classification_rules(ovs_logger, compute_nodes, odl_ip, odl_port,
- timeout=200):
+ compute_name, neutron_ports, timeout=200):
'''
Check if the classification rules configured in ODL are implemented in OVS.
We know by experience that this process might take a while
'''
try:
- # Find the compute where the client is
- compute_client = os_sfc_utils.get_compute_client()
-
- for compute_node in compute_nodes:
- if compute_node.name in compute_client:
- compute = compute_node
- try:
- compute
- except NameError:
- logger.debug("No compute where the client is was found")
- raise Exception("No compute where the client is was found")
+ compute = find_compute(compute_name, compute_nodes)
# Find the configured rsps in ODL. Their format is 'nsp|of_match'
promised_rsps = []
timeout2 = 10
while not promised_rsps:
- promised_rsps = promised_rsps_in_computes(odl_ip, odl_port)
+ promised_rsps = promised_rsps_in_compute(odl_ip, odl_port,
+ neutron_ports)
timeout2 -= 1
if timeout2 == 0:
os_sfc_utils.get_tacker_items()
@@ -133,7 +202,13 @@ def wait_for_classification_rules(ovs_logger, compute_nodes, odl_ip, odl_port,
time.sleep(3)
while timeout > 0:
- logger.info("RSPs in ODL Operational DataStore:")
+ # When swapping classifiers promised_rsps update takes time to
+ # get updated
+ # TODO: Need to optimise this code
+ promised_rsps = promised_rsps_in_compute(odl_ip, odl_port,
+ neutron_ports)
+ logger.info("RSPs in ODL Operational DataStore"
+ "for compute '{}':".format(compute_name))
logger.info("{0}".format(promised_rsps))
# Fetch the rsps implemented in the compute
@@ -177,16 +252,57 @@ def get_odl_ip_port(nodes):
return ip, port
-def pluralize(s):
- return '{0}s'.format(s)
+def get_odl_ip_port_no_installer(nodes_pod):
+ node_index = 0
+ for n in nodes_pod:
+ if n['role'] == 'Controller':
+ break
+ node_index += 1
+ remote_ml2_conf_etc = '/etc/neutron/plugins/ml2/ml2_conf.ini'
+ os.system('scp {0}@{1}:{2} .'.
+ format(nodes_pod[node_index]['user'],
+ nodes_pod[node_index]['ip'],
+ remote_ml2_conf_etc))
+ file = open('ml2_conf.ini', 'r')
+ string = re.findall(r'[0-9]+(?:\.[0-9]+){3}\:[0-9]+', file.read())
+ file.close()
+ ip = string[0].split(':')[0]
+ port = string[0].split(':')[1]
+ return ip, port
+
+
+def get_odl_username_password():
+ local_ml2_conf_file = os.path.join(os.getcwd(), 'ml2_conf.ini')
+ con_par = ConfigParser.RawConfigParser()
+ con_par.read(local_ml2_conf_file)
+ global odl_username
+ odl_username = con_par.get('ml2_odl', 'username')
+ global odl_password
+ odl_password = con_par.get('ml2_odl', 'password')
+ return odl_username, odl_password
+
+
+def pluralize(resource):
+ plural = ODL_PLURAL_EXCEPTIONS.get(resource, None)
+ if not plural:
+ plural = '{0}s'.format(resource)
+ return plural
+
+
+def get_module(resource):
+ module = ODL_MODULE_EXCEPTIONS.get(resource, None)
+ if not module:
+ module = resource
+ return module
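pluralize() and get_module() exist because RESTCONF list URLs are built as '<module>:<plural>', and 'service-function-path-state' breaks both the naive trailing-'s' rule and the module-equals-resource assumption. What the helpers above produce, assuming they are in scope:

    assert pluralize('service-function') == 'service-functions'
    assert get_module('service-function') == 'service-function'

    assert (pluralize('service-function-path-state')
            == 'service-function-paths-state')
    assert get_module('service-function-path-state') == 'service-function-path'
    # i.e. .../restconf/operational/
    #          service-function-path:service-function-paths-state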
def format_odl_resource_list_url(odl_ip, odl_port, resource,
- datastore='config', odl_user='admin',
- odl_pwd='admin'):
+ datastore='config', odl_user=odl_username,
+ odl_pwd=odl_password):
return ('http://{usr}:{pwd}@{ip}:{port}/restconf/{ds}/{rsrc}:{rsrcs}'
- .format(usr=odl_user, pwd=odl_pwd, ip=odl_ip, port=odl_port,
- ds=datastore, rsrc=resource, rsrcs=pluralize(resource)))
+ .format(usr=odl_username, pwd=odl_password, ip=odl_ip,
+ port=odl_port, ds=datastore, rsrc=get_module(resource),
+ rsrcs=pluralize(resource)))
def format_odl_resource_elem_url(odl_ip, odl_port, resource,
@@ -212,7 +328,12 @@ def get_odl_resource_elem(odl_ip, odl_port, resource,
elem_name, datastore='config'):
url = format_odl_resource_elem_url(
odl_ip, odl_port, resource, elem_name, datastore=datastore)
- return requests.get(url).json()
+ response = requests.get(url).json()
+ # Response is in the format of a dictionary containing
+ # a single value that is an array with the element requested:
+ # {'resource' : [element]}
+ # Return just the element
+ return response.get(resource, [{}])[0]
def delete_odl_resource_elem(odl_ip, odl_port, resource, elem_name,
@@ -230,10 +351,10 @@ def odl_acl_types_names(acl_json):
def format_odl_acl_list_url(odl_ip, odl_port,
- odl_user='admin', odl_pwd='admin'):
+ odl_user=odl_username, odl_pwd=odl_password):
acl_list_url = ('http://{usr}:{pwd}@{ip}:{port}/restconf/config/'
'ietf-access-control-list:access-lists'
- .format(usr=odl_user, pwd=odl_pwd,
+ .format(usr=odl_username, pwd=odl_password,
ip=odl_ip, port=odl_port))
return acl_list_url
@@ -283,3 +404,64 @@ def delete_acl(clf_name, odl_ip, odl_port):
odl_port,
'ietf-access-control-list:ipv4-acl',
clf_name)
+
+
+def find_compute(compute_client_name, compute_nodes):
+ for compute_node in compute_nodes:
+ if compute_node.name in compute_client_name:
+ compute = compute_node
+ try:
+ compute
+ except NameError:
+ logger.debug("No compute, where the client is, was found")
+ raise Exception("No compute, where the client is, was found")
+
+ return compute
+
+
+def check_vnffg_deletion(odl_ip, odl_port, ovs_logger, neutron_ports,
+ compute_client_name, compute_nodes, retries=20):
+ '''
+ First, RSPs are checked in the operational datastore of ODL. Nothing
+ should exist. As it might take a while for ODL to remove that, some
+ retries are needed.
+
+ Secondly, we check that the classification rules are removed too
+ '''
+
+ retries_counter = retries
+
+ # Check RSPs
+ while retries_counter > 0:
+ if get_active_rsps_on_ports(odl_ip, odl_port, neutron_ports):
+ retries_counter -= 1
+ time.sleep(3)
+ else:
+ break
+
+ if not retries_counter:
+ logger.debug("RSPs are still active in the MD-SAL")
+ return False
+
+ # Get the compute where the client is running
+ try:
+ compute = find_compute(compute_client_name, compute_nodes)
+ except Exception as e:
+ logger.debug("There was an error getting the compute: %s" % e)
+ return False
+
+ retries_counter = retries
+
+ # Check classification flows
+ while retries_counter > 0:
+ if (actual_rsps_in_compute(ovs_logger, compute.ssh_client)):
+ retries_counter -= 1
+ time.sleep(3)
+ else:
+ break
+
+ if not retries_counter:
+ logger.debug("Classification flows still in the compute")
+ return False
+
+ return True
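check_vnffg_deletion() chains the two polls above: first that no RSP stays active on the given ports in the ODL operational datastore, then that the classification flows have left the client's compute. With retries=20 and a 3-second sleep, each phase is bounded at roughly a minute. A hedged call-site sketch:

    # Illustrative call; ovs_logger, neutron_ports, client_compute_name and
    # compute_nodes are assumed to come from the surrounding test harness.
    if not check_vnffg_deletion(odl_ip, odl_port, ovs_logger, neutron_ports,
                                client_compute_name, compute_nodes):
        logger.error("VNFFG deletion left state behind in ODL or OVS")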
diff --git a/sfc/lib/openstack_utils.py b/sfc/lib/openstack_utils.py
index f55f62e8..c46ff123 100644
--- a/sfc/lib/openstack_utils.py
+++ b/sfc/lib/openstack_utils.py
@@ -1,213 +1,553 @@
-import logging
import os
import time
import json
+import logging
import yaml
-from tackerclient.tacker import client as tackerclient
-from functest.utils.constants import CONST
-
-from snaps.openstack.tests import openstack_tests
-
-from snaps.openstack.create_image import OpenStackImage
-from snaps.config.image import ImageConfig
-
-from snaps.config.flavor import FlavorConfig
-from snaps.openstack.create_flavor import OpenStackFlavor
-
-from snaps.config.network import NetworkConfig, SubnetConfig, PortConfig
-from snaps.openstack.create_network import OpenStackNetwork
-
-from snaps.config.router import RouterConfig
-from snaps.openstack.create_router import OpenStackRouter
-
-from snaps.config.security_group import (
- Protocol, SecurityGroupRuleConfig, Direction, SecurityGroupConfig)
+import urllib2
-from snaps.openstack.create_security_group import OpenStackSecurityGroup
+from tackerclient.tacker import client as tackerclient
+from functest.utils import constants
+from functest.utils import env
+from snaps.openstack.tests import openstack_tests
+from snaps.config.vm_inst import FloatingIpConfig
import snaps.openstack.create_instance as cr_inst
-from snaps.config.vm_inst import VmInstanceConfig, FloatingIpConfig
-
from snaps.openstack.utils import (
- nova_utils, neutron_utils, glance_utils, heat_utils, keystone_utils)
+ nova_utils, neutron_utils, heat_utils, keystone_utils)
+from openstack import connection
+from neutronclient.neutron import client as neutronclient
logger = logging.getLogger(__name__)
DEFAULT_TACKER_API_VERSION = '1.0'
+DEFAULT_API_VERSION = '2'
class OpenStackSFC:
def __init__(self):
+ self.conn = self.get_os_connection()
self.os_creds = openstack_tests.get_credentials(
- os_env_file=CONST.__getattribute__('openstack_creds'))
+ os_env_file=constants.ENV_FILE)
self.creators = []
self.nova = nova_utils.nova_client(self.os_creds)
self.neutron = neutron_utils.neutron_client(self.os_creds)
- self.glance = glance_utils.glance_client(self.os_creds)
self.heat = heat_utils.heat_client(self.os_creds)
+ self.keystone = keystone_utils.keystone_client(self.os_creds)
+ self.neutron_client = neutronclient.\
+ Client(self.get_neutron_client_version(),
+ session=self.conn.session)
- def register_glance_image(self, name, url, img_format, public):
- image_settings = ImageConfig(name=name, img_format=img_format, url=url,
- public=public, image_user='admin')
-
- # TODO Remove this when tacker is part of SNAPS
- self.image_settings = image_settings
+ def get_os_connection(self):
+ return connection.from_config(verify=False)
- image_creator = OpenStackImage(self.os_creds, image_settings)
- image_creator.create()
+ def get_neutron_client_version(self):
+ api_version = os.getenv('OS_NETWORK_API_VERSION')
+ if api_version is not None:
+ logger.info("OS_NETWORK_API_VERSION is %s" % api_version)
+ return api_version
+ return DEFAULT_API_VERSION
- self.creators.append(image_creator)
- return image_creator
+ def register_glance_image(self, name, url, img_format, public):
+ logger.info("Registering the image...")
+ image = self.conn.image.find_image(name)
+ if image:
+ logger.info("Image %s already exists." % image.name)
+ else:
+ if 'http' in url:
+ logger.info("Downloading image")
+ response = urllib2.urlopen(url)
+ image_data = response.read()
+ else:
+ with open(url) as f:
+ image_data = f.read()
+
+ image_settings = {'name': name,
+ 'disk_format': img_format,
+ 'data': image_data,
+ 'is_public': public,
+ 'container_format': 'bare'}
+ image = self.conn.image.upload_image(**image_settings)
+ self.creators.append(image)
+ logger.info("Image created")
+
+ self.image_settings = image_settings
+
+ return image
def create_flavor(self, name, ram, disk, vcpus):
- flavor_settings = FlavorConfig(name=name, ram=ram, disk=disk,
- vcpus=vcpus)
- flavor_creator = OpenStackFlavor(self.os_creds, flavor_settings)
- flavor = flavor_creator.create()
+ logger.info("Creating flavor...")
+ flavor_settings = {"name": name, "ram": ram, "disk": disk,
+ "vcpus": vcpus}
- self.creators.append(flavor_creator)
+ flavor = self.conn.compute.create_flavor(**flavor_settings)
+
+ self.creators.append(flavor)
return flavor
def create_network_infrastructure(self, net_name, subnet_name, subnet_cidr,
router_name):
+ logger.info("Creating Networks...")
# Network and subnet
- subnet_settings = SubnetConfig(name=subnet_name, cidr=subnet_cidr)
- network_settings = NetworkConfig(name=net_name,
- subnet_settings=[subnet_settings])
- network_creator = OpenStackNetwork(self.os_creds, network_settings)
- network = network_creator.create()
+ network = self.conn.network.create_network(name=net_name)
+ self.creators.append(network)
- self.creators.append(network_creator)
+ subnet_settings = {"name": subnet_name, "cidr": subnet_cidr,
+ "network_id": network.id, 'ip_version': '4'}
+ subnet = self.conn.network.create_subnet(**subnet_settings)
+ self.creators.append(subnet)
# Router
- ext_network_name = CONST.__getattribute__('EXTERNAL_NETWORK')
+ ext_network_name = env.get('EXTERNAL_NETWORK')
+ ext_net = self.conn.network.find_network(ext_network_name)
+ router_dict = {'network_id': ext_net.id}
- router_settings = RouterConfig(name=router_name,
- external_gateway=ext_network_name,
- internal_subnets=[subnet_name])
+ logger.info("Creating Router...")
+ router = self.conn.network.create_router(name=router_name)
- router_creator = OpenStackRouter(self.os_creds, router_settings)
- router = router_creator.create()
+ self.conn.network.add_interface_to_router(router.id,
+ subnet_id=subnet.id)
- self.creators.append(router_creator)
+ self.conn.network.update_router(router.id,
+ external_gateway_info=router_dict)
+ router_obj = self.conn.network.get_router(router.id)
+ self.creators.append(router_obj)
- return network, router
+ return network, router_obj
def create_security_group(self, sec_grp_name):
- rule_ping = SecurityGroupRuleConfig(sec_grp_name=sec_grp_name,
- direction=Direction.ingress,
- protocol=Protocol.icmp)
-
- rule_ssh = SecurityGroupRuleConfig(sec_grp_name=sec_grp_name,
- direction=Direction.ingress,
- protocol=Protocol.tcp,
- port_range_min=22,
- port_range_max=22)
-
- rule_http = SecurityGroupRuleConfig(sec_grp_name=sec_grp_name,
- direction=Direction.ingress,
- protocol=Protocol.tcp,
- port_range_min=80,
- port_range_max=80)
+ logger.info("Creating the security groups...")
+ sec_group = self.conn.network.create_security_group(name=sec_grp_name)
- rules = [rule_ping, rule_ssh, rule_http]
+ rule_ping = {"security_group_id": sec_group.id,
+ "direction": "ingress",
+ "protocol": "icmp"}
- secgroup_settings = SecurityGroupConfig(name=sec_grp_name,
- rule_settings=rules)
+ rule_ssh = {"security_group_id": sec_group.id,
+ "direction": "ingress",
+ "protocol": "tcp",
+ "port_range_min": 22,
+ "port_range_max": 22}
- sec_group_creator = OpenStackSecurityGroup(self.os_creds,
- secgroup_settings)
- sec_group = sec_group_creator.create()
+ rule_http = {"security_group_id": sec_group.id,
+ "direction": "ingress",
+ "protocol": "tcp",
+ "port_range_min": 80,
+ "port_range_max": 80}
- self.creators.append(sec_group_creator)
+ rules = [rule_ping, rule_ssh, rule_http]
+
+ for rule in rules:
+ self.conn.network.create_security_group_rule(**rule)
+
+ self.creators.append(sec_group)
return sec_group
- def create_instance(self, vm_name, flavor_name, image_creator, network,
- secgrp, av_zone):
+ def create_instance(self, vm_name, flavor, image, network,
+ sec_group, av_zone, ports, port_security=True):
+ logger.info("Creating Key Pair {}...".format(vm_name))
+
+ keypair = self.conn.compute.\
+ create_keypair(name="{}_keypair".format(vm_name))
+ self.creators.append(keypair)
+ flavor_obj = self.conn.compute.find_flavor(flavor)
+
+ logger.info("Creating Port {}...".format(ports))
+ port_list = []
+ for port in ports:
+ if port_security:
+ port_obj = self.conn.network.create_port(
+ name=port, is_port_security_enabled=port_security,
+ network_id=network.id, security_group_ids=[sec_group.id])
+ else:
+ port_obj = self.conn.network.create_port(
+ name=port, is_port_security_enabled=port_security,
+ network_id=network.id)
+ port_list.append(port_obj)
+ self.creators.append(port_obj)
+ logger.info("Creating the instance {}...".format(vm_name))
+
+ if len(port_list) > 1:
+ network_list = [{"port": port_list[0].id},
+ {"port": port_list[1].id}]
+ else:
+ network_list = [{"port": port_obj.id}]
- port_settings = PortConfig(name=vm_name + '-port',
- network_name=network.name)
+ instance = self.conn.compute.create_server(name=vm_name,
+ image_id=image.id,
+ flavor_id=flavor_obj.id,
+ networks=network_list,
+ key_name=keypair.name,
+ availability_zone=av_zone)
- instance_settings = VmInstanceConfig(
- name=vm_name, flavor=flavor_name,
- security_group_names=str(secgrp.name),
- port_settings=[port_settings],
- availability_zone=av_zone)
+ logger.info("Waiting for {} to become Active".format(instance.name))
+ self.conn.compute.wait_for_server(instance)
+ logger.info("{} is active".format(instance.name))
- instance_creator = cr_inst.OpenStackVmInstance(
- self.os_creds,
- instance_settings,
- image_creator.image_settings)
+ self.creators.append(instance)
- instance = instance_creator.create()
+ return instance, port_list
- self.creators.append(instance_creator)
- return instance, instance_creator
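create_instance() now pre-creates the neutron ports itself (optionally with port security disabled, which SF data ports need so anti-spoofing does not drop chained traffic) and boots the server through the openstacksdk connection, returning the port list alongside the instance. A hedged usage sketch; the image, network and security group are assumed to come from the earlier create_* calls, and the zone and port names are placeholders:

    from sfc.lib.openstack_utils import OpenStackSFC

    osfc = OpenStackSFC()
    client_vm, client_ports = osfc.create_instance(
        'client', 'custom', image, network, sec_group,
        av_zone='nova::compute00',         # assumed availability zone
        ports=['client-port'])             # one port -> a single NIC

    # An SF with two data ports and no anti-spoofing on them:
    sf_vm, sf_ports = osfc.create_instance(
        'vnf1', 'custom', image, network, sec_group,
        av_zone='nova::compute00',
        ports=['vnf1-in', 'vnf1-out'], port_security=False)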
+ def get_instance(self, instance_id):
+ """
+ Return a dictionary of metadata for a server instance
+ """
+ return self.conn.compute.get_server_metadata(instance_id)
def get_av_zones(self):
'''
Return the availability zone each host belongs to
'''
- hosts = nova_utils.get_hypervisor_hosts(self.nova)
+ hosts = self.get_hypervisor_hosts()
return ['nova::{0}'.format(host) for host in hosts]
+ def get_hypervisor_hosts(self):
+ """
+ Returns the host names of all nova nodes with active hypervisors
+ :return: a list of hypervisor host names
+ """
+ try:
+ nodes = []
+ hypervisors = self.conn.compute.hypervisors()
+ for hypervisor in hypervisors:
+ if hypervisor.state == "up":
+ nodes.append(hypervisor.name)
+ return nodes
+ except Exception as e:
+ logger.error("Error [get_hypervisors(compute)]: %s" % e)
+ return None
+
def get_compute_client(self):
'''
Return the compute where the client sits
'''
- compute = nova_utils.get_server(self.nova, server_name='client')
- return compute
+ return self.get_vm_compute('client')
+
+ def get_compute_server(self):
+ '''
+ Return the compute where the server sits
+ '''
+ return self.get_vm_compute('server')
- def assign_floating_ip(self, router, vm, vm_creator):
+ def get_vm_compute(self, vm_name):
'''
- Assign a floating ips to all the VMs
+ Return the compute where the vm sits
'''
- name = vm.name + "-float"
- port_name = vm.ports[0].name
- float_ip = FloatingIpConfig(name=name,
- port_name=port_name,
- router_name=router.name)
- ip = vm_creator.add_floating_ip(float_ip)
+ for creator in self.creators:
+ # We want to filter the vm creators
+ if hasattr(creator, 'get_vm_inst'):
+ # We want to fetch by vm_name
+ if creator.get_vm_inst().name == vm_name:
+ return creator.get_vm_inst().compute_host
+
+ raise Exception("There is no VM with name '{}'!!".format(vm_name))
+
+ def get_port_by_ip(self, ip_address):
+ """
+ Return a dictionary of metadata for a port instance
+ by its ip_address
+ """
+
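+ # Note: only each port's first fixed IP is compared; this is assumed
+ # sufficient here because the test ports carry a single fixed IP.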
+ ports = self.conn.network.ports()
+ for port in ports:
+ if port.fixed_ips[0]['ip_address'] == ip_address:
+ return self.conn.network.get_port(port.id)
+
+ def assign_floating_ip(self, vm, vm_port):
+ '''
+ Assign a floating IP to the given VM port
+ '''
+ logger.info("Creating floating IP...")
+
+ ext_network_name = env.get('EXTERNAL_NETWORK')
+ ext_net = self.conn.network.find_network(ext_network_name)
+
+ fip = self.conn.network.create_ip(floating_network_id=ext_net.id,
+ port_id=vm_port.id)
+ logger.info(
+ " FLoating IP address {} created".format(fip.floating_ip_address))
+
+ logger.info(" Adding Floating IPs to instances ")
+ self.conn.compute.add_floating_ip_to_server(
+ vm.id, fip.floating_ip_address)
- return ip.ip
+ self.creators.append(fip)
+ return fip.floating_ip_address
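+ # Illustrative usage (hypothetical names):
+ #   client_fip = openstack_sfc.assign_floating_ip(client_instance,
+ #                                                 client_port)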
# We need this function because tacker VMs cannot be created through SNAPS
- def assign_floating_ip_vnfs(self, router):
+ def assign_floating_ip_vnfs(self, router, ips=None):
'''
- Assign a floating ips to all the SFs
+ Assign floating IPs to all the SFs. Optionally specify the subnet
+ IPs that the floating IPs should be assigned to, assuming that the
+ SF is connected to a single subnet, both globally and per port.
'''
stacks = self.heat.stacks.list()
fips = []
+ project_name = 'admin'
for stack in stacks:
servers = heat_utils.get_stack_servers(self.heat,
self.nova,
- self.neutron,
- stack)
+ self.neutron_client,
+ self.keystone,
+ stack,
+ project_name)
sf_creator = cr_inst.generate_creator(self.os_creds,
servers[0],
- self.image_settings)
- port_name = servers[0].ports[0].name
+ self.image_settings,
+ project_name)
+
name = servers[0].name + "-float"
+ if ips is None:
+ port_name = servers[0].ports[0].name
+ else:
+ port_name = None
+ for port in servers[0].ports:
+ if port.ips[0]['ip_address'] in ips:
+ port_name = port.name
+ break
+
+ if port_name is None:
+ err_msg = ("The VNF {} does not have any suitable port {} "
+ "for floating IP assignment"
+ .format(name,
+ 'with ip any of ' +
+ str(ips) if ips else ''))
+ logger.error(err_msg)
+ raise Exception(err_msg)
+
float_ip = FloatingIpConfig(name=name,
port_name=port_name,
router_name=router.name)
ip = sf_creator.add_floating_ip(float_ip)
+ self.creators.append(sf_creator)
fips.append(ip.ip)
return fips
- def get_client_port_id(self, vm):
+ def get_instance_port(self, vm, vm_creator, port_name=None):
'''
Get the neutron port of the given instance
'''
- port_id = neutron_utils.get_port(self.neutron,
- port_name=vm.name + "-port")
- return port_id
+ if not port_name:
+ port_name = vm.name + "-port"
+ port = vm_creator.get_port_by_name(port_name)
+ if port is not None:
+ return port
+ else:
+ logger.error("The VM {0} does not have any port"
+ " with name {1}".format(vm.name, port_name))
+ raise Exception("Client VM does not have the desired port")
-# TACKER SECTION #
+ def delete_all_security_groups(self):
+ '''
+ Deletes all the available security groups
+ Needed until this bug is fixed:
+ https://bugs.launchpad.net/networking-odl/+bug/1763705
+ '''
+ logger.info("Deleting remaining security groups...")
+ sec_groups = self.conn.network.security_groups()
+ for sg in sec_groups:
+ self.conn.network.delete_security_group(sg)
+
+ def wait_for_vnf(self, vnf_creator):
+ '''
+ Waits for VNF to become active
+ '''
+ return vnf_creator.vm_active(block=True, poll_interval=5)
+
+ def create_port_groups(self, vnf_ports, vm_instance):
+ '''
+ Creates a networking-sfc port pair and group
+ '''
+ logger.info("Creating the port pairs...")
+ port_pair = dict()
+ port_pair['name'] = vm_instance.name + '-connection-points'
+ port_pair['description'] = 'port pair for ' + vm_instance.name
+
+ # In the symmetric testcase ingress != egress (VNF has 2 interfaces)
+ if len(vnf_ports) == 1:
+ port_pair['ingress'] = vnf_ports[0].id
+ port_pair['egress'] = vnf_ports[0].id
+ elif len(vnf_ports) == 2:
+ port_pair['ingress'] = vnf_ports[0].id
+ port_pair['egress'] = vnf_ports[1].id
+ else:
+ logger.error("Only SFs with one or two ports are supported")
+ raise Exception("Failed to create port pairs")
+ port_pair_info = \
+ self.neutron_client.create_sfc_port_pair({'port_pair': port_pair})
+ if not port_pair_info:
+ logger.warning("Chain creation failed due to port pair "
+ "creation failed for vnf %(vnf)s",
+ {'vnf': vm_instance.name})
+ return None
+
+ # Avoid race conditions by checking that the port pair has been committed
+ iterations = 5
+ found_it = False
+ for _ in range(iterations):
+ pp_list = self.neutron_client.list_sfc_port_pairs()['port_pairs']
+ for pp in pp_list:
+ if pp['id'] == port_pair_info['port_pair']['id']:
+ found_it = True
+ break
+ if found_it:
+ break
+ else:
+ time.sleep(3)
+
+ if not found_it:
+ raise Exception("Port pair was not committed in openstack")
+
+ logger.info("Creating the port pair groups for %s" % vm_instance.name)
+
+ port_pair_group = {}
+ port_pair_group['name'] = vm_instance.name + '-port-pair-group'
+ port_pair_group['description'] = \
+ 'port pair group for ' + vm_instance.name
+ port_pair_group['port_pairs'] = []
+ port_pair_group['port_pairs'].append(port_pair_info['port_pair']['id'])
+ ppg_config = {'port_pair_group': port_pair_group}
+ port_pair_group_info = \
+ self.neutron_client.create_sfc_port_pair_group(ppg_config)
+ if not port_pair_group_info:
+ logger.warning("Chain creation failed due to port pair group "
+ "creation failed for vnf "
+ "{}".format(vm_instance.name))
+ return None
+
+ return port_pair_group_info['port_pair_group']['id']
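+ # The two calls above are roughly equivalent to the networking-sfc
+ # CLI (assuming the OSC plugin is installed):
+ #   openstack sfc port pair create --ingress <port> --egress <port> <name>
+ #   openstack sfc port pair group create --port-pair <pair> <name>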
+
+ def create_classifier(self, neutron_port, port, protocol, fc_name,
+ symmetrical, server_port=None, server_ip=None):
+ '''
+ Create the classifier
+ '''
+ logger.info("Creating the classifier...")
+
+ if symmetrical:
+ sfc_classifier_params = {'name': fc_name,
+ 'destination_ip_prefix': server_ip,
+ 'logical_source_port': neutron_port,
+ 'logical_destination_port': server_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'protocol': protocol}
+ else:
+ sfc_classifier_params = {'name': fc_name,
+ 'logical_source_port': neutron_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'protocol': protocol}
+
+ fc_config = {'flow_classifier': sfc_classifier_params}
+ self.neutron_client.create_sfc_flow_classifier(fc_config)
+
+ def create_chain(self, port_groups, neutron_port, port, protocol,
+ vnffg_name, symmetrical, server_port=None,
+ server_ip=None):
+ '''
+ Create the flow classifier and the port chain
+ '''
+ logger.info("Creating the classifier...")
+
+ if symmetrical:
+ sfc_classifier_params = {'name': vnffg_name + '-classifier',
+ 'destination_ip_prefix': server_ip,
+ 'logical_source_port': neutron_port,
+ 'logical_destination_port': server_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'protocol': protocol}
+ else:
+ sfc_classifier_params = {'name': vnffg_name + '-classifier',
+ 'logical_source_port': neutron_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'protocol': protocol}
+
+ fc_config = {'flow_classifier': sfc_classifier_params}
+ fc_info = \
+ self.neutron_client.create_sfc_flow_classifier(fc_config)
+
+ logger.info("Creating the chain...")
+ port_chain = {}
+ port_chain['name'] = vnffg_name + '-port-chain'
+ port_chain['description'] = 'port-chain for SFC'
+ port_chain['port_pair_groups'] = port_groups
+ port_chain['flow_classifiers'] = []
+ port_chain['flow_classifiers'].append(fc_info['flow_classifier']['id'])
+ if symmetrical:
+ port_chain['chain_parameters'] = {}
+ port_chain['chain_parameters']['symmetric'] = True
+ chain_config = {'port_chain': port_chain}
+ return self.neutron_client.create_sfc_port_chain(chain_config)
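+ # Roughly the CLI equivalent (assuming the networking-sfc OSC plugin):
+ #   openstack sfc port chain create --port-pair-group <ppg> \
+ #     --flow-classifier <fc> <vnffg_name>-port-chain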
+
+ def update_chain(self, vnffg_name, fc_name, symmetrical):
+ '''
+ Update the port chain so that it uses the given flow classifier
+ '''
+ fc_id = self.neutron_client.find_resource('flow_classifier',
+ fc_name)['id']
+ logger.info("Update the chain...")
+ port_chain = {}
+ port_chain['name'] = vnffg_name + '-port-chain'
+ port_chain['flow_classifiers'] = []
+ port_chain['flow_classifiers'].append(fc_id)
+ if symmetrical:
+ port_chain['chain_parameters'] = {}
+ port_chain['chain_parameters']['symmetric'] = True
+ chain_config = {'port_chain': port_chain}
+ pc_id = self.neutron_client.find_resource('port_chain',
+ port_chain['name'])['id']
+ return self.neutron_client.update_sfc_port_chain(pc_id, chain_config)
+
+ def swap_classifiers(self, vnffg_1_name, vnffg_2_name, symmetric=False):
+
+ '''
+ Swap Classifiers
+ '''
+ logger.info("Swap classifiers...")
+
+ self.update_chain(vnffg_1_name, 'dummy', symmetric)
+ vnffg_1_classifier_name = vnffg_1_name + '-classifier'
+ self.update_chain(vnffg_2_name, vnffg_1_classifier_name, symmetric)
+ vnffg_2_classifier_name = vnffg_2_name + '-classifier'
+ self.update_chain(vnffg_1_name, vnffg_2_classifier_name, symmetric)
+
+ def delete_port_groups(self):
+ '''
+ Delete all port groups and port pairs
+ '''
+ logger.info("Deleting the port groups...")
+ ppg_list = self.neutron_client.\
+ list_sfc_port_pair_groups()['port_pair_groups']
+ for ppg in ppg_list:
+ self.neutron_client.delete_sfc_port_pair_group(ppg['id'])
+
+ logger.info("Deleting the port pairs...")
+ pp_list = self.neutron_client.list_sfc_port_pairs()['port_pairs']
+ for pp in pp_list:
+ self.neutron_client.delete_sfc_port_pair(pp['id'])
+
+ def delete_chain(self):
+ '''
+ Delete the classifiers and the chains
+ '''
+ logger.info("Deleting the chain...")
+ pc_list = self.neutron_client.list_sfc_port_chains()['port_chains']
+ for pc in pc_list:
+ self.neutron_client.delete_sfc_port_chain(pc['id'])
+
+ logger.info("Deleting the classifiers...")
+ fc_list = self.neutron_client.\
+ list_sfc_flow_classifiers()['flow_classifiers']
+ for fc in fc_list:
+ self.neutron_client.delete_sfc_flow_classifier(fc['id'])
+# TACKER SECTION #
def get_tacker_client_version():
api_version = os.getenv('OS_TACKER_API_VERSION')
if api_version is not None:
@@ -219,7 +559,7 @@ def get_tacker_client_version():
def get_tacker_client(other_creds={}):
creds_override = None
os_creds = openstack_tests.get_credentials(
- os_env_file=CONST.__getattribute__('openstack_creds'),
+ os_env_file=constants.ENV_FILE,
overrides=creds_override)
sess = keystone_utils.keystone_session(os_creds)
return tackerclient.Client(get_tacker_client_version(), session=sess)
@@ -227,12 +567,12 @@ def get_tacker_client(other_creds={}):
def get_id_from_name(tacker_client, resource_type, resource_name):
try:
- req_params = {'fields': 'id', 'name': resource_name}
- endpoint = '/{0}s'.format(resource_type)
- resp = tacker_client.get(endpoint, params=req_params)
- endpoint = endpoint.replace('-', '_')
- return resp[endpoint[1:]][0]['id']
- except Exception, e:
+ params = {'fields': 'id', 'name': resource_name}
+ collection = resource_type + 's'
+ path = '/' + collection
+ resp = tacker_client.list(collection, path, **params)
+ return resp[collection][0]['id']
+ except Exception as e:
logger.error("Error [get_id_from_name(tacker_client, "
"resource_type, resource_name)]: %s" % e)
return None
@@ -280,22 +620,24 @@ def list_vnfds(tacker_client, verbose=False):
if not verbose:
vnfds = [vnfd['id'] for vnfd in vnfds['vnfds']]
return vnfds
- except Exception, e:
+ except Exception as e:
logger.error("Error [list_vnfds(tacker_client)]: %s" % e)
return None
def create_vnfd(tacker_client, tosca_file=None, vnfd_name=None):
+ logger.info("Creating the vnfd...")
try:
vnfd_body = {}
if tosca_file is not None:
with open(tosca_file) as tosca_fd:
- vnfd_body = tosca_fd.read()
- logger.info('VNFD template:\n{0}'.format(vnfd_body))
+ vnfd = tosca_fd.read()
+ vnfd_body = yaml.safe_load(vnfd)
+ logger.info('VNFD template:\n{0}'.format(vnfd))
return tacker_client.create_vnfd(
body={"vnfd": {"attributes": {"vnfd": vnfd_body},
"name": vnfd_name}})
- except Exception, e:
+ except Exception as e:
logger.error("Error [create_vnfd(tacker_client, '%s')]: %s"
% (tosca_file, e))
return None
@@ -309,7 +651,7 @@ def delete_vnfd(tacker_client, vnfd_id=None, vnfd_name=None):
raise Exception('You need to provide VNFD id or VNFD name')
vnfd = get_vnfd_id(tacker_client, vnfd_name)
return tacker_client.delete_vnfd(vnfd)
- except Exception, e:
+ except Exception as e:
logger.error("Error [delete_vnfd(tacker_client, '%s', '%s')]: %s"
% (vnfd_id, vnfd_name, e))
return None
@@ -321,13 +663,14 @@ def list_vnfs(tacker_client, verbose=False):
if not verbose:
vnfs = [vnf['id'] for vnf in vnfs['vnfs']]
return vnfs
- except Exception, e:
+ except Exception as e:
logger.error("Error [list_vnfs(tacker_client)]: %s" % e)
return None
def create_vnf(tacker_client, vnf_name, vnfd_id=None,
vnfd_name=None, vim_id=None, vim_name=None, param_file=None):
+ logger.info("Creating the vnf...")
try:
vnf_body = {
'vnf': {
@@ -356,7 +699,7 @@ def create_vnf(tacker_client, vnf_name, vnfd_id=None,
vnf_body['vnf']['vim_id'] = get_vim_id(tacker_client, vim_name)
return tacker_client.create_vnf(body=vnf_body)
- except Exception, e:
+ except Exception as e:
logger.error("error [create_vnf(tacker_client,"
" '%s', '%s', '%s')]: %s"
% (vnf_name, vnfd_id, vnfd_name, e))
@@ -376,18 +719,33 @@ def get_vnf(tacker_client, vnf_id=None, vnf_name=None):
else:
raise Exception('Could not retrieve ID from name [%s]' % vnf_name)
- except Exception, e:
+ except Exception as e:
logger.error("Could not retrieve VNF [vnf_id=%s, vnf_name=%s] - %s"
% (vnf_id, vnf_name, e))
return None
+def get_vnf_ip(tacker_client, vnf_id=None, vnf_name=None):
+ """
+ Get the management ip of the first VNF component as obtained from the
+ tacker REST API:
+ {
+ "vnf": {
+ ...
+ "mgmt_url": "{\"VDU1\": \"192.168.120.3\"}",
+ ...
+ }
+ }
+ """
+ vnf = get_vnf(tacker_client, vnf_id, vnf_name)
+ return list(json.loads(vnf['mgmt_url']).values())[0]
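+ # e.g. returns "192.168.120.3" for the mgmt_url shown in the docstring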
+
+
def wait_for_vnf(tacker_client, vnf_id=None, vnf_name=None, timeout=100):
try:
vnf = get_vnf(tacker_client, vnf_id, vnf_name)
if vnf is None:
raise Exception("Could not retrieve VNF - id='%s', name='%s'"
- % vnf_id, vnf_name)
+ % (vnf_id, vnf_name))
logger.info('Waiting for vnf {0}'.format(str(vnf)))
while vnf['status'] != 'ACTIVE' and timeout >= 0:
if vnf['status'] == 'ERROR':
@@ -401,7 +759,7 @@ def wait_for_vnf(tacker_client, vnf_id=None, vnf_name=None, timeout=100):
raise Exception('Timeout when booting vnf %s' % vnf['id'])
return vnf['id']
- except Exception, e:
+ except Exception as e:
logger.error("error [wait_for_vnf(tacker_client, '%s', '%s')]: %s"
% (vnf_id, vnf_name, e))
return None
@@ -415,13 +773,14 @@ def delete_vnf(tacker_client, vnf_id=None, vnf_name=None):
raise Exception('You need to provide a VNF id or name')
vnf = get_vnf_id(tacker_client, vnf_name)
return tacker_client.delete_vnf(vnf)
- except Exception, e:
+ except Exception as e:
logger.error("Error [delete_vnf(tacker_client, '%s', '%s')]: %s"
% (vnf_id, vnf_name, e))
return None
def create_vim(tacker_client, vim_file=None):
+ logger.info("Creating the vim...")
try:
vim_body = {}
if vim_file is not None:
@@ -429,38 +788,42 @@ def create_vim(tacker_client, vim_file=None):
vim_body = json.load(vim_fd)
logger.info('VIM template:\n{0}'.format(vim_body))
return tacker_client.create_vim(body=vim_body)
- except Exception, e:
+ except Exception as e:
logger.error("Error [create_vim(tacker_client, '%s')]: %s"
% (vim_file, e))
return None
def create_vnffgd(tacker_client, tosca_file=None, vnffgd_name=None):
+ logger.info("Creating the vnffgd...")
try:
vnffgd_body = {}
if tosca_file is not None:
with open(tosca_file) as tosca_fd:
- vnffgd_body = yaml.safe_load(tosca_fd)
- logger.info('VNFFGD template:\n{0}'.format(vnffgd_body))
+ vnffgd = tosca_fd.read()
+ vnffgd_body = yaml.safe_load(vnffgd)
+ logger.info('VNFFGD template:\n{0}'.format(vnffgd))
return tacker_client.create_vnffgd(
body={'vnffgd': {'name': vnffgd_name,
'template': {'vnffgd': vnffgd_body}}})
- except Exception, e:
+ except Exception as e:
logger.error("Error [create_vnffgd(tacker_client, '%s')]: %s"
% (tosca_file, e))
return None
def create_vnffg(tacker_client, vnffg_name=None, vnffgd_id=None,
- vnffgd_name=None, param_file=None):
+ vnffgd_name=None, param_file=None, symmetrical=False):
'''
Creates the vnffg which will provide the RSP and the classifier
'''
+ logger.info("Creating the vnffg...")
try:
vnffg_body = {
'vnffg': {
'attributes': {},
- 'name': vnffg_name
+ 'name': vnffg_name,
+ 'symmetrical': symmetrical
}
}
if param_file is not None:
@@ -477,7 +840,7 @@ def create_vnffg(tacker_client, vnffg_name=None, vnffgd_id=None,
vnffg_body['vnffg']['vnffgd_id'] = get_vnffgd_id(tacker_client,
vnffgd_name)
return tacker_client.create_vnffg(body=vnffg_body)
- except Exception, e:
+ except Exception as e:
logger.error("error [create_vnffg(tacker_client,"
" '%s', '%s', '%s')]: %s"
% (vnffg_name, vnffgd_id, vnffgd_name, e))
@@ -490,7 +853,7 @@ def list_vnffgds(tacker_client, verbose=False):
if not verbose:
vnffgds = [vnffgd['id'] for vnffgd in vnffgds['vnffgds']]
return vnffgds
- except Exception, e:
+ except Exception as e:
logger.error("Error [list_vnffgds(tacker_client)]: %s" % e)
return None
@@ -501,7 +864,7 @@ def list_vnffgs(tacker_client, verbose=False):
if not verbose:
vnffgs = [vnffg['id'] for vnffg in vnffgs['vnffgs']]
return vnffgs
- except Exception, e:
+ except Exception as e:
logger.error("Error [list_vnffgs(tacker_client)]: %s" % e)
return None
@@ -514,7 +877,7 @@ def delete_vnffg(tacker_client, vnffg_id=None, vnffg_name=None):
raise Exception('You need to provide a VNFFG id or name')
vnffg = get_vnffg_id(tacker_client, vnffg_name)
return tacker_client.delete_vnffg(vnffg)
- except Exception, e:
+ except Exception as e:
logger.error("Error [delete_vnffg(tacker_client, '%s', '%s')]: %s"
% (vnffg_id, vnffg_name, e))
return None
@@ -528,7 +891,7 @@ def delete_vnffgd(tacker_client, vnffgd_id=None, vnffgd_name=None):
raise Exception('You need to provide VNFFGD id or VNFFGD name')
vnffgd = get_vnffgd_id(tacker_client, vnffgd_name)
return tacker_client.delete_vnffgd(vnffgd)
- except Exception, e:
+ except Exception as e:
logger.error("Error [delete_vnffgd(tacker_client, '%s', '%s')]: %s"
% (vnffgd_id, vnffgd_name, e))
return None
@@ -540,7 +903,7 @@ def list_vims(tacker_client, verbose=False):
if not verbose:
vims = [vim['id'] for vim in vims['vims']]
return vims
- except Exception, e:
+ except Exception as e:
logger.error("Error [list_vims(tacker_client)]: %s" % e)
return None
@@ -553,7 +916,7 @@ def delete_vim(tacker_client, vim_id=None, vim_name=None):
raise Exception('You need to provide VIM id or VIM name')
vim = get_vim_id(tacker_client, vim_name)
return tacker_client.delete_vim(vim)
- except Exception, e:
+ except Exception as e:
logger.error("Error [delete_vim(tacker_client, '%s', '%s')]: %s"
% (vim_id, vim_name, e))
return None
@@ -574,17 +937,15 @@ def register_vim(tacker_client, vim_file=None):
with open(vim_file) as f:
json_dict = json.load(f)
- json_dict['vim']['auth_url'] = CONST.__getattribute__('OS_AUTH_URL')
- json_dict['vim']['auth_cred']['password'] = CONST.__getattribute__(
- 'OS_PASSWORD')
+ json_dict['vim']['auth_url'] = os.environ['OS_AUTH_URL']
+ json_dict['vim']['auth_cred']['password'] = os.environ['OS_PASSWORD']
json.dump(json_dict, open(tmp_file, 'w'))
create_vim(tacker_client, vim_file=tmp_file)
-def create_vnf_in_av_zone(
- tacker_client,
+def create_vnf_in_av_zone(tacker_client,
vnf_name,
vnfd_name,
vim_name,
@@ -596,9 +957,7 @@ def create_vnf_in_av_zone(
param_file = os.path.join(
'/tmp',
'param_{0}.json'.format(av_zone.replace('::', '_')))
- data = {
- 'zone': av_zone
- }
+ data = {'zone': av_zone}
with open(param_file, 'w+') as f:
json.dump(data, f)
create_vnf(tacker_client,
@@ -609,19 +968,28 @@ def create_vnf_in_av_zone(
def create_vnffg_with_param_file(tacker_client, vnffgd_name, vnffg_name,
- default_param_file, neutron_port):
+ default_param_file, client_port,
+ server_port=None, server_ip=None):
param_file = default_param_file
-
- if neutron_port is not None:
+ data = {}
+ if client_port:
+ data['net_src_port_id'] = client_port
+ if server_port:
+ data['net_dst_port_id'] = server_port
+ if server_ip:
+ data['ip_dst_prefix'] = server_ip
+
+ if client_port is not None or server_port is not None:
param_file = os.path.join(
'/tmp',
- 'param_{0}.json'.format(neutron_port))
- data = {
- 'net_src_port_id': neutron_port
- }
+ 'param_{0}.json'.format(vnffg_name))
with open(param_file, 'w+') as f:
json.dump(data, f)
+
+ symmetrical = True if client_port and server_port else False
+
create_vnffg(tacker_client,
vnffgd_name=vnffgd_name,
vnffg_name=vnffg_name,
- param_file=param_file)
+ param_file=param_file,
+ symmetrical=symmetrical)
diff --git a/sfc/lib/results.py b/sfc/lib/results.py
index 15d82e02..2f2edfc0 100644
--- a/sfc/lib/results.py
+++ b/sfc/lib/results.py
@@ -7,7 +7,6 @@
#
# http://www.apache.org/licenses/LICENSE-2.0
#
-
import logging
logger = logging.getLogger(__name__)
diff --git a/sfc/lib/test_utils.py b/sfc/lib/test_utils.py
index 9cdc02b2..ed50c390 100644
--- a/sfc/lib/test_utils.py
+++ b/sfc/lib/test_utils.py
@@ -10,10 +10,9 @@
import os
import subprocess
import time
-
+import shutil
+import urllib
import logging
-import functest.utils.functest_utils as ft_utils
-
logger = logging.getLogger(__name__)
SSH_OPTIONS = '-q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
@@ -51,13 +50,29 @@ def run_cmd_remote(ip, cmd, username="root", passwd="opnfv"):
return run_cmd(ssh_cmd)
+def download_url(url, dest_path):
+ """
+ Download a file to a destination path given a URL
+ """
+ name = url.rsplit('/')[-1]
+ dest = dest_path + "/" + name
+ try:
+ response = urllib.urlopen(url)
+ except Exception:
+ return False
+
+ with open(dest, 'wb') as lfile:
+ shutil.copyfileobj(response, lfile)
+ return True
+
+
def download_image(url, image_path):
image_filename = os.path.basename(image_path)
image_url = "%s/%s" % (url, image_filename)
image_dir = os.path.dirname(image_path)
if not os.path.isfile(image_path):
logger.info("Downloading image")
- ft_utils.download_url(image_url, image_dir)
+ download_url(image_url, image_dir)
else:
logger.info("Using old image")
@@ -95,7 +110,7 @@ def start_http_server(ip, iterations_check=10):
logger.info(output)
while iterations_check > 0:
- _, output, _ = run_cmd_remote(ip, "ss -na | grep *:80")
+ _, output, _ = run_cmd_remote(ip, "netstat -pntl | grep :80")
if output:
return True
else:
@@ -107,17 +122,20 @@ def start_http_server(ip, iterations_check=10):
return False
-def start_vxlan_tool(remote_ip, interface="eth0", block=None):
+def start_vxlan_tool(remote_ip, interface="eth0", output=None, block=None):
"""
Starts vxlan_tool on a remote host.
vxlan_tool.py converts a regular Service Function into an NSH-aware SF;
when the "--do forward" option is used, it decrements the NSI appropriately.
- 'block' parameters allows to specify a port where packets will be dropped.
+ 'output' allows specifying an interface through which to forward, if
+ different from the input interface.
+ The 'block' parameter allows specifying a port on which packets will be dropped.
"""
command = "nohup python /root/vxlan_tool.py"
- options = "{do} {interface} {block_option}".format(
+ options = "{do} {interface} {output_option} {block_option}".format(
do="--do forward",
interface="--interface {}".format(interface),
+ output_option="--output {}".format(output) if output else "",
block_option="--block {}".format(block) if block is not None else "")
output_redirection = "> /dev/null 2>&1"
@@ -212,11 +230,11 @@ def check_ssh(ips, retries=100):
def fill_installer_dict(installer_type):
- default_string = "defaults.installer.{}.".format(installer_type)
- installer_yaml_fields = {
- "user": default_string+"user",
- "password": default_string+"password",
- "cluster": default_string+"cluster",
- "pkey_file": default_string+"pkey_file"
- }
- return installer_yaml_fields
+ default_string = "defaults.installer.{}.".format(installer_type)
+ installer_yaml_fields = {
+ "user": default_string+"user",
+ "password": default_string+"password",
+ "cluster": default_string+"cluster",
+ "pkey_file": default_string+"pkey_file"
+ }
+ return installer_yaml_fields
diff --git a/sfc/tests/NAME_tests.py b/sfc/tests/NAME_tests.py
deleted file mode 100644
index e95004bc..00000000
--- a/sfc/tests/NAME_tests.py
+++ /dev/null
@@ -1,11 +0,0 @@
-
-def setup():
- print "SETUP!"
-
-
-def teardown():
- print "TEAR DOWN!"
-
-
-def test_basic():
- print "I RAN!"
diff --git a/sfc/tests/functest/README.tests b/sfc/tests/functest/README.tests
index d4e3df3e..f39d8888 100644
--- a/sfc/tests/functest/README.tests
+++ b/sfc/tests/functest/README.tests
@@ -34,15 +34,36 @@ will be running a firewall that blocks the traffic in a specific port (e.g.
33333). A symmetric service chain routing the traffic through this SF will be
created as well.
-1st check: The client is able to reach the server using a source port different
-from the one that the firewall blocks (e.g 22222), and the response gets back
-to the client.
+1st check: The client is able to reach the server and the response gets back
+to the client. Here the firewall is running without blocking any port.
-2nd check: The client is able to reach the server using the source port that
-the firewall blocks, but responses back from the server are blocked, as the
-symmetric service chain makes them go through the firewall that blocks on the
-destination port initially used as source port by the client (e.g. 33333).
+2nd check: The client is not able to reach the server: the firewall is
+configured to block traffic on port 80, so the request from the client is
+dropped because the symmetric service chain makes it go through the firewall.
-If the client is able to receive the response, it would be a symptom of the
+If the client is able to reach the server, it would be a symptom of the
+symmetric chain not working, as traffic would be flowing from client to server
+directly without traversing the SF.
+
+3rd check: The client request is able to reach the server, but the firewall
+is configured to block traffic on port 22222 (the client's source port), so
+the response from the server is blocked on the way back.
+
+If the server is able to reach the client, it would be a symptom of the
symmetric chain not working, as traffic would be flowing from server to client
directly without traversing the SF.
+
+4th check: The client is able to reach the server and the response gets back
+to the client. Like the 1st check, this verifies that the test ends with the
+same config as at the beginning.
+
+
+## TEST DELETION - sfc_chain_deletion ##
+
+One client and one server are created using nova. Then an SF is created using
+tacker. A service chain which routes the traffic through this SF is created as
+well. After that the chain is deleted and re-created.
+
+vxlan_tool is started on the SF and HTTP traffic is sent from the client to the
+server. If it works, vxlan_tool is reconfigured to block HTTP traffic. The
+request is tried again and should fail because the packets are dropped.
\ No newline at end of file
diff --git a/sfc/tests/functest/config.yaml b/sfc/tests/functest/config.yaml
index be37e626..021b4c39 100644
--- a/sfc/tests/functest/config.yaml
+++ b/sfc/tests/functest/config.yaml
@@ -2,10 +2,10 @@
defaults:
# odl-sfc uses custom flavors as per below params
flavor: custom
- ram_size_in_mb: 1500
- disk_size_in_gb: 10
+ ram_size_in_mb: 500
+ disk_size_in_gb: 1
vcpu_count: 1
- image_name: sfc_nsh_euphrates
+ image_name: sfc_nsh_fraser
installer:
fuel:
user: root
@@ -17,14 +17,26 @@ defaults:
osa:
user: root
pkey_file: "/root/.ssh/id_rsa"
+ compass:
+ user: root
+ pkey_file: "/root/.ssh/id_rsa"
image_format: qcow2
- image_url: "http://artifacts.opnfv.org/sfc/images/sfc_nsh_euphrates.qcow2"
+ image_url: "http://artifacts.opnfv.org/sfc/images/sfc_nsh_fraser.qcow2"
vnfd-dir: "vnfd-templates"
vnfd-default-params-file: "test-vnfd-default-params.yaml"
+ # mano_component can be [tacker, no-mano]. When it is no-mano,
+ # networking-sfc is used instead
+ mano_component: "no-mano"
+
+ # [OPTIONAL] Only when deploying VNFs without the default image (vxlan_tool)
+ # vnf_image_name: xxx
+ # vnf_image_format: yyy
+ # vnf_image_url: zzz
testcases:
sfc_one_chain_two_service_functions:
+ class_name: "SfcOneChainTwoServiceTC"
enabled: true
order: 0
description: "ODL-SFC Testing SFs when they are located on the same chain"
@@ -34,12 +46,21 @@ testcases:
subnet_cidr: "11.0.0.0/24"
secgroup_name: "example-sg"
secgroup_descr: "Example Security group"
+ vnf_names:
+ - 'testVNF1'
+ - 'testVNF2'
+ supported_installers:
+ - 'fuel'
+ - 'apex'
+ - 'osa'
+ - 'compass'
test_vnfd_red: "test-one-chain-vnfd1.yaml"
test_vnfd_blue: "test-one-chain-vnfd2.yaml"
test_vnffgd_red: "test-one-chain-vnffgd.yaml"
sfc_two_chains_SSH_and_HTTP:
- enabled: false
+ class_name: "SfcTwoChainsSSHandHTTP"
+ enabled: true
order: 1
description: "ODL-SFC tests with two chains and one SF per chain"
net_name: example-net
@@ -48,13 +69,22 @@ testcases:
subnet_cidr: "11.0.0.0/24"
secgroup_name: "example-sg"
secgroup_descr: "Example Security group"
+ vnf_names:
+ - 'testVNF1'
+ - 'testVNF2'
+ supported_installers:
+ - 'fuel'
+ - 'apex'
+ - 'osa'
+ - 'compass'
test_vnfd_red: "test-two-chains-vnfd1.yaml"
test_vnfd_blue: "test-two-chains-vnfd2.yaml"
test_vnffgd_red: "test-two-chains-vnffgd1.yaml"
test_vnffgd_blue: "test-two-chains-vnffgd2.yaml"
sfc_symmetric_chain:
- enabled: false
+ class_name: "SfcSymmetricChain"
+ enabled: true
order: 2
description: "Verify the behavior of a symmetric service chain"
net_name: example-net
@@ -63,6 +93,34 @@ testcases:
subnet_cidr: "11.0.0.0/24"
secgroup_name: "example-sg"
secgroup_descr: "Example Security group"
+ vnf_names:
+ - 'testVNF1'
+ supported_installers:
+ - 'fuel'
+ - 'apex'
+ - 'osa'
+ - 'compass'
test_vnfd: "test-symmetric-vnfd.yaml"
- allowed_source_port: 22222
- blocked_source_port: 33333
+ test_vnffgd: "test-symmetric-vnffgd.yaml"
+ source_port: 22222
+
+ sfc_chain_deletion:
+ class_name: "SfcChainDeletion"
+ enabled: true
+ order: 3
+ description: "Verify if chains work correctly after deleting one"
+ net_name: example-net
+ subnet_name: example-subnet
+ router_name: example-router
+ subnet_cidr: "11.0.0.0/24"
+ secgroup_name: "example-sg"
+ secgroup_descr: "Example Security group"
+ vnf_names:
+ - 'testVNF1'
+ supported_installers:
+ - 'fuel'
+ - 'apex'
+ - 'osa'
+ - 'compass'
+ test_vnfd_red: "test-one-chain-vnfd1.yaml"
+ test_vnffgd_red: "test-deletion-vnffgd.yaml"
diff --git a/sfc/tests/functest/pod.yaml.sample b/sfc/tests/functest/pod.yaml.sample
new file mode 100644
index 00000000..aa5fddad
--- /dev/null
+++ b/sfc/tests/functest/pod.yaml.sample
@@ -0,0 +1,58 @@
+# A sample config file with the POD information is located under the dovetail project:
+# https://github.com/opnfv/dovetail/blob/master/etc/userconfig/pod.yaml.sample
+# On top of that template, node0 is defined here with the role Host. After that,
+# the proper number of controller nodes should be defined and, at the end,
+# the respective compute nodes.
+
+nodes:
+-
+ # This cannot be changed and must be node0.
+ name: node0
+
+ # This must be Host.
+ role: Host
+
+ # This is the instance IP of the node from which the tests are executed.
+ ip: xx.xx.xx.xx
+
+ # User name of the user of this node. This user **must** have sudo privileges.
+ user: root
+
+ # keyfile of the user.
+ key_filename: /root/.ssh/id_rsa
+
+-
+ # This cannot be changed and must be node1.
+ name: node1
+
+ # This must be controller.
+ role: Controller
+
+ # This is the instance IP of a controller node
+ ip: xx.xx.xx.xx
+
+ # User name of the user of this node. This user **must** have sudo privileges.
+ user: root
+
+ # keyfile of the user.
+ key_filename: /root/.ssh/id_rsa
+
+-
+ # This cannot be changed and must be node2.
+ name: node2
+
+ # This must be compute.
+ role: Compute
+
+ # This is the instance IP of a compute node
+ ip: xx.xx.xx.xx
+
+ # User name of the user of this node. This user **must** have sudo privileges.
+ user: root
+
+ # keyfile of the user.
+ key_filename: /root/.ssh/id_rsa
+
+ # Private ssh key for accessing the controller nodes. If there is no
+ # keyfile for that user, the password of the user can be used instead.
+ # password: root
\ No newline at end of file
diff --git a/sfc/tests/functest/register-vim.json b/sfc/tests/functest/register-vim.json
index 00719449..342fd337 100644
--- a/sfc/tests/functest/register-vim.json
+++ b/sfc/tests/functest/register-vim.json
@@ -9,7 +9,8 @@
"username": "admin",
"user_domain_name": "Default",
"password": "",
- "user_id": ""
+ "user_id": "",
+ "cert_verify": "False"
},
"auth_url": "",
"type": "openstack",
diff --git a/sfc/tests/functest/register-vim.json-queens b/sfc/tests/functest/register-vim.json-queens
new file mode 100644
index 00000000..342fd337
--- /dev/null
+++ b/sfc/tests/functest/register-vim.json-queens
@@ -0,0 +1,19 @@
+{
+ "vim": {
+ "vim_project": {
+ "project_domain_name": "Default",
+ "id": "",
+ "name": "admin"
+ },
+ "auth_cred": {
+ "username": "admin",
+ "user_domain_name": "Default",
+ "password": "",
+ "user_id": "",
+ "cert_verify": "False"
+ },
+ "auth_url": "",
+ "type": "openstack",
+ "name": "test-vim"
+ }
+}
diff --git a/sfc/tests/functest/run_sfc_tests.py b/sfc/tests/functest/run_sfc_tests.py
index a1e73040..7f0eaa8a 100644
--- a/sfc/tests/functest/run_sfc_tests.py
+++ b/sfc/tests/functest/run_sfc_tests.py
@@ -1,4 +1,4 @@
-#!/bin/python
+#!/usr/bin/env python
#
# Copyright (c) 2015 All rights reserved
# This program and the accompanying materials
@@ -11,18 +11,17 @@
import importlib
import os
import time
+import logging
import sys
import yaml
-from functest.core import testcase
+from collections import OrderedDict
from opnfv.utils import ovs_logger as ovs_log
from opnfv.deployment.factory import Factory as DeploymentFactory
from sfc.lib import cleanup as sfc_cleanup
from sfc.lib import config as sfc_config
from sfc.lib import odl_utils as odl_utils
-
-from collections import OrderedDict
-import logging
+from xtesting.core import testcase
logger = logging.getLogger(__name__)
COMMON_CONFIG = sfc_config.CommonConfig()
@@ -30,6 +29,13 @@ COMMON_CONFIG = sfc_config.CommonConfig()
class SfcFunctest(testcase.TestCase):
+ def __init__(self, **kwargs):
+ super(SfcFunctest, self).__init__(**kwargs)
+
+ self.cleanup_flag = True
+ if '--nocleanup' in sys.argv:
+ self.cleanup_flag = False
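+ # e.g. "run_sfc_tests.py --nocleanup" keeps all created resources
+ # after the run, which helps when debugging a failed testcase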
+
def __fetch_tackerc_file(self, controller_node):
rc_file = os.path.join(COMMON_CONFIG.sfc_test_dir, 'tackerc')
if not os.path.exists(rc_file):
@@ -98,35 +104,49 @@ class SfcFunctest(testcase.TestCase):
time.sleep(10)
def __disable_heat_resource_finder_cache(self, nodes, installer_type):
- controllers = [node for node in nodes if node.is_controller()]
+
+ if COMMON_CONFIG.installer_type != 'configByUser':
+ controllers = [node for node in nodes if node.is_controller()]
+ else:
+ controllers = []
+ for n in COMMON_CONFIG.nodes_pod:
+ if n['role'] == 'Controller':
+ controllers.append(n)
+ logger.info("CONTROLLER : %s", controllers)
if installer_type == 'apex':
self.__disable_heat_resource_finder_cache_apex(controllers)
elif installer_type == "fuel":
self.__disable_heat_resource_finder_cache_fuel(controllers)
- elif installer_type == "osa":
+ elif installer_type == "osa" or "compass" or "configByUser":
pass
else:
raise Exception('Unsupported installer')
def run(self):
- deploymentHandler = DeploymentFactory.get_handler(
- COMMON_CONFIG.installer_type,
- COMMON_CONFIG.installer_ip,
- COMMON_CONFIG.installer_user,
- COMMON_CONFIG.installer_password,
- COMMON_CONFIG.installer_key_file)
-
cluster = COMMON_CONFIG.installer_cluster
- nodes = (deploymentHandler.get_nodes({'cluster': cluster})
- if cluster is not None
- else deploymentHandler.get_nodes())
+ if COMMON_CONFIG.installer_type != 'configByUser':
+ deploymentHandler = DeploymentFactory.get_handler(
+ COMMON_CONFIG.installer_type,
+ COMMON_CONFIG.installer_ip,
+ COMMON_CONFIG.installer_user,
+ COMMON_CONFIG.installer_password,
+ COMMON_CONFIG.installer_key_file)
+
+ nodes = (deploymentHandler.get_nodes({'cluster': cluster})
+ if cluster is not None
+ else deploymentHandler.get_nodes())
+ self.__disable_heat_resource_finder_cache(nodes,
+ COMMON_CONFIG.
+ installer_type)
+ odl_ip, odl_port = odl_utils.get_odl_ip_port(nodes)
- self.__disable_heat_resource_finder_cache(nodes,
- COMMON_CONFIG.installer_type)
-
- odl_ip, odl_port = odl_utils.get_odl_ip_port(nodes)
+ else:
+ nodes = COMMON_CONFIG.nodes_pod
+ self.__disable_heat_resource_finder_cache(nodes, "configByUser")
+ odl_ip, odl_port = odl_utils. \
+ get_odl_ip_port_no_installer(COMMON_CONFIG.nodes_pod)
ovs_logger = ovs_log.OVSLogger(
os.path.join(COMMON_CONFIG.sfc_test_dir, 'ovs-logs'),
@@ -149,16 +169,33 @@ class SfcFunctest(testcase.TestCase):
(test_name, test_descr))
logger.info(title)
logger.info("%s\n" % ("=" * len(title)))
- t = importlib.import_module(
+ module = importlib.import_module(
"sfc.tests.functest.{0}".format(test_name),
package=None)
+
+ testcase_config = sfc_config.TestcaseConfig(test_name)
+ supported_installers = test_cfg['supported_installers']
+ vnf_names = test_cfg['vnf_names']
+
+ tc_class = getattr(module, test_cfg['class_name'])
+ tc_instance = tc_class(testcase_config, supported_installers,
+ vnf_names)
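+ # The class named by 'class_name' in config.yaml is resolved at
+ # runtime, so adding a testcase only requires a config entry and
+ # a module under sfc/tests/functest/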
+ cleanup_run_flag = False
start_time = time.time()
try:
- result, creators = t.main()
+ result, creators = tc_instance.run()
except Exception as e:
logger.error("Exception when executing: %s" % test_name)
logger.error(e)
result = {'status': 'FAILED'}
+ creators = tc_instance.get_creators()
+ if self.cleanup_flag is True:
+ sfc_cleanup.cleanup(testcase_config,
+ creators,
+ COMMON_CONFIG.mano_component,
+ odl_ip=odl_ip,
+ odl_port=odl_port)
+ cleanup_run_flag = True
end_time = time.time()
duration = end_time - start_time
logger.info("Results of test case '%s - %s':\n%s\n" %
@@ -176,7 +213,13 @@ class SfcFunctest(testcase.TestCase):
dic = {"duration": duration, "status": status}
self.details.update({test_name: dic})
- sfc_cleanup.cleanup(creators, odl_ip=odl_ip, odl_port=odl_port)
+
+ if cleanup_run_flag is not True and self.cleanup_flag is True:
+ sfc_cleanup.cleanup(testcase_config,
+ creators,
+ COMMON_CONFIG.mano_component,
+ odl_ip=odl_ip,
+ odl_port=odl_port)
self.stop_time = time.time()
@@ -187,8 +230,7 @@ class SfcFunctest(testcase.TestCase):
return testcase.TestCase.EX_RUN_ERROR
-if __name__ == '__main__':
- logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s'
- '- %(levelname)s - %(message)s')
+def main():
+ logging.basicConfig(level=logging.INFO)
SFC = SfcFunctest()
sys.exit(SFC.run())
diff --git a/sfc/tests/functest/setup_scripts/compute_presetup_CI.bash b/sfc/tests/functest/setup_scripts/compute_presetup_CI.bash
deleted file mode 100644
index 36148aa1..00000000
--- a/sfc/tests/functest/setup_scripts/compute_presetup_CI.bash
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-
-# This script must be use with vxlan-gpe + nsh. Once we have eth + nsh support
-# in ODL, we will not need it anymore
-
-set -e
-ssh_options='-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
-BASEDIR=`dirname $0`
-INSTALLER_IP=${INSTALLER_IP:-10.20.0.2}
-
-pushd $BASEDIR
-#ip=`sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'fuel node'|grep compute|\
-#awk '{print $10}' | head -1`
-
-ip=$1
-echo $ip
-#sshpass -p r00tme scp $ssh_options correct_classifier.bash ${INSTALLER_IP}:/root
-#sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'scp correct_classifier.bash '"$ip"':/root'
-
-sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'ssh root@'"$ip"' ifconfig br-int up'
-output=$(sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'ssh root@'"$ip"' ip route | \
-cut -d" " -f1 | grep 11.0.0.0' ; exit 0)
-
-if [ -z "$output" ]; then
-sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'ssh root@'"$ip"' ip route add 11.0.0.0/24 \
-dev br-int'
-fi
diff --git a/sfc/tests/functest/setup_scripts/delete.sh b/sfc/tests/functest/setup_scripts/delete.sh
deleted file mode 100644
index 3333c52b..00000000
--- a/sfc/tests/functest/setup_scripts/delete.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-# Remember to source the env variables $creds before
-FILE=$(readlink -f $0)
-FILE_PATH=$(dirname $FILE)
-cd $FILE_PATH
-python ../../../lib/cleanup.py $1 $2
-openstack server delete client
-openstack server delete server
-for line in $(openstack floating ip list);do openstack floating ip delete $line;done
diff --git a/sfc/tests/functest/setup_scripts/delete_symmetric.sh b/sfc/tests/functest/setup_scripts/delete_symmetric.sh
deleted file mode 100644
index b0aa4d81..00000000
--- a/sfc/tests/functest/setup_scripts/delete_symmetric.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-# Remember to source the env variables $creds before
-tacker sfc-classifier-delete red_http
-tacker sfc-classifier-delete red_http_reverse
-tacker sfc-delete red
-tacker vnf-delete testVNF1
-tacker vnfd-delete test-vnfd1
-nova delete client
-nova delete server
-for line in $(neutron floatingip-list | cut -d" " -f2);do neutron floatingip-delete $line;done
diff --git a/sfc/tests/functest/setup_scripts/prepare_odl_sfc.py b/sfc/tests/functest/setup_scripts/prepare_odl_sfc.py
deleted file mode 100644
index 1ddf36a6..00000000
--- a/sfc/tests/functest/setup_scripts/prepare_odl_sfc.py
+++ /dev/null
@@ -1,92 +0,0 @@
-#
-# Author: George Paraskevopoulos (geopar@intracom-telecom.com)
-# Manuel Buil (manuel.buil@ericsson.com)
-# Prepares the controller and the compute nodes for the odl-sfc testcase
-#
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-
-import os
-import paramiko
-import subprocess
-import sys
-
-import functest.utils.functest_logger as ft_logger
-
-
-logger = ft_logger.Logger(__name__).getLogger()
-
-SFC_REPO_DIR = "/home/opnfv/repos/sfc"
-
-try:
- INSTALLER_IP = os.environ['INSTALLER_IP']
-except:
- logger.debug("INSTALLER_IP does not exist. We create 10.20.0.2")
- INSTALLER_IP = "10.20.0.2"
-
-os.environ['ODL_SFC_LOG'] = "/home/opnfv/functest/results/sfc.log"
-os.environ['ODL_SFC_DIR'] = os.path.join(SFC_REPO_DIR,
- "sfc/tests/functest")
-SETUP_SCRIPTS_DIR = os.path.join(os.environ['ODL_SFC_DIR'], 'setup_scripts')
-
-command = SETUP_SCRIPTS_DIR + ("/server_presetup_CI.bash | "
- "tee -a ${ODL_SFC_LOG} 1>/dev/null 2>&1")
-
-output = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
-
-# This code is for debugging purposes
-# for line in iter(output.stdout.readline, ''):
-# i = line.rstrip()
-# print(i)
-
-# Make sure the process is finished before checking the returncode
-if not output.poll():
- output.wait()
-
-# Get return value
-if output.returncode:
- print("The presetup of the server did not work")
- sys.exit(output.returncode)
-
-logger.info("The presetup of the server worked ")
-
-ssh_options = "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
-ssh = paramiko.SSHClient()
-ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-
-try:
- ssh.connect(INSTALLER_IP, username="root",
- password="r00tme", timeout=2)
- command = "fuel node | grep compute | awk '{print $10}'"
- logger.info("Executing ssh to collect the compute IPs")
- (stdin, stdout, stderr) = ssh.exec_command(command)
-except:
- logger.debug("Something went wrong in the ssh to collect the computes IP")
-
-output = stdout.readlines()
-for ip in output:
- command = SETUP_SCRIPTS_DIR + ("/compute_presetup_CI.bash " + ip.rstrip() +
- "| tee -a ${ODL_SFC_LOG} 1>/dev/null 2>&1")
-
- output = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
-
-# This code is for debugging purposes
-# for line in iter(output.stdout.readline, ''):
-# print(line)
-# sys.stdout.flush()
-
- output.stdout.close()
-
- if not (output.poll()):
- output.wait()
-
- # Get return value
- if output.returncode:
- print("The compute config did not work on compute %s" % ip)
- sys.exit(output.returncode)
-
-sys.exit(0)
diff --git a/sfc/tests/functest/setup_scripts/server_presetup_CI.bash b/sfc/tests/functest/setup_scripts/server_presetup_CI.bash
deleted file mode 100644
index 240353f5..00000000
--- a/sfc/tests/functest/setup_scripts/server_presetup_CI.bash
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-set -e
-ssh_options='-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
-BASEDIR=`dirname $0`
-INSTALLER_IP=${INSTALLER_IP:-10.20.0.2}
-
-pushd $BASEDIR
-ip=$(sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'fuel node'|grep controller|awk '{print $10}' | head -1)
-echo $ip
-
-sshpass -p r00tme scp $ssh_options delete.sh ${INSTALLER_IP}:/root
-sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'scp '"$ip"':/root/tackerc .'
-sshpass -p r00tme scp $ssh_options ${INSTALLER_IP}:/root/tackerc $BASEDIR
diff --git a/sfc/tests/functest/sfc_chain_deletion.py b/sfc/tests/functest/sfc_chain_deletion.py
new file mode 100644
index 00000000..5f73d0c7
--- /dev/null
+++ b/sfc/tests/functest/sfc_chain_deletion.py
@@ -0,0 +1,120 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2015 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+import threading
+import logging
+import urllib3
+
+import sfc.lib.odl_utils as odl_utils
+import sfc.lib.config as sfc_config
+import sfc.lib.test_utils as test_utils
+from sfc.tests.functest import sfc_parent_function
+
+logger = logging.getLogger(__name__)
+
+
+class SfcChainDeletion(sfc_parent_function.SfcCommonTestCase):
+ """We create one client and one server using nova.
+ Then, a SF is created using tacker.
+ A service chain routing the traffic
+ throught this SF will be created as well.
+ After that the chain is deleted and re-created.
+ Finally, the vxlan tool is used in order to check a single
+ HTTP traffic scenario.
+ """
+ def run(self):
+
+ logger.info("The test scenario %s is starting", __name__)
+ self.register_vnf_template(self.testcase_config.test_vnfd_red,
+ 'test-vnfd1')
+ self.create_vnf(self.vnfs[0], 'test-vnfd1', 'test-vim')
+
+ self.create_vnffg(self.testcase_config.test_vnffgd_red, 'red',
+ 'red_http', port=80, protocol='tcp', symmetric=False)
+
+ t1 = threading.Thread(target=odl_utils.wait_for_classification_rules,
+ args=(self.ovs_logger, self.compute_nodes,
+ self.odl_ip, self.odl_port,
+ self.client_instance.hypervisor_hostname,
+ [self.neutron_port],))
+ try:
+ t1.start()
+ except Exception as e:
+ logger.error("Unable to start the thread that counts time %s" % e)
+
+ logger.info("Assigning floating IPs to instances")
+
+ self.assign_floating_ip_client_server()
+ self.assign_floating_ip_sfs()
+
+ self.check_floating_ips()
+
+ self.start_services_in_vm()
+ logger.info("Wait for ODL to update the classification rules in OVS")
+ t1.join()
+
+ self.remove_vnffg('red_http', 'red')
+ self.check_deletion()
+
+ self.create_vnffg(self.testcase_config.test_vnffgd_red, 'blue',
+ 'blue_http', port=80, protocol='tcp',
+ symmetric=False)
+
+ t2 = threading.Thread(target=odl_utils.wait_for_classification_rules,
+ args=(self.ovs_logger, self.compute_nodes,
+ self.odl_ip, self.odl_port,
+ self.client_instance.hypervisor_hostname,
+ [self.neutron_port],))
+ try:
+ t2.start()
+ except Exception as e:
+ logger.error("Unable to start the thread that counts time %s" % e)
+
+ logger.info("Starting SSH firewall on %s" % self.fips_sfs[0])
+ test_utils.start_vxlan_tool(self.fips_sfs[0])
+
+ logger.info("Wait for ODL to update the classification rules in OVS")
+ t2.join()
+
+ logger.info("Test HTTP")
+ results = self.present_results_allowed_http()
+
+ self.vxlan_blocking_start(self.fips_sfs[0], "80")
+
+ logger.info("Test HTTP again")
+ results = self.present_results_http()
+
+ if __name__ == '__main__':
+ return results.compile_summary(), self.creators
+
+ if __name__ == 'sfc.tests.functest.sfc_chain_deletion':
+ return results.compile_summary(), self.creators
+
+ def get_creators(self):
+ """Return the creators info, specially in case the info is not
+ returned due to an exception.
+
+ :return: creators
+ """
+ return self.creators
+
+
+if __name__ == '__main__':
+
+ # Disable InsecureRequestWarning errors when executing the SFC tests in XCI
+ urllib3.disable_warnings()
+
+ TESTCASE_CONFIG = sfc_config.TestcaseConfig('sfc_chain_deletion')
+ supported_installers = ['fuel', 'apex', 'osa', 'compass']
+ vnf_names = ['testVNF1', 'testVNF2']
+
+ test_run = SfcChainDeletion(TESTCASE_CONFIG, supported_installers,
+ vnf_names)
+ test_run.run()
diff --git a/sfc/tests/functest/sfc_one_chain_two_service_functions.py b/sfc/tests/functest/sfc_one_chain_two_service_functions.py
index 043b5a6a..38fa3fef 100644
--- a/sfc/tests/functest/sfc_one_chain_two_service_functions.py
+++ b/sfc/tests/functest/sfc_one_chain_two_service_functions.py
@@ -7,277 +7,97 @@
#
# http://www.apache.org/licenses/LICENSE-2.0
#
-import os
-import sys
import threading
import logging
+import urllib3
-import sfc.lib.openstack_utils as os_sfc_utils
import sfc.lib.odl_utils as odl_utils
-import opnfv.utils.ovs_logger as ovs_log
-
import sfc.lib.config as sfc_config
-import sfc.lib.test_utils as test_utils
-from sfc.lib.results import Results
-from opnfv.deployment.factory import Factory as DeploymentFactory
-import sfc.lib.topology_shuffler as topo_shuffler
+from sfc.tests.functest import sfc_parent_function
""" logging configuration """
logger = logging.getLogger(__name__)
-CLIENT = "client"
-SERVER = "server"
-COMMON_CONFIG = sfc_config.CommonConfig()
-TESTCASE_CONFIG = sfc_config.TestcaseConfig(
- 'sfc_one_chain_two_service'
- '_functions')
+class SfcOneChainTwoServiceTC(sfc_parent_function.SfcCommonTestCase):
+ """We create one client and one server using nova.
+ Then, 2 SFs are created using tacker.
+ A chain is created where both SFs are included.
+ The vxlan tool is used on both SFs. The purpose is to
+ check different HTTP traffic combinations.
+ """
+ def run(self):
-def main():
- deploymentHandler = DeploymentFactory.get_handler(
- COMMON_CONFIG.installer_type,
- COMMON_CONFIG.installer_ip,
- COMMON_CONFIG.installer_user,
- COMMON_CONFIG.installer_password,
- COMMON_CONFIG.installer_key_file)
+ logger.info("The test scenario %s is starting", __name__)
- installer_type = os.environ.get("INSTALLER_TYPE")
+ self.register_vnf_template(self.testcase_config.test_vnfd_red,
+ 'test-vnfd1')
+ self.register_vnf_template(self.testcase_config.test_vnfd_blue,
+ 'test-vnfd2')
- supported_installers = ['fuel', 'apex', 'osa']
+ self.create_vnf(self.vnfs[0], 'test-vnfd1', 'test-vim')
+ self.create_vnf(self.vnfs[1], 'test-vnfd2', 'test-vim')
- if installer_type not in supported_installers:
- logger.error(
- '\033[91mYour installer is not supported yet\033[0m')
- sys.exit(1)
+ self.create_vnffg(self.testcase_config.test_vnffgd_red, 'red',
+ 'red_http', port=80, protocol='tcp', symmetric=False)
+ # Start measuring the time it takes to implement the
+ # classification rules
+ t1 = threading.Thread(target=odl_utils.wait_for_classification_rules,
+ args=(self.ovs_logger, self.compute_nodes,
+ self.odl_ip, self.odl_port,
+ self.client_instance.hypervisor_hostname,
+ [self.neutron_port],))
+ try:
+ t1.start()
+ except Exception as e:
+ logger.error("Unable to start the thread that counts time %s" % e)
- installer_ip = os.environ.get("INSTALLER_IP")
- if not installer_ip:
- logger.error(
- '\033[91minstaller ip is not set\033[0m')
- logger.error(
- '\033[91mexport INSTALLER_IP=<ip>\033[0m')
- sys.exit(1)
+ self.assign_floating_ip_client_server()
- cluster = COMMON_CONFIG.installer_cluster
+ self.assign_floating_ip_sfs()
- openstack_nodes = (deploymentHandler.get_nodes({'cluster': cluster})
- if cluster is not None
- else deploymentHandler.get_nodes())
+ self.check_floating_ips()
+ self.start_services_in_vm()
- controller_nodes = [node for node in openstack_nodes
- if node.is_controller()]
- compute_nodes = [node for node in openstack_nodes
- if node.is_compute()]
+ t1.join()
- odl_ip, odl_port = odl_utils.get_odl_ip_port(openstack_nodes)
+ logger.info("Allowed HTTP scenario")
+ results = self.present_results_allowed_http()
- for compute in compute_nodes:
- logger.info("This is a compute: %s" % compute.ip)
+ self.vxlan_blocking_start(self.fips_sfs[0], "80")
+ results = self.present_results_http()
- results = Results(COMMON_CONFIG.line_length)
- results.add_to_summary(0, "=")
- results.add_to_summary(2, "STATUS", "SUBTEST")
- results.add_to_summary(0, "=")
+ self.vxlan_blocking_start(self.fips_sfs[1], "80")
+ self.vxlan_blocking_stop(self.fips_sfs[0])
- openstack_sfc = os_sfc_utils.OpenStackSFC()
+ results = self.present_results_http()
- custom_flv = openstack_sfc.create_flavor(
- COMMON_CONFIG.flavor,
- COMMON_CONFIG.ram_size_in_mb,
- COMMON_CONFIG.disk_size_in_gb,
- COMMON_CONFIG.vcpu_count)
- if not custom_flv:
- logger.error("Failed to create custom flavor")
- sys.exit(1)
+ if __name__ == '__main__':
+ return results.compile_summary(), self.creators
- tacker_client = os_sfc_utils.get_tacker_client()
+ if __name__ == \
+ 'sfc.tests.functest.sfc_one_chain_two_service_functions':
+ return results.compile_summary(), self.creators
- controller_clients = test_utils.get_ssh_clients(controller_nodes)
- compute_clients = test_utils.get_ssh_clients(compute_nodes)
+ def get_creators(self):
+ """Return the creators info, specially in case the info is not
+ returned due to an exception.
- ovs_logger = ovs_log.OVSLogger(
- os.path.join(COMMON_CONFIG.sfc_test_dir, 'ovs-logs'),
- COMMON_CONFIG.functest_results_dir)
+ :return: creators
+ """
+ return self.creators
- image_creator = openstack_sfc.register_glance_image(
- COMMON_CONFIG.image_name,
- COMMON_CONFIG.image_url,
- COMMON_CONFIG.image_format,
- 'public')
- network, router = openstack_sfc.create_network_infrastructure(
- TESTCASE_CONFIG.net_name,
- TESTCASE_CONFIG.subnet_name,
- TESTCASE_CONFIG.subnet_cidr,
- TESTCASE_CONFIG.router_name)
+if __name__ == '__main__':
- sg = openstack_sfc.create_security_group(TESTCASE_CONFIG.secgroup_name)
+ # Disable InsecureRequestWarning errors when executing the SFC tests in XCI
+ urllib3.disable_warnings()
+ TESTCASE_CONFIG = sfc_config.TestcaseConfig('sfc_one_chain_two_service'
+ '_functions')
+ supported_installers = ['fuel', 'apex', 'osa', 'compass']
vnfs = ['testVNF1', 'testVNF2']
- topo_seed = topo_shuffler.get_seed()
- testTopology = topo_shuffler.topology(vnfs, openstack_sfc, seed=topo_seed)
-
- logger.info('This test is run with the topology {0}'
- .format(testTopology['id']))
- logger.info('Topology description: {0}'
- .format(testTopology['description']))
-
- client_instance, client_creator = openstack_sfc.create_instance(
- CLIENT, COMMON_CONFIG.flavor, image_creator, network, sg,
- av_zone=testTopology['client'])
-
- server_instance, server_creator = openstack_sfc.create_instance(
- SERVER, COMMON_CONFIG.flavor, image_creator, network, sg,
- av_zone=testTopology['server'])
-
- server_ip = server_instance.ports[0].ips[0]['ip_address']
- logger.info("Server instance received private ip [{}]".format(server_ip))
-
- os_sfc_utils.register_vim(tacker_client, vim_file=COMMON_CONFIG.vim_file)
-
- tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- TESTCASE_CONFIG.test_vnfd_red)
-
- os_sfc_utils.create_vnfd(
- tacker_client,
- tosca_file=tosca_file, vnfd_name='test-vnfd1')
-
- tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- TESTCASE_CONFIG.test_vnfd_blue)
- os_sfc_utils.create_vnfd(
- tacker_client,
- tosca_file=tosca_file, vnfd_name='test-vnfd2')
-
- default_param_file = os.path.join(
- COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- COMMON_CONFIG.vnfd_default_params_file)
-
- os_sfc_utils.create_vnf_in_av_zone(
- tacker_client, vnfs[0], 'test-vnfd1', 'test-vim',
- default_param_file, testTopology[vnfs[0]])
- os_sfc_utils.create_vnf_in_av_zone(
- tacker_client, vnfs[1], 'test-vnfd2', 'test-vim',
- default_param_file, testTopology[vnfs[1]])
-
- vnf1_id = os_sfc_utils.wait_for_vnf(tacker_client, vnf_name=vnfs[0])
- vnf2_id = os_sfc_utils.wait_for_vnf(tacker_client, vnf_name=vnfs[1])
- if vnf1_id is None or vnf2_id is None:
- logger.error('ERROR while booting vnfs')
- sys.exit(1)
-
- tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnffgd_dir,
- TESTCASE_CONFIG.test_vnffgd_red)
-
- os_sfc_utils.create_vnffgd(tacker_client,
- tosca_file=tosca_file,
- vnffgd_name='red')
-
- neutron_port = openstack_sfc.get_client_port_id(client_instance)
- os_sfc_utils.create_vnffg_with_param_file(tacker_client, 'red',
- 'red_http',
- default_param_file,
- neutron_port.id)
-
- # Start measuring the time it takes to implement the classification rules
- t1 = threading.Thread(target=odl_utils.wait_for_classification_rules,
- args=(ovs_logger, compute_nodes, odl_ip, odl_port,))
- try:
- t1.start()
- except Exception as e:
- logger.error("Unable to start the thread that counts time %s" % e)
-
- logger.info("Assigning floating IPs to instances")
- client_floating_ip = openstack_sfc.assign_floating_ip(router,
- client_instance,
- client_creator)
- server_floating_ip = openstack_sfc.assign_floating_ip(router,
- server_instance,
- server_creator)
- fips_sfs = openstack_sfc.assign_floating_ip_vnfs(router)
- sf1_floating_ip = fips_sfs[0]
- sf2_floating_ip = fips_sfs[1]
-
- fips = [client_floating_ip, server_floating_ip, sf1_floating_ip,
- sf2_floating_ip]
-
- for ip in fips:
- logger.info("Checking connectivity towards floating IP [%s]" % ip)
- if not test_utils.ping(ip, retries=50, retry_timeout=3):
- logger.error("Cannot ping floating IP [%s]" % ip)
- os_sfc_utils.get_tacker_items()
- odl_utils.get_odl_items(odl_ip, odl_port)
- sys.exit(1)
- logger.info("Successful ping to floating IP [%s]" % ip)
-
- if not test_utils.check_ssh([sf1_floating_ip, sf2_floating_ip]):
- logger.error("Cannot establish SSH connection to the SFs")
- sys.exit(1)
-
- logger.info("Starting HTTP server on %s" % server_floating_ip)
- if not test_utils.start_http_server(server_floating_ip):
- logger.error(
- 'Failed to start HTTP server on %s' % server_floating_ip)
- sys.exit(1)
-
- for sf_floating_ip in (sf1_floating_ip, sf2_floating_ip):
- logger.info("Starting vxlan_tool on %s" % sf_floating_ip)
- test_utils.start_vxlan_tool(sf_floating_ip)
-
- logger.info("Wait for ODL to update the classification rules in OVS")
- t1.join()
-
- logger.info("Test HTTP")
- if not test_utils.is_http_blocked(client_floating_ip, server_ip):
- results.add_to_summary(2, "PASS", "HTTP works")
- else:
- error = ('\033[91mTEST 1 [FAILED] ==> HTTP BLOCKED\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP blocked")
-
- logger.info("Changing the vxlan_tool to block HTTP traffic")
-
- # Make SF1 block http traffic
- test_utils.stop_vxlan_tool(sf1_floating_ip)
- logger.info("Starting HTTP firewall on %s" % sf1_floating_ip)
- test_utils.start_vxlan_tool(sf1_floating_ip, block="80")
-
- logger.info("Test HTTP again blocking SF1")
- if test_utils.is_http_blocked(client_floating_ip, server_ip):
- results.add_to_summary(2, "PASS", "HTTP Blocked")
- else:
- error = ('\033[91mTEST 2 [FAILED] ==> HTTP WORKS\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP not blocked")
-
- # Make SF2 block http traffic
- test_utils.stop_vxlan_tool(sf2_floating_ip)
- logger.info("Starting HTTP firewall on %s" % sf2_floating_ip)
- test_utils.start_vxlan_tool(sf2_floating_ip, block="80")
- logger.info("Stopping HTTP firewall on %s" % sf1_floating_ip)
- test_utils.stop_vxlan_tool(sf1_floating_ip)
-
- logger.info("Test HTTP again blocking SF2")
- if test_utils.is_http_blocked(client_floating_ip, server_ip):
- results.add_to_summary(2, "PASS", "HTTP Blocked")
- else:
- error = ('\033[91mTEST 3 [FAILED] ==> HTTP WORKS\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP not blocked")
-
- return results.compile_summary(), openstack_sfc.creators
-
-
-if __name__ == '__main__':
- logging.config.fileConfig(COMMON_CONFIG.functest_logging_api)
- main()
+ test_run = SfcOneChainTwoServiceTC(TESTCASE_CONFIG, supported_installers,
+ vnfs)
+ test_run.run()
diff --git a/sfc/tests/functest/sfc_parent_function.py b/sfc/tests/functest/sfc_parent_function.py
new file mode 100644
index 00000000..410c0e71
--- /dev/null
+++ b/sfc/tests/functest/sfc_parent_function.py
@@ -0,0 +1,768 @@
+import logging
+import os
+import urllib3
+
+import sfc.lib.test_utils as test_utils
+import sfc.lib.openstack_utils as os_sfc_utils
+import sfc.lib.topology_shuffler as topo_shuffler
+
+from opnfv.utils import ovs_logger as ovs_log
+from opnfv.deployment.factory import Factory as DeploymentFactory
+from sfc.lib import config as sfc_config
+from sfc.lib import odl_utils as odl_utils
+from sfc.lib.results import Results
+
+# Disable InsecureRequestWarning errors when executing the SFC tests in XCI
+urllib3.disable_warnings()
+
+logger = logging.getLogger(__name__)
+CLIENT = "client"
+SERVER = "server"
+openstack_sfc = os_sfc_utils.OpenStackSFC()
+COMMON_CONFIG = sfc_config.CommonConfig()
+results = Results(COMMON_CONFIG.line_length)
+
+
+class SfcCommonTestCase(object):
+
+ def __init__(self, testcase_config, supported_installers, vnfs):
+
+ self.compute_nodes = None
+ self.controller_clients = None
+ self.compute_clients = None
+ self.tacker_client = None
+ self.ovs_logger = None
+ self.network = None
+ self.router = None
+ self.sg = None
+ self.image_creator = None
+ self.vnf_image_creator = None
+ self.creators = None
+ self.odl_ip = None
+ self.odl_port = None
+ self.default_param_file = None
+ self.topo_seed = None
+ self.test_topology = None
+ self.server_instance = None
+ self.server_creator = None
+ self.client_instance = None
+ self.client_creator = None
+ self.vnf_id = None
+ self.client_floating_ip = None
+ self.server_floating_ip = None
+ self.fips_sfs = []
+ self.vnf_objects = dict()
+ self.testcase_config = testcase_config
+ self.vnfs = vnfs
+ self.port_server = None
+ self.server_ip = None
+ self.port_client = None
+
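+ # All of the environment setup happens at construction time; concrete
+ # test cases only implement run().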
+ self.prepare_env(testcase_config, supported_installers, vnfs)
+
+ def prepare_env(self, testcase_config, supported_installers, vnfs):
+ """Prepare the testcase environment and the components
+ that the test scenario is going to use later on.
+
+ :param testcase_config: the input test config file
+ :param supported_installers: the supported installers for this tc
+ :param vnfs: the names of vnfs
+ :return: Environment preparation
+ """
+
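+ # Two discovery paths: query the installer (fuel/apex/osa/compass) for
+ # the node list, or use the static node list given by 'configByUser'.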
+ if COMMON_CONFIG.installer_type != 'configByUser':
+ deployment_handler = DeploymentFactory.get_handler(
+ COMMON_CONFIG.installer_type,
+ COMMON_CONFIG.installer_ip,
+ COMMON_CONFIG.installer_user,
+ COMMON_CONFIG.installer_password,
+ COMMON_CONFIG.installer_key_file)
+
+ installer_type = os.environ.get("INSTALLER_TYPE")
+ installer_ip = os.environ.get("INSTALLER_IP")
+ cluster = COMMON_CONFIG.installer_cluster
+ openstack_nodes = (deployment_handler.
+ get_nodes({'cluster': cluster})
+ if cluster is not None
+ else deployment_handler.get_nodes())
+
+ self.compute_nodes = [node for node in openstack_nodes
+ if node.is_compute()]
+
+ for compute in self.compute_nodes:
+ logger.info("This is a compute: %s" % compute.ip)
+
+ controller_nodes = [node for node in openstack_nodes
+ if node.is_controller()]
+ self.controller_clients = test_utils. \
+ get_ssh_clients(controller_nodes)
+ self.compute_clients = test_utils. \
+ get_ssh_clients(self.compute_nodes)
+
+ self.odl_ip, self.odl_port = odl_utils. \
+ get_odl_ip_port(openstack_nodes)
+
+ else:
+ installer_type = 'configByUser'
+ installer_ip = COMMON_CONFIG.installer_ip
+ openstack_nodes = COMMON_CONFIG.nodes_pod
+ self.compute_nodes = [node for node in
+ COMMON_CONFIG.nodes_pod
+ if node['role'] == 'Compute']
+
+ for compute in self.compute_nodes:
+ logger.info("This is a compute: %s" % compute['ip'])
+
+ controller_nodes = [node for node in openstack_nodes
+ if node['role'] == 'Controller']
+
+ self.odl_ip, self.odl_port = odl_utils. \
+ get_odl_ip_port_no_installer(openstack_nodes)
+
+ if installer_type not in supported_installers:
+ if installer_type != 'configByUser':
+ raise Exception(
+ '\033[91mYour installer is not supported yet\033[0m')
+
+ if not installer_ip:
+ logger.error(
+ '\033[91minstaller ip is not set\033[0m')
+ raise Exception(
+ '\033[91mexport INSTALLER_IP=<ip>\033[0m')
+
+ results.add_to_summary(0, "=")
+ results.add_to_summary(2, "STATUS", "SUBTEST")
+ results.add_to_summary(0, "=")
+
+ custom_flv = openstack_sfc.create_flavor(
+ COMMON_CONFIG.flavor,
+ COMMON_CONFIG.ram_size_in_mb,
+ COMMON_CONFIG.disk_size_in_gb,
+ COMMON_CONFIG.vcpu_count)
+ if not custom_flv:
+ raise Exception("Failed to create custom flavor")
+
+ if COMMON_CONFIG.mano_component == 'tacker':
+ self.tacker_client = os_sfc_utils.get_tacker_client()
+ os_sfc_utils.register_vim(self.tacker_client,
+ vim_file=COMMON_CONFIG.vim_file)
+
+ self.ovs_logger = ovs_log.OVSLogger(
+ os.path.join(COMMON_CONFIG.sfc_test_dir, 'ovs-logs'),
+ COMMON_CONFIG.functest_results_dir)
+
+ self.network, self.router = openstack_sfc.\
+ create_network_infrastructure(testcase_config.net_name,
+ testcase_config.subnet_name,
+ testcase_config.subnet_cidr,
+ testcase_config.router_name)
+
+ self.sg = openstack_sfc.create_security_group(
+ testcase_config.secgroup_name)
+
+ # Image for the vnf is registered
+ self.vnf_image_creator = openstack_sfc.register_glance_image(
+ COMMON_CONFIG.vnf_image_name,
+ COMMON_CONFIG.vnf_image_url,
+ COMMON_CONFIG.vnf_image_format,
+ 'public')
+
+ # Image for the client/server is registered
+ self.image_creator = openstack_sfc.register_glance_image(
+ COMMON_CONFIG.image_name,
+ COMMON_CONFIG.image_url,
+ COMMON_CONFIG.image_format,
+ 'public')
+
+ self.creators = openstack_sfc.creators
+
+ odl_utils.get_odl_username_password()
+
+ self.default_param_file = os.path.join(
+ COMMON_CONFIG.sfc_test_dir,
+ COMMON_CONFIG.vnfd_dir,
+ COMMON_CONFIG.vnfd_default_params_file)
+
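+ # Shuffle the placement of client, server and VNFs across availability
+ # zones so each run can exercise a different compute-node topology.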
+ self.topo_seed = topo_shuffler.get_seed()
+ self.test_topology = topo_shuffler.topology(vnfs, openstack_sfc,
+ seed=self.topo_seed)
+
+ logger.info('This test is run with the topology {0}'
+ .format(self.test_topology['id']))
+ logger.info('Topology description: {0}'
+ .format(self.test_topology['description']))
+
+ self.server_instance, port_server = \
+ openstack_sfc.create_instance(SERVER, COMMON_CONFIG.flavor,
+ self.image_creator, self.network,
+ self.sg,
+ self.test_topology['server'],
+ [SERVER + '-port'])
+
+ self.client_instance, port_client = \
+ openstack_sfc.create_instance(CLIENT, COMMON_CONFIG.flavor,
+ self.image_creator, self.network,
+ self.sg,
+ self.test_topology['client'],
+ [CLIENT + '-port'])
+
+ logger.info('This test is run with the topology {0}'.format(
+ self.test_topology['id']))
+ logger.info('Topology description: {0}'.format(
+ self.test_topology['description']))
+
+ if COMMON_CONFIG.installer_type != 'configByUser':
+ self.port_server = port_server[0]
+ self.port_client = port_client[0]
+ port_fixed_ips = self.port_server
+ for ip in port_fixed_ips:
+ self.server_ip = ip.get('ip_address')
+ logger.info("Server instance received private ip [{}]".format(
+ self.server_ip))
+ else:
+ self.port_server = port_server
+ self.port_client = port_client
+ self.server_ip = self.server_instance.ports[0].ips[0]['ip_address']
+ logger.info("Server instance received private ip [{}]".format(
+ self.server_ip))
+
+ def register_vnf_template(self, test_case_name, template_name):
+ """ Register the template which defines the VNF
+
+ :param test_case_name: the name of the test case
+ :param template_name: name of the template
+ """
+
+ if COMMON_CONFIG.mano_component == 'tacker':
+ self.create_custom_vnfd(test_case_name, template_name)
+
+ elif COMMON_CONFIG.mano_component == 'no-mano':
+ # networking-sfc does not have the template concept
+ pass
+
+ def create_custom_vnfd(self, test_case_name, vnfd_name):
+ """Create VNF Descriptor (VNFD)
+
+ :param test_case_name: the name of test case
+ :param vnfd_name: the name of vnfd
+ :return: vnfd
+ """
+
+ tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
+ COMMON_CONFIG.vnfd_dir, test_case_name)
+
+ os_sfc_utils.create_vnfd(self.tacker_client,
+ tosca_file=tosca_file,
+ vnfd_name=vnfd_name)
+
+ def create_vnf(self, vnf_name, vnfd_name=None, vim_name=None,
+ symmetric=False):
+ """Create custom vnf
+
+ :param vnf_name: name of the vnf
+ :param vnfd_name: name of the vnfd template (tacker)
+ :param vim_name: name of the vim (tacker)
+ :param symmetric: specifies whether this is part of the symmetric test
+ :return: av zone
+ """
+
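+ # With tacker the VNF is instantiated from its VNFD; with no-mano a
+ # plain nova instance is booted instead and tracked in self.vnf_objects.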
+ logger.info('This test is run with the topology {0}'.
+ format(self.test_topology['id']))
+ logger.info('Topology description: {0}'
+ .format(self.test_topology['description']))
+
+ if COMMON_CONFIG.mano_component == 'tacker':
+ os_sfc_utils.create_vnf_in_av_zone(
+ self.tacker_client, vnf_name, vnfd_name, vim_name,
+ self.default_param_file, self.test_topology[vnf_name])
+
+ self.vnf_id = os_sfc_utils.wait_for_vnf(self.tacker_client,
+ vnf_name=vnf_name)
+ if self.vnf_id is None:
+ raise Exception('ERROR while booting vnfs')
+
+ elif COMMON_CONFIG.mano_component == 'no-mano':
+ av_zone = self.test_topology[vnf_name]
+ if symmetric:
+ ports = [vnf_name + '-port1', vnf_name + '-port2']
+ else:
+ ports = [vnf_name + '-port']
+ vnf_instance, vnf_port = \
+ openstack_sfc.create_instance(vnf_name, COMMON_CONFIG.flavor,
+ self.vnf_image_creator,
+ self.network,
+ self.sg,
+ av_zone,
+ ports,
+ port_security=False)
+
+ self.vnf_objects[vnf_name] = [vnf_instance, vnf_port]
+ logger.info("Creating VNF with name...%s", vnf_name)
+ logger.info("Port associated with VNF...%s",
+ self.vnf_objects[vnf_name][1])
+
+ def assign_floating_ip_client_server(self):
+ """Assign floating IPs on the router about server and the client
+ instances
+ :return: Floating IPs for client and server
+ """
+ logger.info("Assigning floating IPs to client and server instances")
+
+ self.client_floating_ip = openstack_sfc.assign_floating_ip(
+ self.client_instance, self.port_client)
+ self.server_floating_ip = openstack_sfc.assign_floating_ip(
+ self.server_instance, self.port_server)
+
+ def assign_floating_ip_sfs(self):
+ """Assign floating IPs to service function
+
+ :return: The list fips_sfs consist of the available IPs for service
+ functions
+ """
+
+ logger.info("Assigning floating IPs to service functions")
+
+ if COMMON_CONFIG.mano_component == 'tacker':
+ vnf_ip = os_sfc_utils.get_vnf_ip(self.tacker_client,
+ vnf_id=self.vnf_id)
+ self.fips_sfs = openstack_sfc.assign_floating_ip_vnfs(self.router,
+ vnf_ip)
+ elif COMMON_CONFIG.mano_component == 'no-mano':
+ for vnf in self.vnfs:
+ # instance object is in [0] and port in [1]
+ vnf_instance = self.vnf_objects[vnf][0]
+ vnf_port = self.vnf_objects[vnf][1]
+ sf_floating_ip = openstack_sfc.\
+ assign_floating_ip(vnf_instance, vnf_port[0])
+ self.fips_sfs.append(sf_floating_ip)
+
+ def check_floating_ips(self):
+ """Check the responsivness of the floating IPs
+
+ :return: The responsivness of IPs in the fips_sfs list is checked
+ """
+
+ fips = [self.client_floating_ip, self.server_floating_ip]
+
+ for sf in self.fips_sfs:
+ fips.append(sf)
+
+ for ip in fips:
+ logger.info("Checking connectivity towards floating IP [%s]" % ip)
+ if not test_utils.ping(ip, retries=50, retry_timeout=3):
+ os_sfc_utils.get_tacker_items()
+ odl_utils.get_odl_items(self.odl_ip, self.odl_port)
+ raise Exception("Cannot ping floating IP [%s]" % ip)
+ logger.info("Successful ping to floating IP [%s]" % ip)
+
+ if not test_utils.check_ssh(self.fips_sfs):
+ raise Exception("Cannot establish SSH connection to the SFs")
+
+ def start_services_in_vm(self):
+ """Start the HTTP server in the server VM as well as the vxlan tool for
+ the SFs IPs included in the fips_sfs list
+
+ :return: HTTP server and vxlan tools are started
+ """
+
+ logger.info("Starting HTTP server on %s" % self.server_floating_ip)
+ if not test_utils.start_http_server(self.server_floating_ip):
+ raise Exception('\033[91mFailed to start HTTP server on %s\033[0m'
+ % self.server_floating_ip)
+
+ for sf_floating_ip in self.fips_sfs:
+ logger.info("Starting vxlan_tool on %s" % sf_floating_ip)
+ test_utils.start_vxlan_tool(sf_floating_ip)
+
+ def present_results_ssh(self):
+ """Check whether the connection between server and client using
+ SSH protocol is blocked or not.
+
+ :return: The results for the specific action of the scenario
+ """
+
+ logger.info("Test SSH")
+ if test_utils.is_ssh_blocked(self.client_floating_ip, self.server_ip):
+ results.add_to_summary(2, "PASS", "SSH Blocked")
+ else:
+ error = ('\033[91mTEST [FAILED] ==> SSH NOT BLOCKED\033[0m')
+ logger.error(error)
+ test_utils.capture_ovs_logs(
+ self.ovs_logger, self.controller_clients, self.compute_clients,
+ error)
+ results.add_to_summary(2, "FAIL", "SSH Works")
+
+ return results
+
+ def present_results_allowed_ssh(self):
+ """Check whether the connection between server and client using
+ SSH protocol is available or not.
+
+ :return: The results for the specific action of the scenario
+ """
+
+ logger.info("Test SSH")
+ if not test_utils.is_ssh_blocked(self.client_floating_ip,
+ self.server_ip):
+ results.add_to_summary(2, "PASS", "SSH works")
+ else:
+ error = ('\033[91mTEST [FAILED] ==> SSH BLOCKED\033[0m')
+ logger.error(error)
+ test_utils.capture_ovs_logs(
+ self.ovs_logger, self.controller_clients, self.compute_clients,
+ error)
+ results.add_to_summary(2, "FAIL", "SSH is blocked")
+
+ return results
+
+ def remove_vnffg(self, par_vnffg_name, par_vnffgd_name):
+ """Delete the vnffg and the vnffgd items that have been created
+ during the test scenario.
+
+ :param par_vnffg_name: The name of the vnffg to be deleted
+ :param par_vnffgd_name: The name of the vnffgd to be deleted
+ :return: The vnffg and vnffgd components are removed
+ """
+ if COMMON_CONFIG.mano_component == 'tacker':
+ os_sfc_utils.delete_vnffg(self.tacker_client,
+ vnffg_name=par_vnffg_name)
+
+ os_sfc_utils.delete_vnffgd(self.tacker_client,
+ vnffgd_name=par_vnffgd_name)
+
+ elif COMMON_CONFIG.mano_component == 'no-mano':
+ # TODO: If we had a testcase where only one chain must be removed
+ # we would need to add the logic. Now it removes all of them
+ openstack_sfc.delete_chain()
+ openstack_sfc.delete_port_groups()
+
+ def create_classifier(self, fc_name, port=85,
+ protocol='tcp', symmetric=False):
+ """Create the classifier component following the instructions from
+ relevant templates.
+
+ :param fc_name: The name of the classifier
+ :param port: Input port number
+ :param protocol: Input protocol
+ :param symmetric: Check symmetric
+ :return: Create the classifier component
+ """
+
+ logger.info("Creating the classifier...")
+
+ self.neutron_port = self.port_client
+ if COMMON_CONFIG.mano_component == 'no-mano':
+ openstack_sfc.create_classifier(self.neutron_port.id,
+ port,
+ protocol,
+ fc_name,
+ symmetric)
+
+ elif COMMON_CONFIG.mano_component == 'tacker':
+ logger.info("Creating classifier with tacker is not supported")
+
+ def create_vnffg(self, testcase_config_name, vnffgd_name, vnffg_name,
+ port=80, protocol='tcp', symmetric=False, vnf_index=-1):
+ """Create the vnffg components following the instructions from
+ relevant templates.
+
+ :param testcase_config_name: The config input of the test case
+ :param vnffgd_name: The name of the vnffgd template
+ :param vnffg_name: The name for the vnffg
+ :param port: Input port number
+ :param protocol: Input protocol
+ :param symmetric: Check symmetric
+ :param vnf_index: Index to specify vnf
+ :return: Create the vnffg component
+ """
+
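+ # tacker: onboard the VNFFGD template and create the VNFFG from it.
+ # no-mano: build port-pair groups and create the port chain directly.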
+ logger.info("Creating the vnffg...")
+
+ if COMMON_CONFIG.mano_component == 'tacker':
+ tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
+ COMMON_CONFIG.vnffgd_dir,
+ testcase_config_name)
+
+ os_sfc_utils.create_vnffgd(self.tacker_client,
+ tosca_file=tosca_file,
+ vnffgd_name=vnffgd_name)
+
+ self.neutron_port = self.port_client
+
+ if symmetric:
+ server_ip_prefix = self.server_ip + '/32'
+
+ os_sfc_utils.create_vnffg_with_param_file(
+ self.tacker_client,
+ vnffgd_name,
+ vnffg_name,
+ self.default_param_file,
+ self.neutron_port.id,
+ server_port=self.port_server.id,
+ server_ip=server_ip_prefix)
+
+ else:
+ os_sfc_utils.create_vnffg_with_param_file(
+ self.tacker_client,
+ vnffgd_name,
+ vnffg_name,
+ self.default_param_file,
+ self.neutron_port.id)
+
+ elif COMMON_CONFIG.mano_component == 'no-mano':
+ logger.info("Creating the vnffg without any mano component...")
+ port_groups = []
+ if vnf_index == -1:
+ for vnf in self.vnfs:
+ # vnf_instance is in [0] and vnf_port in [1]
+ vnf_instance = self.vnf_objects[vnf][0]
+ vnf_port = self.vnf_objects[vnf][1]
+ if symmetric:
+ # VNFs have two ports
+ neutron_port1 = vnf_port[0]
+ neutron_port2 = vnf_port[1]
+ neutron_ports = [neutron_port1, neutron_port2]
+ else:
+ neutron_port1 = vnf_port[0]
+ neutron_ports = [neutron_port1]
+
+ port_group = \
+ openstack_sfc.create_port_groups(neutron_ports,
+ vnf_instance)
+ port_groups.append(port_group)
+
+ else:
+ vnf = self.vnfs[vnf_index]
+ vnf_instance = self.vnf_objects[vnf][0]
+ vnf_port = self.vnf_objects[vnf][1]
+ if symmetric:
+ # VNFs have two ports
+ neutron_port1 = vnf_port[0]
+ neutron_port2 = vnf_port[1]
+ neutron_ports = [neutron_port1, neutron_port2]
+ else:
+ neutron_port1 = vnf_port[0]
+ neutron_ports = [neutron_port1]
+
+ port_group = openstack_sfc.create_port_groups(
+ neutron_ports, vnf_instance)
+ port_groups.append(port_group)
+
+ self.neutron_port = self.port_client
+
+ if symmetric:
+ # We must pass the server_port and server_ip in the symmetric
+ # case. Otherwise ODL does not work well
+ server_ip_prefix = self.server_ip + '/32'
+ openstack_sfc.create_chain(port_groups,
+ self.neutron_port.id,
+ port, protocol, vnffg_name,
+ symmetric,
+ server_port=self.port_server.id,
+ server_ip=server_ip_prefix)
+
+ else:
+ openstack_sfc.create_chain(port_groups,
+ self.neutron_port.id,
+ port, protocol, vnffg_name,
+ symmetric)
+
+ def update_vnffg(self, testcase_config_name, vnffgd_name, vnffg_name,
+ port=80, protocol='tcp', symmetric=False,
+ vnf_index=0, fc_name='red'):
+ """Update the vnffg components following the instructions from
+ relevant templates.
+
+ :param testcase_config_name: The config input of the test case
+ :param vnffgd_name: The name of the vnffgd template
+ :param vnffg_name: The name for the vnffg
+ :param port: Input port number
+ :param protocol: Input protocol
+ :param symmetric: Check symmetric
+ :param vnf_index: Index to identify vnf
+ :param fc_name: The name of the flow classifier
+ :return: Update the vnffg component
+ """
+
+ logger.info("Update the vnffg...")
+
+ if COMMON_CONFIG.mano_component == 'no-mano':
+ port_groups = []
+ for vnf in self.vnfs:
+ # vnf_instance is in [0] and vnf_port in [1]
+ vnf_instance = self.vnf_objects[vnf][0]
+ vnf_port = self.vnf_objects[vnf][1]
+ if symmetric:
+ # VNFs have two ports
+ neutron_port1 = vnf_port[0]
+ neutron_port2 = vnf_port[1]
+ neutron_ports = [neutron_port1, neutron_port2]
+ else:
+ neutron_port1 = vnf_port[0]
+ neutron_ports = [neutron_port1]
+
+ port_group = \
+ openstack_sfc.create_port_groups(neutron_ports,
+ vnf_instance)
+ port_groups.append(port_group)
+
+ openstack_sfc.update_chain(vnffg_name, fc_name, symmetric)
+
+ elif COMMON_CONFIG.mano_component == 'tacker':
+ logger.info("update for tacker is not supported")
+
+ def swap_classifiers(self, vnffg_1_name, vnffg_2_name, symmetric=False):
+ """Interchange classifiers between port chains
+
+ :param vnffg_1_name: Reference to port_chain_1
+ :param vnffg_2_name: Reference to port_chain_2
+ :param symmetric: To check symmetric
+ :return: Interchange the classifiers
+ """
+
+ if COMMON_CONFIG.mano_component == 'no-mano':
+ openstack_sfc.swap_classifiers(vnffg_1_name,
+ vnffg_2_name,
+ symmetric=False)
+
+ def present_results_http(self):
+ """Check whether the connection between server and client using
+ HTTP protocol is blocked or not.
+
+ :return: The results for the specific action of the scenario
+ """
+
+ logger.info("Test HTTP")
+ if test_utils.is_http_blocked(self.client_floating_ip, self.server_ip):
+ results.add_to_summary(2, "PASS", "HTTP Blocked")
+ else:
+ error = ('\033[91mTEST [FAILED] ==> HTTP WORKS\033[0m')
+ logger.error(error)
+ test_utils.capture_ovs_logs(
+ self.ovs_logger, self.controller_clients, self.compute_clients,
+ error)
+ results.add_to_summary(2, "FAIL", "HTTP works")
+
+ return results
+
+ def present_results_allowed_port_http(self, testcase_config):
+ """Check whether the connection between server and client using
+ HTTP protocol and for a specific port is available or not.
+
+ :param testcase_config: The config input of the test case
+ :return: The results for the specific action of the scenario
+ """
+
+ allowed_port = testcase_config.source_port
+ logger.info("Test if HTTP from port %s works" % allowed_port)
+ if not test_utils.is_http_blocked(
+ self.client_floating_ip, self.server_ip, allowed_port):
+ results.add_to_summary(2, "PASS", "HTTP works")
+ else:
+ error = ('\033[91mTEST [FAILED] ==> HTTP BLOCKED\033[0m')
+ logger.error(error)
+ test_utils.capture_ovs_logs(
+ self.ovs_logger, self.controller_clients, self.compute_clients,
+ error)
+ results.add_to_summary(2, "FAIL", "HTTP is blocked")
+
+ return results
+
+ def present_results_blocked_port_http(self, testcase_config,
+ test='HTTP'):
+ """Check whether the connection between server and client using
+ HTTP protocol and for a specific port is blocked or not.
+
+ :param testcase_config: The config input of the test case
+ :param test: custom test string to print on result summary
+ :return: The results for the specific action of the scenario
+ """
+
+ allowed_port = testcase_config.source_port
+ logger.info("Test if HTTP from port %s doesn't work" % allowed_port)
+ if test_utils.is_http_blocked(
+ self.client_floating_ip, self.server_ip, allowed_port):
+ results.add_to_summary(2, "PASS", test + " blocked")
+ else:
+ error = ('\033[91mTEST [FAILED] ==> HTTP WORKS\033[0m')
+ logger.error(error)
+ test_utils.capture_ovs_logs(
+ self.ovs_logger, self.controller_clients, self.compute_clients,
+ error)
+ results.add_to_summary(2, "FAIL", test + " works")
+
+ return results
+
+ def check_deletion(self):
+ """Check that the deletion of the chain has been completed sucessfully.
+
+ :return: Check that the chain has been completed deleted without
+ leftovers.
+ """
+
+ if not odl_utils.\
+ check_vnffg_deletion(self.odl_ip, self.odl_port,
+ self.ovs_logger,
+ [self.neutron_port],
+ self.client_instance.hypervisor_hostname,
+ self.compute_nodes):
+ logger.debug("The chains were not correctly removed")
+ raise Exception("Chains not correctly removed, test failed")
+
+ def present_results_allowed_http(self):
+ """Check whether the connection between server and client using
+ HTTP protocol is available or not.
+
+ :return: The results for the specific action of the scenario
+ """
+
+ if not test_utils.is_http_blocked(self.client_floating_ip,
+ self.server_ip):
+ results.add_to_summary(2, "PASS", "HTTP works")
+ else:
+ error = ('\033[91mTEST [FAILED] ==> HTTP BLOCKED\033[0m')
+ logger.error(error)
+ test_utils.capture_ovs_logs(
+ self.ovs_logger, self.controller_clients, self.compute_clients,
+ error)
+ results.add_to_summary(2, "FAIL", "HTTP is blocked")
+
+ return results
+
+ def vxlan_blocking_start(self, floating_ip, port_blocked):
+ """Start the vxlan tool for one floating IP and blocking
+ a specific port.
+
+ :param floating_ip: Floating IP
+ :param port_blocked: Port
+ :return: The port for the floating IP is blocked
+ """
+
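+ # Restart the tool so the new blocking rule replaces any previous
+ # vxlan_tool configuration on this SF.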
+ test_utils.stop_vxlan_tool(floating_ip)
+ logger.info("Starting HTTP firewall on %s" % floating_ip)
+ test_utils.start_vxlan_tool(floating_ip, block=port_blocked)
+
+ def vxlan_blocking_stop(self, floating_ip):
+ """Stop the vxlan tool for a specific IP
+
+ :param floating_ip: Floating IP
+ :return: The vxlan tool for the specific floating IP is stopped
+ """
+
+ logger.info("Starting HTTP firewall on %s" % floating_ip)
+ test_utils.stop_vxlan_tool(floating_ip)
+
+ def vxlan_start_interface(self, floating_ip, interface, output, block):
+ """Start the vxlan tool for one floating IP and blocking
+ a specific interface.
+
+ :param floating_ip: Floating IP
+ :param interface: Interface
+ :param output: output interface
+ :param block: port
+ :return: The interface and/or port for the specific floating IP is blocked
+ """
+
+ logger.info("Starting vxlan_tool on %s" % floating_ip)
+ test_utils.start_vxlan_tool(floating_ip, interface=interface,
+ output=output, block=block)
diff --git a/sfc/tests/functest/sfc_symmetric_chain.py b/sfc/tests/functest/sfc_symmetric_chain.py
index b8d35514..cec45219 100644
--- a/sfc/tests/functest/sfc_symmetric_chain.py
+++ b/sfc/tests/functest/sfc_symmetric_chain.py
@@ -1,4 +1,4 @@
-#!/bin/python
+#!/usr/bin/env python
#
# Copyright (c) 2017 Ericsson AB and others. All rights reserved
#
@@ -8,228 +8,148 @@
#
# http://www.apache.org/licenses/LICENSE-2.0
#
-
-import os
-import sys
import threading
import logging
+import urllib3
-import sfc.lib.openstack_utils as os_sfc_utils
import sfc.lib.odl_utils as odl_utils
-import opnfv.utils.ovs_logger as ovs_log
-from opnfv.deployment.factory import Factory as DeploymentFactory
-
import sfc.lib.config as sfc_config
-import sfc.lib.utils as test_utils
-from sfc.lib.results import Results
-import sfc.lib.topology_shuffler as topo_shuffler
-
+from sfc.tests.functest import sfc_parent_function
+""" logging configuration """
logger = logging.getLogger(__name__)
-
+COMMON_CONFIG = sfc_config.CommonConfig()
CLIENT = "client"
SERVER = "server"
-COMMON_CONFIG = sfc_config.CommonConfig()
-TESTCASE_CONFIG = sfc_config.TestcaseConfig('sfc_symmetric_chain')
-
-
-def main():
- deploymentHandler = DeploymentFactory.get_handler(
- COMMON_CONFIG.installer_type,
- COMMON_CONFIG.installer_ip,
- COMMON_CONFIG.installer_user,
- COMMON_CONFIG.installer_password,
- COMMON_CONFIG.installer_key_file)
-
- cluster = COMMON_CONFIG.installer_cluster
- all_nodes = (deploymentHandler.get_nodes({'cluster': cluster})
- if cluster is not None
- else deploymentHandler.get_nodes())
-
- controller_nodes = [node for node in all_nodes if node.is_controller()]
- compute_nodes = [node for node in all_nodes if node.is_compute()]
-
- odl_ip, odl_port = odl_utils.get_odl_ip_port(all_nodes)
-
- results = Results(COMMON_CONFIG.line_length)
- results.add_to_summary(0, "=")
- results.add_to_summary(2, "STATUS", "SUBTEST")
- results.add_to_summary(0, "=")
-
- openstack_sfc = os_sfc_utils.OpenStackSFC()
-
- tacker_client = os_sfc_utils.get_tacker_client()
-
- _, custom_flavor = openstack_sfc.get_or_create_flavor(
- COMMON_CONFIG.flavor,
- COMMON_CONFIG.ram_size_in_mb,
- COMMON_CONFIG.disk_size_in_gb,
- COMMON_CONFIG.vcpu_count)
- if custom_flavor is None:
- logger.error("Failed to create custom flavor")
- sys.exit(1)
-
- controller_clients = test_utils.get_ssh_clients(controller_nodes)
- compute_clients = test_utils.get_ssh_clients(compute_nodes)
-
- ovs_logger = ovs_log.OVSLogger(
- os.path.join(COMMON_CONFIG.sfc_test_dir, 'ovs-logs'),
- COMMON_CONFIG.functest_results_dir)
-
- image_creator = openstack_sfc.register_glance_image(
- COMMON_CONFIG.image_name,
- COMMON_CONFIG.image_url,
- COMMON_CONFIG.image_format,
- 'public')
-
- network, router = openstack_sfc.create_network_infrastructure(
- TESTCASE_CONFIG.net_name,
- TESTCASE_CONFIG.subnet_name,
- TESTCASE_CONFIG.subnet_cidr,
- TESTCASE_CONFIG.router_name)
-
- sg = openstack_sfc.create_security_group(TESTCASE_CONFIG.secgroup_name)
-
- vnf_name = 'testVNF1'
- # Using seed=0 uses the baseline topology: everything in the same host
- testTopology = topo_shuffler.topology([vnf_name], openstack_sfc, seed=0)
- logger.info('This test is run with the topology {0}'
- .format(testTopology['id']))
- logger.info('Topology description: {0}'
- .format(testTopology['description']))
-
- client_instance, client_creator = openstack_sfc.create_instance(
- CLIENT, COMMON_CONFIG.flavor, image_creator, network, sg,
- av_zone=testTopology['client'])
-
- server_instance, server_creator = openstack_sfc.create_instance(
- SERVER, COMMON_CONFIG.flavor, image_creator, network, sg,
- av_zone=testTopology['server'])
-
- server_ip = server_instance.ports[0].ips[0]['ip_address']
- logger.info("Server instance received private ip [{}]".format(server_ip))
-
- tosca_file = os.path.join(
- COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- TESTCASE_CONFIG.test_vnfd)
-
- default_param_file = os.path.join(
- COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- COMMON_CONFIG.vnfd_default_params_file)
-
- os_sfc_utils.create_vnfd(tacker_client, tosca_file=tosca_file)
- test_utils.create_vnf_in_av_zone(
- tacker_client,
- vnf_name,
- 'test-vnfd1',
- default_param_file,
- testTopology[vnf_name])
-
- vnf_id = os_sfc_utils.wait_for_vnf(tacker_client, vnf_name=vnf_name)
- if vnf_id is None:
- logger.error('ERROR while booting VNF')
- sys.exit(1)
-
- os_sfc_utils.create_sfc(
- tacker_client,
- sfc_name='red',
- chain_vnf_names=[vnf_name],
- symmetrical=True)
-
- os_sfc_utils.create_sfc_classifier(
- tacker_client, 'red_http', sfc_name='red',
- match={
- 'source_port': 0,
- 'dest_port': 80,
- 'protocol': 6
- })
-
- # FIXME: JIRA SFC-86
- # Tacker does not allow to specify the direction of the chain to be used,
- # only references the SFP (which for symmetric chains results in two RSPs)
- os_sfc_utils.create_sfc_classifier(
- tacker_client, 'red_http_reverse', sfc_name='red',
- match={
- 'source_port': 80,
- 'dest_port': 0,
- 'protocol': 6
- })
-
- logger.info(test_utils.run_cmd('tacker sfc-list'))
- logger.info(test_utils.run_cmd('tacker sfc-classifier-list'))
-
- # Start measuring the time it takes to implement the classification rules
- t1 = threading.Thread(target=odl_utils.wait_for_classification_rules,
- args=(ovs_logger, compute_nodes, odl_ip, odl_port,))
-
- try:
- t1.start()
- except Exception as e:
- logger.error("Unable to start the thread that counts time %s" % e)
-
- logger.info("Assigning floating IPs to instances")
- client_floating_ip = openstack_sfc.assign_floating_ip(router,
- client_instance,
- client_creator)
- server_floating_ip = openstack_sfc.assign_floating_ip(router,
- server_instance,
- server_creator)
- fips_sfs = openstack_sfc.assign_floating_ip_vnfs(router)
- sf_floating_ip = fips_sfs[0]
-
- fips = [client_floating_ip, server_floating_ip, fips_sfs[0]]
-
- for ip in fips:
- logger.info("Checking connectivity towards floating IP [%s]" % ip)
- if not test_utils.ping(ip, retries=50, retry_timeout=3):
- logger.error("Cannot ping floating IP [%s]" % ip)
- sys.exit(1)
- logger.info("Successful ping to floating IP [%s]" % ip)
-
- if not test_utils.check_ssh([sf_floating_ip]):
- logger.error("Cannot establish SSH connection to the SFs")
- sys.exit(1)
-
- logger.info("Starting HTTP server on %s" % server_floating_ip)
- if not test_utils.start_http_server(server_floating_ip):
- logger.error('\033[91mFailed to start the HTTP server\033[0m')
- sys.exit(1)
-
- blocked_port = TESTCASE_CONFIG.blocked_source_port
- logger.info("Firewall started, blocking traffic port %d" % blocked_port)
- test_utils.start_vxlan_tool(sf_floating_ip, block=blocked_port)
-
- logger.info("Wait for ODL to update the classification rules in OVS")
- t1.join()
-
- allowed_port = TESTCASE_CONFIG.allowed_source_port
- logger.info("Test if HTTP from port %s works" % allowed_port)
- if not test_utils.is_http_blocked(
- client_floating_ip, server_ip, allowed_port):
- results.add_to_summary(2, "PASS", "HTTP works")
- else:
- error = ('\033[91mTEST 1 [FAILED] ==> HTTP BLOCKED\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP works")
-
- logger.info("Test if HTTP from port %s is blocked" % blocked_port)
- if test_utils.is_http_blocked(
- client_floating_ip, server_ip, blocked_port):
- results.add_to_summary(2, "PASS", "HTTP Blocked")
- else:
- error = ('\033[91mTEST 2 [FAILED] ==> HTTP WORKS\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP Blocked")
- return results.compile_summary(), openstack_sfc.creators
+
+class SfcSymmetricChain(sfc_parent_function.SfcCommonTestCase):
+ """One client and one server are created using nova.
+ The server will be running a web server on port 80.
+ Then one Service Function (SF) is created using Tacker.
+ This service function will be running a firewall that
+ blocks the traffic in a specific port.
+ A symmetric service chain routing the traffic through
+ this SF will be created as well.
+ The purpose is to check different HTTP traffic
+ combinations using the firewall.
+ """
+
+ def run(self):
+
+ logger.info("The test scenario %s is starting", __name__)
+
+ self.register_vnf_template(self.testcase_config.test_vnfd,
+ 'test-vnfd1')
+ self.create_vnf(self.vnfs[0], 'test-vnfd1', 'test-vim', symmetric=True)
+
+ self.create_vnffg(self.testcase_config.test_vnffgd, 'red-symmetric',
+ 'red_http', port=80, protocol='tcp', symmetric=True)
+
+ # Start measuring the time it takes to implement the classification
+ # rules
+ t1 = threading.Thread(target=symmetric_wait_for_classification_rules,
+ args=(self.ovs_logger, self.compute_nodes,
+ self.server_instance.hypervisor_hostname,
+ self.port_server,
+ self.client_instance.hypervisor_hostname,
+ self.port_client,
+ self.odl_ip, self.odl_port,))
+ try:
+ t1.start()
+ except Exception as e:
+ logger.error("Unable to start the thread that counts time %s" % e)
+
+ logger.info("Assigning floating IPs to instances")
+ self.assign_floating_ip_client_server()
+
+ self.assign_floating_ip_sfs()
+
+ self.check_floating_ips()
+
+ self.start_services_in_vm()
+
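+ # First pass: forward between eth0 and eth1 without blocking anything,
+ # so traffic on the allowed port should go through in both directions.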
+ self.vxlan_start_interface(self.fips_sfs[0], 'eth0', 'eth1', None)
+ self.vxlan_start_interface(self.fips_sfs[0], 'eth1', 'eth0', None)
+
+ logger.info("Wait for ODL to update the classification rules in OVS")
+ t1.join()
+
+ results = self.present_results_allowed_port_http(self.testcase_config)
+
+ self.vxlan_blocking_stop(self.fips_sfs[0])
+ self.vxlan_start_interface(self.fips_sfs[0], 'eth0', 'eth1', "80")
+ self.vxlan_start_interface(self.fips_sfs[0], 'eth1', 'eth0', None)
+
+ results = self.present_results_blocked_port_http(self.testcase_config,
+ 'HTTP uplink')
+
+ self.vxlan_blocking_stop(self.fips_sfs[0])
+ self.vxlan_start_interface(self.fips_sfs[0], 'eth0', 'eth1', None)
+ self.vxlan_start_interface(self.fips_sfs[0], 'eth1', 'eth0',
+ self.testcase_config.source_port)
+
+ results = self.present_results_blocked_port_http(self.testcase_config,
+ 'HTTP downlink')
+
+ self.vxlan_blocking_stop(self.fips_sfs[0])
+ self.vxlan_start_interface(self.fips_sfs[0], 'eth0', 'eth1', None)
+ self.vxlan_start_interface(self.fips_sfs[0], 'eth1', 'eth0', None)
+ results = self.present_results_allowed_http()
+
+ if __name__ == '__main__':
+ return results.compile_summary(), self.creators
+
+ if __name__ == 'sfc.tests.functest.sfc_symmetric_chain':
+ return results.compile_summary(), self.creators
+
+ def get_creators(self):
+ """Return the creators info, specially in case the info is not
+ returned due to an exception.
+
+ :return: creators
+ """
+ return self.creators
+
+
+def symmetric_wait_for_classification_rules(ovs_logger, compute_nodes,
+ server_compute, server_port,
+ client_compute, client_port,
+ odl_ip, odl_port):
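+    # If client and server share a compute node a single check covers both
+    # directions; otherwise each compute node is checked separately.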
+ if client_compute == server_compute:
+ odl_utils.wait_for_classification_rules(
+ ovs_logger,
+ compute_nodes,
+ odl_ip,
+ odl_port,
+ client_compute,
+ [server_port, client_port])
+ else:
+ odl_utils.wait_for_classification_rules(
+ ovs_logger,
+ compute_nodes,
+ odl_ip,
+ odl_port,
+ server_compute,
+ server_port)
+ odl_utils.wait_for_classification_rules(
+ ovs_logger,
+ compute_nodes,
+ odl_ip,
+ odl_port,
+ client_compute,
+ client_port)
if __name__ == '__main__':
- logging.config.fileConfig(COMMON_CONFIG.functest_logging_api)
- main()
+
+ # Disable InsecureRequestWarning errors when executing the SFC tests in XCI
+ urllib3.disable_warnings()
+
+ TESTCASE_CONFIG = sfc_config.TestcaseConfig('sfc_symmetric_chain')
+ supported_installers = ['fuel', 'apex', 'osa', 'compass']
+ vnf_names = ['testVNF1']
+
+ test_run = SfcSymmetricChain(TESTCASE_CONFIG, supported_installers,
+ vnf_names)
+ test_run.run()
diff --git a/sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py b/sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py
index d7eb2994..92c2711e 100644
--- a/sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py
+++ b/sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py
@@ -1,4 +1,4 @@
-#!/bin/python
+#!/usr/bin/env python
#
# Copyright (c) 2015 All rights reserved
# This program and the accompanying materials
@@ -8,302 +8,120 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
-import os
-import sys
import threading
import logging
+import urllib3
-import sfc.lib.openstack_utils as os_sfc_utils
import sfc.lib.odl_utils as odl_utils
-import opnfv.utils.ovs_logger as ovs_log
-
import sfc.lib.config as sfc_config
-import sfc.lib.test_utils as test_utils
-from sfc.lib.results import Results
-from opnfv.deployment.factory import Factory as DeploymentFactory
-import sfc.lib.topology_shuffler as topo_shuffler
-
+from sfc.tests.functest import sfc_parent_function
+""" logging configuration """
logger = logging.getLogger(__name__)
-CLIENT = "client"
-SERVER = "server"
-COMMON_CONFIG = sfc_config.CommonConfig()
-TESTCASE_CONFIG = sfc_config.TestcaseConfig('sfc_two_chains_SSH_and_HTTP')
-
-
-def main():
- deploymentHandler = DeploymentFactory.get_handler(
- COMMON_CONFIG.installer_type,
- COMMON_CONFIG.installer_ip,
- COMMON_CONFIG.installer_user,
- COMMON_CONFIG.installer_password,
- COMMON_CONFIG.installer_key_file)
-
- installer_type = os.environ.get("INSTALLER_TYPE")
-
- supported_installers = ['fuel', 'apex', 'osa']
-
- if installer_type not in supported_installers:
- logger.error(
- '\033[91mYour installer is not supported yet\033[0m')
- sys.exit(1)
-
- installer_ip = os.environ.get("INSTALLER_IP")
- if not installer_ip:
- logger.error(
- '\033[91minstaller ip is not set\033[0m')
- logger.error(
- '\033[91mexport INSTALLER_IP=<ip>\033[0m')
- sys.exit(1)
-
- cluster = COMMON_CONFIG.installer_cluster
- openstack_nodes = (deploymentHandler.get_nodes({'cluster': cluster})
- if cluster is not None
- else deploymentHandler.get_nodes())
-
- controller_nodes = [node for node in openstack_nodes
- if node.is_controller()]
- compute_nodes = [node for node in openstack_nodes
- if node.is_compute()]
-
- odl_ip, odl_port = odl_utils.get_odl_ip_port(openstack_nodes)
-
- for compute in compute_nodes:
- logger.info("This is a compute: %s" % compute.ip)
-
- results = Results(COMMON_CONFIG.line_length)
- results.add_to_summary(0, "=")
- results.add_to_summary(2, "STATUS", "SUBTEST")
- results.add_to_summary(0, "=")
-
- openstack_sfc = os_sfc_utils.OpenStackSFC()
-
- custom_flv = openstack_sfc.create_flavor(
- COMMON_CONFIG.flavor,
- COMMON_CONFIG.ram_size_in_mb,
- COMMON_CONFIG.disk_size_in_gb,
- COMMON_CONFIG.vcpu_count)
- if not custom_flv:
- logger.error("Failed to create custom flavor")
- sys.exit(1)
-
- tacker_client = os_sfc_utils.get_tacker_client()
-
- controller_clients = test_utils.get_ssh_clients(controller_nodes)
- compute_clients = test_utils.get_ssh_clients(compute_nodes)
-
- ovs_logger = ovs_log.OVSLogger(
- os.path.join(COMMON_CONFIG.sfc_test_dir, 'ovs-logs'),
- COMMON_CONFIG.functest_results_dir)
-
- image_creator = openstack_sfc.register_glance_image(
- COMMON_CONFIG.image_name,
- COMMON_CONFIG.image_url,
- COMMON_CONFIG.image_format,
- 'public')
- network, router = openstack_sfc.create_network_infrastructure(
- TESTCASE_CONFIG.net_name,
- TESTCASE_CONFIG.subnet_name,
- TESTCASE_CONFIG.subnet_cidr,
- TESTCASE_CONFIG.router_name)
+class SfcTwoChainsSSHandHTTP(sfc_parent_function.SfcCommonTestCase):
+ """We create one client and one server using nova.
+ Then, 2 SFs are created using tacker.
+ Two chains are created, having one SF each.
+ The vxlan tool is used on both SFs. The purpose is to
+ check different HTTP and SSH traffic combinations.
+ """
- sg = openstack_sfc.create_security_group(TESTCASE_CONFIG.secgroup_name)
+ def run(self):
- vnf_names = ['testVNF1', 'testVNF2']
-
- topo_seed = topo_shuffler.get_seed() # change to None for nova av zone
- testTopology = topo_shuffler.topology(vnf_names, openstack_sfc,
- seed=topo_seed)
-
- logger.info('This test is run with the topology {0}'
- .format(testTopology['id']))
- logger.info('Topology description: {0}'
- .format(testTopology['description']))
-
- client_instance, client_creator = openstack_sfc.create_instance(
- CLIENT, COMMON_CONFIG.flavor, image_creator, network, sg,
- av_zone=testTopology['client'])
-
- server_instance, server_creator = openstack_sfc.create_instance(
- SERVER, COMMON_CONFIG.flavor, image_creator, network, sg,
- av_zone=testTopology['server'])
-
- server_ip = server_instance.ports[0].ips[0]['ip_address']
-
- os_sfc_utils.register_vim(tacker_client, vim_file=COMMON_CONFIG.vim_file)
-
- tosca_red = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- TESTCASE_CONFIG.test_vnfd_red)
- os_sfc_utils.create_vnfd(tacker_client,
- tosca_file=tosca_red,
- vnfd_name='test-vnfd1')
-
- tosca_blue = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- TESTCASE_CONFIG.test_vnfd_blue)
-
- os_sfc_utils.create_vnfd(tacker_client,
- tosca_file=tosca_blue,
- vnfd_name='test-vnfd2')
+ logger.info("The test scenario %s is starting", __name__)
- default_param_file = os.path.join(
- COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- COMMON_CONFIG.vnfd_default_params_file)
+ self.register_vnf_template(self.testcase_config.test_vnfd_red,
+ 'test-vnfd1')
+ self.register_vnf_template(self.testcase_config.test_vnfd_blue,
+ 'test-vnfd2')
- os_sfc_utils.create_vnf_in_av_zone(
- tacker_client, vnf_names[0], 'test-vnfd1', 'test-vim',
- default_param_file, testTopology[vnf_names[0]])
- os_sfc_utils.create_vnf_in_av_zone(
- tacker_client, vnf_names[1], 'test-vnfd2', 'test-vim',
- default_param_file, testTopology[vnf_names[1]])
+ self.create_vnf(self.vnfs[0], 'test-vnfd1', 'test-vim')
+ self.create_vnf(self.vnfs[1], 'test-vnfd2', 'test-vim')
- vnf1_id = os_sfc_utils.wait_for_vnf(tacker_client, vnf_name=vnf_names[0])
- vnf2_id = os_sfc_utils.wait_for_vnf(tacker_client, vnf_name=vnf_names[1])
- if vnf1_id is None or vnf2_id is None:
- logger.error('ERROR while booting vnfs')
- sys.exit(1)
+ logger.info("Call Parent create_vnffg with index")
+ self.create_vnffg(self.testcase_config.test_vnffgd_red, 'red',
+ 'red_http', port=80, protocol='tcp',
+ symmetric=False, vnf_index=0)
- tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnffgd_dir,
- TESTCASE_CONFIG.test_vnffgd_red)
+ self.create_vnffg(self.testcase_config.test_vnffgd_blue, 'blue',
+ 'blue_ssh', port=22, protocol='tcp',
+ symmetric=False, vnf_index=1)
+ self.create_classifier('dummy')
- os_sfc_utils.create_vnffgd(tacker_client,
- tosca_file=tosca_file,
- vnffgd_name='red')
+ t1 = threading.Thread(target=odl_utils.wait_for_classification_rules,
+ args=(self.ovs_logger, self.compute_nodes,
+ self.odl_ip, self.odl_port,
+ self.client_instance.hypervisor_hostname,
+ [self.neutron_port],))
+ try:
+ t1.start()
+ except Exception as e:
+ logger.error("Unable to start the thread that counts time %s" % e)
- neutron_port = openstack_sfc.get_client_port_id(client_instance)
- os_sfc_utils.create_vnffg_with_param_file(tacker_client, 'red',
- 'red_http',
- default_param_file,
- neutron_port.id)
+ logger.info("Assigning floating IPs to instances")
+ self.assign_floating_ip_client_server()
- # Start measuring the time it takes to implement the classification rules
- t1 = threading.Thread(target=odl_utils.wait_for_classification_rules,
- args=(ovs_logger, compute_nodes, odl_ip, odl_port,))
+ self.assign_floating_ip_sfs()
- try:
- t1.start()
- except Exception as e:
- logger.error("Unable to start the thread that counts time %s" % e)
+ self.check_floating_ips()
+ self.start_services_in_vm()
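+ # The SF in the HTTP chain blocks port 80 and the SF in the SSH chain
+ # blocks port 22, so both protocols are expected to be blocked first.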
+ self.vxlan_blocking_start(self.fips_sfs[0], "80")
+ self.vxlan_blocking_start(self.fips_sfs[1], "22")
- logger.info("Assigning floating IPs to instances")
- client_floating_ip = openstack_sfc.assign_floating_ip(router,
- client_instance,
- client_creator)
- server_floating_ip = openstack_sfc.assign_floating_ip(router,
- server_instance,
- server_creator)
- fips_sfs = openstack_sfc.assign_floating_ip_vnfs(router)
- sf1_floating_ip = fips_sfs[0]
- sf2_floating_ip = fips_sfs[1]
+ logger.info("Wait for ODL to update the classification rules in OVS")
+ t1.join()
- fips = [client_floating_ip, server_floating_ip, sf1_floating_ip,
- sf2_floating_ip]
+ results = self.present_results_ssh()
+ results = self.present_results_http()
- for ip in fips:
- logger.info("Checking connectivity towards floating IP [%s]" % ip)
- if not test_utils.ping(ip, retries=50, retry_timeout=3):
- logger.error("Cannot ping floating IP [%s]" % ip)
- os_sfc_utils.get_tacker_items()
- odl_utils.get_odl_items(odl_ip, odl_port)
- sys.exit(1)
- logger.info("Successful ping to floating IP [%s]" % ip)
+ logger.info("Changing the classification")
- if not test_utils.check_ssh([sf1_floating_ip, sf2_floating_ip]):
- logger.error("Cannot establish SSH connection to the SFs")
- sys.exit(1)
+ self.swap_classifiers('red_http', 'blue_ssh')
- logger.info("Starting HTTP server on %s" % server_floating_ip)
- if not test_utils.start_http_server(server_floating_ip):
- logger.error('\033[91mFailed to start HTTP server on %s\033[0m'
- % server_floating_ip)
- sys.exit(1)
+ # Start measuring the time it takes to implement the classification
+ # rules
+ t2 = threading.Thread(target=odl_utils.wait_for_classification_rules,
+ args=(self.ovs_logger, self.compute_nodes,
+ self.odl_ip, self.odl_port,
+ self.client_instance.hypervisor_hostname,
+ [self.neutron_port],))
+ try:
+ t2.start()
+ except Exception as e:
+ logger.error("Unable to start the thread that counts time %s" % e)
- logger.info("Starting SSH firewall on %s" % sf1_floating_ip)
- test_utils.start_vxlan_tool(sf1_floating_ip, block="22")
- logger.info("Starting HTTP firewall on %s" % sf2_floating_ip)
- test_utils.start_vxlan_tool(sf2_floating_ip, block="80")
+ logger.info("Wait for ODL to update the classification rules in OVS")
+ t2.join()
- logger.info("Wait for ODL to update the classification rules in OVS")
- t1.join()
+ results = self.present_results_allowed_http()
+ results = self.present_results_allowed_ssh()
- logger.info("Test SSH")
- if test_utils.is_ssh_blocked(client_floating_ip, server_ip):
- results.add_to_summary(2, "PASS", "SSH Blocked")
- else:
- error = ('\033[91mTEST 1 [FAILED] ==> SSH NOT BLOCKED\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "SSH Blocked")
+ if __name__ == '__main__':
+ return results.compile_summary(), self.creators
- logger.info("Test HTTP")
- if not test_utils.is_http_blocked(client_floating_ip, server_ip):
- results.add_to_summary(2, "PASS", "HTTP works")
- else:
- error = ('\033[91mTEST 2 [FAILED] ==> HTTP BLOCKED\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP works")
+ if __name__ == 'sfc.tests.functest.sfc_two_chains_SSH_and_HTTP':
+ return results.compile_summary(), self.creators
- logger.info("Changing the classification")
+ def get_creators(self):
+ """Return the creators info, specially in case the info is not
+ returned due to an exception.
- os_sfc_utils.delete_vnffg(tacker_client, vnffg_name='red_http_works')
+ :return: creators
+ """
+ return self.creators
- os_sfc_utils.delete_vnffgd(tacker_client, vnffgd_name='red')
- tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnffgd_dir,
- TESTCASE_CONFIG.test_vnffgd_blue)
-
- os_sfc_utils.create_vnffgd(tacker_client,
- tosca_file=tosca_file,
- vnffgd_name='blue')
-
- os_sfc_utils.create_vnffg_with_param_file(tacker_client, 'blue',
- 'blue_ssh',
- default_param_file,
- neutron_port)
-
- # Start measuring the time it takes to implement the classification rules
- t2 = threading.Thread(target=odl_utils.wait_for_classification_rules,
- args=(ovs_logger, compute_nodes, odl_ip, odl_port,))
- try:
- t2.start()
- except Exception as e:
- logger.error("Unable to start the thread that counts time %s" % e)
-
- logger.info("Wait for ODL to update the classification rules in OVS")
- t2.join()
-
- logger.info("Test HTTP")
- if test_utils.is_http_blocked(client_floating_ip, server_ip):
- results.add_to_summary(2, "PASS", "HTTP Blocked")
- else:
- error = ('\033[91mTEST 3 [FAILED] ==> HTTP WORKS\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP Blocked")
-
- logger.info("Test SSH")
- if not test_utils.is_ssh_blocked(client_floating_ip, server_ip):
- results.add_to_summary(2, "PASS", "SSH works")
- else:
- error = ('\033[91mTEST 4 [FAILED] ==> SSH BLOCKED\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "SSH works")
+if __name__ == '__main__':
- return results.compile_summary(), openstack_sfc.creators
+ # Disable InsecureRequestWarning errors when executing the SFC tests in XCI
+ urllib3.disable_warnings()
+ TESTCASE_CONFIG = sfc_config.TestcaseConfig('sfc_two_chains_SSH_and_HTTP')
+ supported_installers = ['fuel', 'apex', 'osa', 'compass']
+ vnf_names = ['testVNF1', 'testVNF2']
-if __name__ == '__main__':
- logging.config.fileConfig(COMMON_CONFIG.functest_logging_api)
- main()
+ test_run = SfcTwoChainsSSHandHTTP(TESTCASE_CONFIG, supported_installers,
+ vnf_names)
+ test_run.run()
diff --git a/sfc/tests/functest/vnfd-templates/test-one-chain-vnfd1.yaml b/sfc/tests/functest/vnfd-templates/test-one-chain-vnfd1.yaml
index 4042222c..cc5947c6 100644
--- a/sfc/tests/functest/vnfd-templates/test-one-chain-vnfd1.yaml
+++ b/sfc/tests/functest/vnfd-templates/test-one-chain-vnfd1.yaml
@@ -15,10 +15,10 @@ topology_template:
nfv_compute:
properties:
num_cpus: 1
- mem_size: 2 GB
- disk_size: 10 GB
+ mem_size: 500 MB
+ disk_size: 1 GB
properties:
- image: sfc_nsh_euphrates
+ image: sfc_nsh_fraser
availability_zone: {get_input: zone}
mgmt_driver: noop
config: |
diff --git a/sfc/tests/functest/vnfd-templates/test-one-chain-vnfd2.yaml b/sfc/tests/functest/vnfd-templates/test-one-chain-vnfd2.yaml
index 42308c6c..395245a9 100644
--- a/sfc/tests/functest/vnfd-templates/test-one-chain-vnfd2.yaml
+++ b/sfc/tests/functest/vnfd-templates/test-one-chain-vnfd2.yaml
@@ -15,10 +15,10 @@ topology_template:
nfv_compute:
properties:
num_cpus: 1
- mem_size: 2 GB
- disk_size: 10 GB
+ mem_size: 500 MB
+ disk_size: 1 GB
properties:
- image: sfc_nsh_euphrates
+ image: sfc_nsh_fraser
availability_zone: {get_input: zone}
mgmt_driver: noop
config: |
diff --git a/sfc/tests/functest/vnfd-templates/test-symmetric-vnfd.yaml b/sfc/tests/functest/vnfd-templates/test-symmetric-vnfd.yaml
index 1f4c11f6..bf175ef7 100644
--- a/sfc/tests/functest/vnfd-templates/test-symmetric-vnfd.yaml
+++ b/sfc/tests/functest/vnfd-templates/test-symmetric-vnfd.yaml
@@ -15,12 +15,15 @@ topology_template:
nfv_compute:
properties:
num_cpus: 1
- mem_size: 2 GB
- disk_size: 10 GB
+ mem_size: 500 MB
+ disk_size: 1 GB
properties:
- image: sfc_nsh_euphrates
+ image: sfc_nsh_fraser
availability_zone: {get_input: zone}
mgmt_driver: noop
+ config: |
+ param0: key1
+ param1: key2
service_type: firewall
monitoring_policy:
name: ping
@@ -46,6 +49,18 @@ topology_template:
- virtualBinding:
node: VDU1
+ CP2:
+ type: tosca.nodes.nfv.CP.Tacker
+ properties:
+ management: false
+ order: 1
+ anti_spoofing_protection: false
+ requirements:
+ - virtualLink:
+ node: VL1
+ - virtualBinding:
+ node: VDU1
+
VL1:
type: tosca.nodes.nfv.VL
properties:
diff --git a/sfc/tests/functest/vnfd-templates/test-two-chains-vnfd1.yaml b/sfc/tests/functest/vnfd-templates/test-two-chains-vnfd1.yaml
index 4042222c..cc5947c6 100644
--- a/sfc/tests/functest/vnfd-templates/test-two-chains-vnfd1.yaml
+++ b/sfc/tests/functest/vnfd-templates/test-two-chains-vnfd1.yaml
@@ -15,10 +15,10 @@ topology_template:
nfv_compute:
properties:
num_cpus: 1
- mem_size: 2 GB
- disk_size: 10 GB
+ mem_size: 500 MB
+ disk_size: 1 GB
properties:
- image: sfc_nsh_euphrates
+ image: sfc_nsh_fraser
availability_zone: {get_input: zone}
mgmt_driver: noop
config: |
diff --git a/sfc/tests/functest/vnfd-templates/test-two-chains-vnfd2.yaml b/sfc/tests/functest/vnfd-templates/test-two-chains-vnfd2.yaml
index ac4f223d..df719b9e 100644
--- a/sfc/tests/functest/vnfd-templates/test-two-chains-vnfd2.yaml
+++ b/sfc/tests/functest/vnfd-templates/test-two-chains-vnfd2.yaml
@@ -15,10 +15,10 @@ topology_template:
nfv_compute:
properties:
num_cpus: 1
- mem_size: 2 GB
- disk_size: 10 GB
+ mem_size: 500 MB
+ disk_size: 1 GB
properties:
- image: sfc_nsh_euphrates
+ image: sfc_nsh_fraser
availability_zone: {get_input: zone}
mgmt_driver: noop
config: |
diff --git a/sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd.yaml b/sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd.yaml
new file mode 100644
index 00000000..fd549079
--- /dev/null
+++ b/sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd.yaml
@@ -0,0 +1,40 @@
+---
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+description: test-deletion_HTTP Test
+
+topology_template:
+ description: topology-template-test-deletion
+ inputs:
+ net_src_port_id:
+ type: string
+
+ node_templates:
+ Forwarding_path1:
+ type: tosca.nodes.nfv.FP.TackerV2
+ description: creates path
+ properties:
+ id: 1
+ policy:
+ type: ACL
+ criteria:
+ - name: http_classifier
+ classifier:
+ network_src_port_id: {get_input: net_src_port_id}
+ destination_port_range: 80-80
+ ip_proto: 6
+ path:
+ - forwarder: test-vnfd1
+ capability: CP1
+
+ groups:
+ VNFFG1:
+ type: tosca.groups.nfv.VNFFG
+ description: creates chain
+ properties:
+ vendor: tacker
+ version: 1.0
+ number_of_endpoints: 1
+ dependent_virtual_link: [VL1]
+ connection_point: [CP1]
+ constituent_vnfs: [test-vnfd1]
+ members: [Forwarding_path1]
diff --git a/sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd.yaml b/sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd.yaml
index 27c7d545..4dcc0f3c 100644
--- a/sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd.yaml
+++ b/sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd.yaml
@@ -10,16 +10,18 @@ topology_template:
node_templates:
Forwarding_path1:
- type: tosca.nodes.nfv.FP.Tacker
+ type: tosca.nodes.nfv.FP.TackerV2
description: creates path
properties:
id: 1
policy:
type: ACL
criteria:
- - network_src_port_id: {get_input: net_src_port_id}
- - destination_port_range: 80-80
- - ip_proto: 6
+ - name: http_classifier
+ classifier:
+ network_src_port_id: {get_input: net_src_port_id}
+ destination_port_range: 80-80
+ ip_proto: 6
path:
- forwarder: test-vnfd1
capability: CP1
diff --git a/sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml b/sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml
new file mode 100644
index 00000000..371d25fe
--- /dev/null
+++ b/sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml
@@ -0,0 +1,48 @@
+---
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+description: test-case-symmetric
+
+topology_template:
+ description: topology-template-test1
+ inputs:
+ net_src_port_id:
+ type: string
+ net_dst_port_id:
+ type: string
+ ip_dst_prefix:
+ type: string
+
+ node_templates:
+ Forwarding_path1:
+ type: tosca.nodes.nfv.FP.TackerV2
+ description: creates path
+ properties:
+ id: 1
+ policy:
+ type: ACL
+ criteria:
+ - name: http_classifier
+ classifier:
+ network_src_port_id: {get_input: net_src_port_id}
+ network_dst_port_id: {get_input: net_dst_port_id}
+ ip_dst_prefix: {get_input: ip_dst_prefix}
+ destination_port_range: 80-80
+ ip_proto: 6
+ path:
+ - forwarder: test-vnfd1
+ capability: CP1
+ - forwarder: test-vnfd1
+ capability: CP2
+
+ groups:
+ VNFFG1:
+ type: tosca.groups.nfv.VNFFG
+ description: creates chain
+ properties:
+ vendor: tacker
+ version: 1.0
+ number_of_endpoints: 2
+ dependent_virtual_link: [VL1, VL1]
+ connection_point: [CP1, CP2]
+ constituent_vnfs: [test-vnfd1, test-vnfd1]
+ members: [Forwarding_path1]
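In the symmetric template above, CP1 and CP2 hang off the same VNF, and the classifier carries both source and destination ports so traffic can be steered in both directions. A minimal sketch, using a hypothetical helper name, of how such an ACL classifier maps to the OpenFlow-style match strings asserted in the unit tests further down (tp_dst on the forward path, tp_src on the reverse one):

    # Hypothetical helper, not part of the patch.
    def classifier_to_of_match(classifier, reverse_path=False):
        # '80-80' -> 80; only the lower bound matters for these matches
        lower = int(classifier['destination_port_range'].split('-')[0])
        key = 'tp_src' if reverse_path else 'tp_dst'
        return '%s=%d' % (key, lower)

    classifier_to_of_match({'destination_port_range': '80-80'})
    # -> 'tp_dst=80'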
diff --git a/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1.yaml b/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1.yaml
index f0615e4e..ceee363b 100644
--- a/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1.yaml
+++ b/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1.yaml
@@ -1,25 +1,32 @@
---
tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: test-case2_HTTP Test
+description: test-two-chains_HTTP Test
topology_template:
- description: topology-template-test2
+ description: topology-template-test-two-chains
inputs:
net_src_port_id:
type: string
node_templates:
Forwarding_path1:
- type: tosca.nodes.nfv.FP.Tacker
+ type: tosca.nodes.nfv.FP.TackerV2
description: creates path
properties:
id: 1
policy:
type: ACL
criteria:
- - network_src_port_id: {get_input: net_src_port_id}
- - destination_port_range: 22-80
- - ip_proto: 6
+ - name: get_ssh
+ classifier:
+ network_src_port_id: {get_input: net_src_port_id}
+ destination_port_range: 22-22
+ ip_proto: 6
+ - name: get_http
+ classifier:
+ network_src_port_id: {get_input: net_src_port_id}
+ destination_port_range: 80-80
+ ip_proto: 6
path:
- forwarder: test-vnfd1
capability: CP1
diff --git a/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2.yaml b/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2.yaml
index ec18c9d6..15739cc7 100644
--- a/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2.yaml
+++ b/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2.yaml
@@ -1,26 +1,32 @@
---
tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: test-case2_SSH Test
+description: test-two-chains_SSH Test
topology_template:
- description: topology-template-test2
+ description: topology-template-test-two-chains
inputs:
net_src_port_id:
type: string
-
node_templates:
Forwarding_path1:
- type: tosca.nodes.nfv.FP.Tacker
+ type: tosca.nodes.nfv.FP.TackerV2
description: creates path
properties:
id: 2
policy:
type: ACL
criteria:
- - network_src_port_id: {get_input: net_src_port_id}
- - destination_port_range: 22-80
- - ip_proto: 6
+ - name: get_ssh
+ classifier:
+ network_src_port_id: {get_input: net_src_port_id}
+ destination_port_range: 22-22
+ ip_proto: 6
+ - name: get_http
+ classifier:
+ network_src_port_id: {get_input: net_src_port_id}
+ destination_port_range: 80-80
+ ip_proto: 6
path:
- forwarder: test-vnfd2
capability: CP1
diff --git a/sfc/tests/functest/setup_scripts/__init__.py b/sfc/unit_tests/__init__.py
index e69de29b..e69de29b 100644
--- a/sfc/tests/functest/setup_scripts/__init__.py
+++ b/sfc/unit_tests/__init__.py
diff --git a/sfc/unit_tests/unit/__init__.py b/sfc/unit_tests/unit/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/sfc/unit_tests/unit/__init__.py
diff --git a/sfc/unit_tests/unit/lib/test_cleanup.py b/sfc/unit_tests/unit/lib/test_cleanup.py
new file mode 100644
index 00000000..e6f59d23
--- /dev/null
+++ b/sfc/unit_tests/unit/lib/test_cleanup.py
@@ -0,0 +1,469 @@
+#!/usr/bin/env python
+
+###############################################################################
+# Copyright (c) 2018 Intracom Telecom and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+import unittest
+import sfc.lib.cleanup as cleanup
+
+from mock import patch
+from mock import call
+from mock import DEFAULT
+from mock import Mock
+
+
+__author__ = "Dimitrios Markou <mardim@intracom-telecom.com>"
+
+
+class SfcCleanupTesting(unittest.TestCase):
+
+ def setUp(self):
+ self.odl_ip = '10.10.10.10'
+ self.odl_port = '8081'
+ self.patcher = patch('sfc.lib.openstack_utils.get_tacker_client')
+ self.mock_tacker_client = self.patcher.start()
+ self.mock_tacker_client.return_value = 'tacker_client_obj'
+
+ def tearDown(self):
+ self.patcher.stop()
+
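The setUp/tearDown pair above starts and stops the get_tacker_client patcher by hand. An equivalent sketch using unittest's addCleanup, which also stops the patcher if setUp fails partway through:

    def setUp(self):
        self.patcher = patch('sfc.lib.openstack_utils.get_tacker_client')
        self.mock_tacker_client = self.patcher.start()
        self.mock_tacker_client.return_value = 'tacker_client_obj'
        # Cleanups run in LIFO order; no explicit tearDown needed.
        self.addCleanup(self.patcher.stop)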
+ @patch('sfc.lib.cleanup.logger.info')
+ @patch('sfc.lib.odl_utils.delete_odl_resource_elem')
+ @patch('sfc.lib.odl_utils.odl_resource_list_names')
+ @patch('sfc.lib.odl_utils.get_odl_resource_list')
+ def test_delete_odl_resource(self, mock_resource_list,
+ mock_resource_list_name,
+ mock_del_resource_elem,
+ mock_log):
+ """
+ Checks if the functions which belong to the odl_utils
+ library are getting called.
+ """
+
+ resource = 'mock_resource'
+ log_calls = [call("Removing ODL resource: mock_resource/elem_one"),
+ call("Removing ODL resource: mock_resource/elem_two")]
+
+ del_calls = [call(self.odl_ip, self.odl_port, resource, 'elem_one'),
+ call(self.odl_ip, self.odl_port, resource, 'elem_two')]
+
+ mock_resource_list_name.return_value = ['elem_one', 'elem_two']
+ mock_resource_list.return_value = ['rsrc_one',
+ 'rsrc_two',
+ 'rsrc_three']
+
+ cleanup.delete_odl_resources(self.odl_ip, self.odl_port, resource)
+
+ mock_resource_list.assert_called_once_with(self.odl_ip,
+ self.odl_port,
+ resource)
+ mock_resource_list_name.assert_called_once_with(
+ resource, ['rsrc_one', 'rsrc_two', 'rsrc_three'])
+ mock_del_resource_elem.assert_has_calls(del_calls)
+ mock_log.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.odl_utils.get_odl_acl_list')
+ @patch('sfc.lib.odl_utils.odl_acl_types_names')
+ @patch('sfc.lib.odl_utils.delete_odl_acl')
+ def test_delete_odl_ietf_access_lists(self,
+ mock_del_acl,
+ mock_acl_types,
+ mock_get_acls):
+ """
+ Checks the proper functionality of the delete_odl_ietf_access_lists
+ function
+ """
+
+ mock_acl_type_name_list = [('acl_type_one', 'name_one'),
+ ('acl_type_two', 'name_two')]
+ mock_get_acls.return_value = ['acl_one', 'acl_two']
+ mock_acl_types.return_value = mock_acl_type_name_list
+ del_calls = [call(self.odl_ip, self.odl_port, key, value)
+ for key, value in mock_acl_type_name_list]
+
+ cleanup.delete_odl_ietf_access_lists(self.odl_ip, self.odl_port)
+
+ mock_get_acls.assert_called_once_with(self.odl_ip, self.odl_port)
+ mock_acl_types.assert_called_once_with(['acl_one', 'acl_two'])
+ mock_del_acl.assert_has_calls(del_calls)
+
+ @patch('sfc.lib.openstack_utils.list_vnfds', return_value=None)
+ def test_delete_vnfds_returned_list_is_none(self, mock_list_vnfds):
+ """
+ Check the proper functionality of the delete_vnfds
+ function when the returned vnfds list is None
+ """
+
+ self.assertIsNone(cleanup.delete_vnfds())
+ mock_list_vnfds.assert_called_once_with('tacker_client_obj')
+
+ @patch('sfc.lib.cleanup.logger.info')
+ @patch('sfc.lib.openstack_utils.delete_vnfd')
+ @patch('sfc.lib.openstack_utils.list_vnfds')
+ def test_delete_vnfds_not_empty_list(self,
+ mock_list_vnfds,
+ mock_del_vnfd,
+ mock_log):
+ """
+ Check the proper functionality of the delete_vnfds
+ function when the returned vnfds list is not empty
+ """
+
+ mock_list_vnfds.return_value = ['vnfd_one', 'vnfd_two']
+ log_calls = [call("Removing vnfd: vnfd_one"),
+ call("Removing vnfd: vnfd_two")]
+
+ del_calls = [call('tacker_client_obj', vnfd_id='vnfd_one'),
+ call('tacker_client_obj', vnfd_id='vnfd_two')]
+
+ cleanup.delete_vnfds()
+ mock_list_vnfds.assert_called_once_with('tacker_client_obj')
+ mock_log.assert_has_calls(log_calls)
+ mock_del_vnfd.assert_has_calls(del_calls)
+
+ @patch('sfc.lib.openstack_utils.list_vnfs', return_value=None)
+ def test_delete_vnfs_returned_list_is_none(self, mock_list_vnfs):
+ """
+ Check the proper functionality of the delete_vnfs
+ function when the returned vnfs list is None
+ """
+
+ self.assertIsNone(cleanup.delete_vnfs())
+ mock_list_vnfs.assert_called_once_with('tacker_client_obj')
+
+ @patch('sfc.lib.cleanup.logger.info')
+ @patch('sfc.lib.openstack_utils.delete_vnf')
+ @patch('sfc.lib.openstack_utils.list_vnfs')
+ def test_delete_vnfs_not_empty_list(self,
+ mock_list_vnfs,
+ mock_del_vnf,
+ mock_log):
+ """
+ Check the proper functionality of the delete_vnfs
+ function when the returned vnfs list is not empty
+ """
+
+ mock_list_vnfs.return_value = ['vnf_one', 'vnf_two']
+ log_calls = [call("Removing vnf: vnf_one"),
+ call("Removing vnf: vnf_two")]
+
+ del_calls = [call('tacker_client_obj', vnf_id='vnf_one'),
+ call('tacker_client_obj', vnf_id='vnf_two')]
+
+ cleanup.delete_vnfs()
+ mock_list_vnfs.assert_called_once_with('tacker_client_obj')
+ mock_log.assert_has_calls(log_calls)
+ mock_del_vnf.assert_has_calls(del_calls)
+
+ @patch('sfc.lib.openstack_utils.list_vnffgs', return_value=None)
+ def test_delete_vnffgs_returned_list_is_none(self, mock_list_vnffgs):
+ """
+ Check the proper functionality of the delete_vnffgs
+ function when the returned vnffgs list is None
+ """
+
+ self.assertIsNone(cleanup.delete_vnffgs())
+ mock_list_vnffgs.assert_called_once_with('tacker_client_obj')
+
+ @patch('sfc.lib.cleanup.logger.info')
+ @patch('sfc.lib.openstack_utils.delete_vnffg')
+ @patch('sfc.lib.openstack_utils.list_vnffgs')
+ def test_delete_vnffgs_not_empty_list(self,
+ mock_list_vnffgs,
+ mock_del_vnffg,
+ mock_log):
+ """
+ Check the proper functionality of the delete_vnffgs
+ function when the returned vnffgs list is not empty
+ """
+
+ mock_list_vnffgs.return_value = ['vnffg_one', 'vnffg_two']
+ log_calls = [call("Removing vnffg: vnffg_two"),
+ call("Removing vnffg: vnffg_one")]
+
+ del_calls = [call('tacker_client_obj', vnffg_id='vnffg_two'),
+ call('tacker_client_obj', vnffg_id='vnffg_one')]
+
+ cleanup.delete_vnffgs()
+ mock_list_vnffgs.assert_called_once_with('tacker_client_obj')
+ mock_log.assert_has_calls(log_calls)
+ mock_del_vnffg.assert_has_calls(del_calls)
+
+ @patch('sfc.lib.openstack_utils.list_vnffgds', return_value=None)
+ def test_delete_vnffgds_returned_list_is_none(self, mock_list_vnffgds):
+ """
+ Check the proper functionality of the delete_vnffgds
+ function when the returned vnffgds list is None
+ """
+
+ self.assertIsNone(cleanup.delete_vnffgds())
+ mock_list_vnffgds.assert_called_once_with('tacker_client_obj')
+
+ @patch('sfc.lib.cleanup.logger.info')
+ @patch('sfc.lib.openstack_utils.delete_vnffgd')
+ @patch('sfc.lib.openstack_utils.list_vnffgds')
+ def test_delete_vnffgds_not_empty_list(self,
+ mock_list_vnffgds,
+ mock_del_vnffgd,
+ mock_log):
+ """
+ Check the proper functionality of the delete_vnffgds
+ function when the returned vnffgds list is not empty
+ """
+
+ mock_list_vnffgds.return_value = ['vnffgd_one', 'vnffgd_two']
+ log_calls = [call("Removing vnffgd: vnffgd_one"),
+ call("Removing vnffgd: vnffgd_two")]
+
+ del_calls = [call('tacker_client_obj', vnffgd_id='vnffgd_one'),
+ call('tacker_client_obj', vnffgd_id='vnffgd_two')]
+
+ cleanup.delete_vnffgds()
+ mock_list_vnffgds.assert_called_once_with('tacker_client_obj')
+ mock_log.assert_has_calls(log_calls)
+ mock_del_vnffgd.assert_has_calls(del_calls)
+
+ @patch('sfc.lib.openstack_utils.list_vims', return_value=None)
+ def test_delete_vims_returned_list_is_none(self, mock_list_vims):
+ """
+ Check the proper functionality of the delete_vims
+ function when the returned vims list is None
+ """
+
+ self.assertIsNone(cleanup.delete_vims())
+ mock_list_vims.assert_called_once_with('tacker_client_obj')
+
+ @patch('sfc.lib.cleanup.logger.info')
+ @patch('sfc.lib.openstack_utils.delete_vim')
+ @patch('sfc.lib.openstack_utils.list_vims')
+ def test_delete_vims_not_empty_list(self,
+ mock_list_vims,
+ mock_del_vim,
+ mock_log):
+ """
+ Check the proper functionality of the delete_vims
+ function when the returned vims list is not empty
+ """
+
+ mock_list_vims.return_value = ['vim_one', 'vim_two']
+ log_calls = [call("Removing vim: vim_one"),
+ call("Removing vim: vim_two")]
+
+ del_calls = [call('tacker_client_obj', vim_id='vim_one'),
+ call('tacker_client_obj', vim_id='vim_two')]
+
+ cleanup.delete_vims()
+ mock_list_vims.assert_called_once_with('tacker_client_obj')
+ mock_log.assert_has_calls(log_calls)
+ mock_del_vim.assert_has_calls(del_calls)
+
+ @patch('sfc.lib.openstack_utils.OpenStackSFC', autospec=True)
+ def test_delete_untracked_security_groups(self,
+ mock_obj):
+ instance = mock_obj.return_value
+ cleanup.delete_untracked_security_groups()
+ instance.delete_all_security_groups.assert_called_once()
+
+ @patch('sfc.lib.cleanup.delete_odl_resources')
+ @patch('sfc.lib.cleanup.delete_odl_ietf_access_lists')
+ def test_cleanup_odl(self,
+ mock_del_odl_ietf,
+ mock_del_odl_res):
+ resources = ['service-function-forwarder']
+
+ odl_res_calls = [call(self.odl_ip, self.odl_port, item)
+ for item in resources]
+
+ cleanup.cleanup_odl(self.odl_ip, self.odl_port)
+
+ mock_del_odl_res.assert_has_calls(odl_res_calls)
+ mock_del_odl_ietf.assert_called_once_with(self.odl_ip, self.odl_port)
+
+ @patch('sfc.lib.openstack_utils.OpenStackSFC', autospec=True)
+ def test_cleanup_nsfc_objects(self, mock_os_sfc):
+ mock_os_sfc_ins = mock_os_sfc.return_value
+ cleanup.cleanup_nsfc_objects()
+ mock_os_sfc_ins.delete_chain.assert_called_once()
+ mock_os_sfc_ins.delete_port_groups.assert_called_once()
+
+ @patch('time.sleep')
+ def test_cleanup_tacker_objects(self, mock_time):
+
+ mock_dict = {'delete_vnffgs': DEFAULT,
+ 'delete_vnffgds': DEFAULT,
+ 'delete_vnfs': DEFAULT,
+ 'delete_vnfds': DEFAULT,
+ 'delete_vims': DEFAULT}
+ with patch.multiple('sfc.lib.cleanup',
+ **mock_dict) as mock_values:
+ cleanup.cleanup_tacker_objects()
+
+ for key in mock_values:
+ mock_values[key].assert_called_once()
+
+ mock_time.assert_called_once_with(20)
+
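patch.multiple with mock.DEFAULT, as used above, swaps every named attribute for a fresh MagicMock and hands them back as a name-to-mock dict. A minimal standalone sketch of the same pattern:

    import time
    from mock import patch, DEFAULT

    with patch.multiple('time', sleep=DEFAULT) as mocks:
        time.sleep(20)                 # hits the MagicMock, returns at once
        mocks['sleep'].assert_called_once_with(20)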
+ @patch('sfc.lib.cleanup.cleanup_tacker_objects')
+ def test_cleanup_mano_objects_tacker(self, mock_cleanup_tacker):
+ cleanup.cleanup_mano_objects('tacker')
+ mock_cleanup_tacker.assert_called_once()
+
+ @patch('sfc.lib.cleanup.cleanup_nsfc_objects')
+ def test_cleanup_mano_objects_nsfc(self, mock_cleanup_nsfc):
+ cleanup.cleanup_mano_objects('no-mano')
+ mock_cleanup_nsfc.assert_called_once()
+
+ @patch('sfc.lib.cleanup.connection')
+ @patch('sfc.lib.cleanup.logger.info')
+ def test_delete_openstack_objects(self, mock_log, mock_conn):
+ """
+ Checks the delete_openstack_objects function
+ """
+ testcase_config = Mock()
+ conn = Mock()
+ mock_creator_obj_one = Mock()
+ mock_creator_obj_one.name = 'subnet_name'
+ mock_creator_obj_two = Mock()
+ mock_creator_obj_two.name = 'creator_name'
+ mock_creator_objs_list = [mock_creator_obj_one, mock_creator_obj_two]
+
+ mock_conn.from_config.return_value = conn
+ testcase_config.subnet_name = mock_creator_obj_one.name
+ log_calls = [call('Deleting ' + mock_creator_obj_two.name),
+ call('Deleting ' + mock_creator_obj_one.name)]
+
+ cleanup.delete_openstack_objects(testcase_config,
+ mock_creator_objs_list)
+ mock_creator_obj_one.delete.\
+ assert_called_once_with(conn.session)
+ mock_creator_obj_two.delete.\
+ assert_called_once_with(conn.session)
+ mock_log.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.cleanup.connection')
+ @patch('sfc.lib.cleanup.logger.info')
+ def test_delete_openstack_objects_router(self, mock_log, mock_conn):
+ """
+ Checks the delete_openstack_objects function when a router
+ and its subnet are among the creators
+ """
+ testcase_config = Mock()
+ conn = Mock()
+ mock_creator_obj = Mock()
+ mock_creator_obj.name = 'creator_name'
+ mock_creator_router = Mock()
+ mock_creator_router.name = 'router_name'
+ mock_creator_router.id = '1'
+ mock_creator_subnet = Mock()
+ mock_creator_subnet.name = 'subnet_name'
+ mock_creator_subnet.id = '2'
+ mock_creator_objs_list = [mock_creator_subnet,
+ mock_creator_router,
+ mock_creator_obj]
+
+ mock_conn.from_config.return_value = conn
+ testcase_config.router_name = mock_creator_router.name
+ testcase_config.subnet_name = mock_creator_subnet.name
+
+ conn.network.get_subnet.return_value = mock_creator_subnet
+ log_calls = [call('Deleting ' + mock_creator_obj.name),
+ call('Deleting ' + mock_creator_router.name),
+ call('Removing subnet from router'),
+ call('Deleting router'),
+ call('Deleting ' + mock_creator_subnet.name)]
+
+ cleanup.delete_openstack_objects(testcase_config,
+ mock_creator_objs_list)
+ conn.network.remove_interface_from_router.\
+ assert_called_once_with(mock_creator_router.id,
+ mock_creator_subnet.id)
+ conn.network.delete_router.\
+ assert_called_once_with(mock_creator_router)
+ mock_creator_obj.delete.\
+ assert_called_once_with(conn.session)
+ mock_creator_subnet.delete.\
+ assert_called_once_with(conn.session)
+ mock_log.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.cleanup.connection')
+ @patch('sfc.lib.cleanup.logger.info')
+ @patch('sfc.lib.cleanup.logger.error')
+ def test_delete_openstack_objects_exception(self, mock_log_err,
+ mock_log_info, mock_conn):
+ """
+ Check the proper functionality of the delete_openstack_objects
+ function when an exception occurs.
+ """
+ testcase_config = Mock()
+ conn = Mock()
+ mock_creator_obj_one = Mock()
+ mock_creator_obj_one.name = 'subnet_name'
+ mock_creator_obj_two = Mock()
+ mock_creator_obj_two.name = 'creator_name'
+ exception_one = Exception('First Boom!')
+ exception_two = Exception('Second Boom!')
+ attrs_list = [{'delete.side_effect': exception_one},
+ {'delete.side_effect': exception_two}]
+
+ mock_creator_obj_one.configure_mock(**attrs_list[0])
+ mock_creator_obj_two.configure_mock(**attrs_list[1])
+
+ mock_creator_objs_list = [mock_creator_obj_one, mock_creator_obj_two]
+ mock_conn.from_config.return_value = conn
+ testcase_config.subnet_name = mock_creator_obj_one.name
+
+ log_calls = [call('Deleting ' + mock_creator_obj_two.name),
+ call('Deleting ' + mock_creator_obj_one.name),
+ call('Unexpected error cleaning - %s', exception_two),
+ call('Unexpected error cleaning - %s', exception_one)]
+
+ cleanup.delete_openstack_objects(testcase_config,
+ mock_creator_objs_list)
+ mock_creator_obj_one.delete.\
+ assert_called_once_with(conn.session)
+ mock_creator_obj_two.delete.\
+ assert_called_once_with(conn.session)
+
+ mock_log_info.assert_has_calls(log_calls[:2])
+ mock_log_err.assert_has_calls(log_calls[2:])
+
+ @patch('sfc.lib.cleanup.delete_untracked_security_groups')
+ @patch('sfc.lib.cleanup.cleanup_mano_objects')
+ @patch('sfc.lib.cleanup.delete_openstack_objects')
+ @patch('sfc.lib.cleanup.cleanup_odl')
+ def test_cleanup(self,
+ mock_cleanup_odl,
+ mock_del_os_obj,
+ mock_cleanup_mano,
+ mock_untr_sec_grps):
+
+ cleanup.cleanup('testcase_config', ['creator_one', 'creator_two'],
+ 'mano',
+ self.odl_ip,
+ self.odl_port)
+
+ mock_cleanup_odl.assert_called_once_with(self.odl_ip,
+ self.odl_port)
+ mock_del_os_obj.assert_called_once_with('testcase_config',
+ ['creator_one', 'creator_two'])
+ mock_cleanup_mano.assert_called_once_with('mano')
+ mock_untr_sec_grps.assert_called_once()
+
+ @patch('sfc.lib.cleanup.cleanup_mano_objects')
+ @patch('sfc.lib.cleanup.cleanup_odl')
+ def test_cleanup_from_bash(self,
+ mock_cleanup_odl,
+ mock_cleanup_mano):
+
+ cleanup.cleanup_from_bash(self.odl_ip,
+ self.odl_port,
+ 'mano')
+
+ mock_cleanup_odl.assert_called_once_with(self.odl_ip,
+ self.odl_port)
+ mock_cleanup_mano.assert_called_once_with(mano='mano')
diff --git a/sfc/unit_tests/unit/lib/test_odl_utils.py b/sfc/unit_tests/unit/lib/test_odl_utils.py
new file mode 100644
index 00000000..1dfcf1ed
--- /dev/null
+++ b/sfc/unit_tests/unit/lib/test_odl_utils.py
@@ -0,0 +1,817 @@
+#!/usr/bin/env python
+
+###############################################################################
+# Copyright (c) 2018 Venkata Harshavardhan Reddy Allu and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+import unittest
+
+from mock import call
+from mock import Mock
+from mock import patch
+
+import sfc.lib.odl_utils as odl_utils
+
+__author__ = "Harshavardhan Reddy <venkataharshavardhan_ven@srmuniv.edu.in>"
+
+
+class SfcOdlUtilsTesting(unittest.TestCase):
+
+ @patch('re.compile', autospec=True)
+ @patch('opnfv.utils.ovs_logger.OVSLogger', autospec=True)
+ def test_actual_rsps_in_compute(self, mock_ovs_log, mock_compile):
+ """
+ Checks the proper functionality of actual_rsps_in_compute
+ function
+ """
+
+ match_calls = [call('msg_1'), call('msg_2')]
+
+ mf = Mock()
+ mf.group.side_effect = ['msg_p_1', 'msg_p_2']
+ mock_compile.return_value.match.side_effect = [mf, None]
+ mock_ovs_log.ofctl_dump_flows.return_value = '\nflow_rep\nmsg_1\nmsg_2'
+
+ result = odl_utils.actual_rsps_in_compute(mock_ovs_log, 'compute_ssh')
+
+ self.assertEqual(['msg_p_1|msg_p_2'], result)
+ mock_compile.return_value.match.assert_has_calls(match_calls)
+ mock_ovs_log.ofctl_dump_flows.assert_called_once_with('compute_ssh',
+ 'br-int', '101')
+
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('sfc.lib.odl_utils.get_rsp', autospec=True)
+ @patch('sfc.lib.odl_utils.get_odl_acl_list', autospec=True)
+ @patch('sfc.lib.odl_utils.get_rsps_from_netvirt_acl_actions',
+ autospec=True)
+ def test_get_active_rsps_on_ports(self,
+ mock_rsps_from_netvirt_acl_actions,
+ mock_odl_acl_list,
+ mock_get_rsp,
+ mock_log):
+ """
+ Checks the proper functionality of get_active_rsps_on_ports
+ function
+ """
+
+ log_calls = [call('ACL acl_obj_one does not have an ACE')]
+
+ port_one = Mock()
+ port_two = Mock()
+ port_one.id = 's_p'
+ port_two.id = 'd_p'
+ neutron_ports = [port_one, port_two]
+
+ mock_rsps_from_netvirt_acl_actions.return_value = ['rsp_obj_one',
+ 'rsp_obj_two']
+
+ mock_get_rsp.side_effect = [{'of-matches': ['of-match-one'],
+ 'reverse-path': 'r-path-one'},
+ {'of-matches': ['of-match-two']}]
+
+ mock_odl_acl_list.return_value = {'access-lists': {'acl': [
+ {'acl-name': 'acl_obj_one',
+ 'access-list-entries': {'ace': []}},
+ {'acl-name': 'acl_obj_two',
+ 'access-list-entries': {'ace': [{'matches': {
+ 'destination-port-range': None}}]}},
+ {'acl-name': 'acl_obj_three',
+ 'access-list-entries': {'ace': [{'matches': {
+ 'destination-port-range': {'lower-port': 22},
+ 'netvirt-sfc-acl:source-port-uuid': 's_p_uuid',
+ 'netvirt-sfc-acl:destination-port-uuid': 'd_p_uuid'}}]}},
+ {'acl-name': 'acl_obj_four',
+ 'access-list-entries': {'ace': [{'matches': {
+ 'destination-port-range': {'lower-port': 22},
+ 'netvirt-sfc-acl:source-port-uuid': 's_p',
+ 'netvirt-sfc-acl:destination-port-uuid': 'd_p'},
+ 'actions': 'm_actions'}]}}]}}
+
+ expected = [{'of-matches': ['of-match-two', 'tp_dst=22']},
+ {'of-matches': ['of-match-one', 'tp_src=22'],
+ 'reverse-path': 'r-path-one'}]
+
+ result = odl_utils.get_active_rsps_on_ports('odl_ip',
+ 'odl_port',
+ neutron_ports)
+
+ self.assertEqual(sorted(expected), sorted(result))
+ mock_log.warn.assert_has_calls(log_calls)
+ mock_rsps_from_netvirt_acl_actions.assert_called_once_with('odl_ip',
+ 'odl_port',
+ 'm_actions')
+
+ @patch('sfc.lib.odl_utils.get_odl_resource_elem', autospec=True)
+ def test_get_rsps_from_netvirt_acl_actions(self, mock_odl_resource_elem):
+ """
+ Checks the proper functionality of get_rsps_from_netvirt_acl_actions
+ function
+ """
+
+ netv = {'netvirt-sfc-acl:rsp-name': 'rsp-name',
+ 'netvirt-sfc-acl:sfp-name': 'sfp-name'}
+
+ sfp_state = {'sfp-rendered-service-path': [{'name': 'sfp-rsp-one'},
+ {'name': 'sfp-rsp-two'}]}
+
+ mock_odl_resource_elem.return_value = sfp_state
+ rsp_names = ['rsp-name', 'sfp-rsp-one', 'sfp-rsp-two']
+
+ result = odl_utils.get_rsps_from_netvirt_acl_actions('odl_ip',
+ 'odl_port',
+ netv)
+ self.assertEqual(rsp_names, result)
+ mock_odl_resource_elem.assert_called_once_with('odl_ip', 'odl_port',
+ 'service-function-path-'
+ 'state', 'sfp-name',
+ datastore='operational')
+
+ @patch('sfc.lib.odl_utils.get_odl_resource_elem',
+ autospec=True, return_value='mocked_rsp')
+ def test_get_rsp(self, mock_odl_resource_elem):
+ """
+ Checks the proper functionality of get_rsp
+ function
+ """
+
+ result = odl_utils.get_rsp('odl_ip', 'odl_port', 'rsp_name')
+ self.assertEqual('mocked_rsp', result)
+ mock_odl_resource_elem.assert_called_once_with('odl_ip', 'odl_port',
+ 'rendered-service-path',
+ 'rsp_name',
+ datastore='operational')
+
+ @patch('sfc.lib.odl_utils.get_active_rsps_on_ports', autospec=True)
+ def test_promised_rsps_in_compute(self, mock_active_rsps_on_ports):
+ """
+ Checks the proper functionality of promised_rsps_in_compute
+ function
+ """
+
+ mock_active_rsps_on_ports.return_value = [
+ {'of-matches': {'one': 'one'}, 'path-id': 1},
+ {'of-matches': {'two': 'two'}, 'path-id': 2}]
+
+ result = odl_utils.promised_rsps_in_compute('odl_ip', 'odl_port',
+ 'compute_ports')
+
+ self.assertEqual(['0x1|one', '0x2|two'], result)
+ mock_active_rsps_on_ports.assert_called_once_with('odl_ip', 'odl_port',
+ 'compute_ports')
+
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('time.time', autospec=True, side_effect=[1, 2])
+ def test_timethis(self,
+ mock_time,
+ mock_log):
+ """
+ Checks the proper functionality of timethis
+ function
+ """
+
+ expected = ('mock_this', '1')
+ log_calls = [call("mock_func(*('mock',), **{'name': 'this'}) "
+ "took: 1 sec")]
+
+ @odl_utils.timethis
+ def mock_func(msg, name=''):
+ return msg+'_'+name
+
+ result = mock_func('mock', name='this')
+ self.assertEqual(result, expected)
+ mock_log.info.assert_has_calls(log_calls)
+
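Given the expected (result, '1') tuple and the log message above, the decorator under test presumably looks roughly like this (a sketch consistent with the assertions, not the library's verbatim code):

    import functools
    import logging
    import time

    logger = logging.getLogger(__name__)

    def timethis(func):
        @functools.wraps(func)
        def timed(*args, **kwargs):
            start = time.time()
            result = func(*args, **kwargs)
            elapsed = '{0}'.format(time.time() - start)
            logger.info('%s(*%s, **%s) took: %s sec'
                        % (func.__name__, args, kwargs, elapsed))
            return result, elapsed
        return timed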
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('sfc.lib.odl_utils.find_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.get_odl_items', autospec=True)
+ @patch('sfc.lib.odl_utils.promised_rsps_in_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.os_sfc_utils.get_tacker_items', autospec=True)
+ def test_wait_for_classification_rules_rsps_not_configured(
+ self, mock_get_tacker_items, mock_promised_rsps_in_compute,
+ mock_get_odl_items, mock_find_compute, mock_log, mock_sleep):
+ """
+ Checks the proper functionality of wait_for_classification_rules
+ function when rsps are not configured in ODL
+ """
+
+ log_calls = [call("Error when waiting for classification rules: "
+ "RSPs not configured in ODL")]
+
+ mock_find_compute.return_value = 'mock_compute'
+ mock_promised_rsps_in_compute.return_value = None
+
+ odl_utils.wait_for_classification_rules('ovs_logger',
+ 'compute_nodes',
+ 'odl_ip',
+ 'odl_port',
+ 'compute_name',
+ 'neutron_ports')
+ mock_promised_rsps_in_compute.assert_called_with('odl_ip',
+ 'odl_port',
+ 'neutron_ports')
+ assert mock_promised_rsps_in_compute.call_count == 10
+ mock_find_compute.assert_called_once_with('compute_name',
+ 'compute_nodes')
+ mock_sleep.assert_called_with(3)
+ assert mock_sleep.call_count == 9
+ mock_get_tacker_items.assert_called_once_with()
+ mock_get_odl_items.assert_called_once_with('odl_ip', 'odl_port')
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('sfc.lib.odl_utils.find_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.actual_rsps_in_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.promised_rsps_in_compute', autospec=True)
+ def test_wait_for_classification_rules_timeout_not_updated(
+ self, mock_promised_rsps_in_compute, mock_actual_rsps_in_compute,
+ mock_find_compute, mock_log, mock_sleep):
+ """
+ Checks the proper functionality of wait_for_classification_rules
+ function when classification rules are not updated in a given timeout
+ """
+
+ log_calls = [call("Timeout but classification rules are not updated"),
+ call("RSPs in ODL Operational DataStore"
+ "for compute 'compute_name':"),
+ call("['compute|rsps']"),
+ call("RSPs in compute nodes:"),
+ call("[]")]
+
+ mock_compute = Mock()
+ mock_compute.ssh_client = 'mock_ssh_client'
+ mock_find_compute.return_value = mock_compute
+ mock_actual_rsps_in_compute.return_value = []
+ mock_promised_rsps_in_compute.return_value = ['compute|rsps']
+
+ odl_utils.wait_for_classification_rules('ovs_logger',
+ 'compute_nodes',
+ 'odl_ip',
+ 'odl_port',
+ 'compute_name',
+ 'neutron_ports',
+ timeout=2)
+ mock_find_compute.assert_called_once_with('compute_name',
+ 'compute_nodes')
+ mock_log.error.assert_has_calls(log_calls[:1])
+ mock_log.info.assert_has_calls(log_calls[1:])
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('sfc.lib.odl_utils.find_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.actual_rsps_in_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.promised_rsps_in_compute', autospec=True)
+ def test_wait_for_classification_rules_updated(
+ self, mock_promised_rsps_in_compute, mock_actual_rsps_in_compute,
+ mock_find_compute, mock_log, mock_sleep):
+ """
+ Checks the proper functionality of wait_for_classification_rules
+ function when classification rules are updated within the timeout
+ """
+
+ log_calls = [call("RSPs in ODL Operational DataStore"
+ "for compute 'compute_name':"),
+ call("['compute|rsps']"),
+ call("RSPs in compute nodes:"),
+ call("['compute|rsps']"),
+ call("Classification rules were updated")]
+ mock_compute = Mock()
+ mock_compute.ssh_client = 'mock_ssh_client'
+ mock_find_compute.return_value = mock_compute
+ mock_actual_rsps_in_compute.return_value = ['compute|rsps']
+ mock_promised_rsps_in_compute.return_value = ['compute|rsps']
+
+ odl_utils.wait_for_classification_rules('ovs_logger',
+ 'compute_nodes',
+ 'odl_ip',
+ 'odl_port',
+ 'compute_name',
+ 'neutron_ports',
+ timeout=2)
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('re.search', autospec=True)
+ @patch('ConfigParser.RawConfigParser', autospec=True)
+ @patch('os.getcwd', autospec=True, return_value='/etc')
+ @patch('os.path.join', autospec=True, return_value='/etc/ml2_conf.ini')
+ def test_get_odl_ip_port(self, mock_join,
+ mock_getcwd,
+ mock_rawconfigparser,
+ mock_search):
+ """
+ Checks the proper functionality of get_odl_ip_port
+ function
+ """
+
+ cmd_calls = [call('pwd'),
+ call('sudo cp /etc/neutron/plugins/ml2/ml2_conf.ini '
+ '/etc/'),
+ call('sudo chmod 777 /etc/ml2_conf.ini')]
+
+ n1 = Mock()
+ n2 = Mock()
+ nodes = [n1, n2]
+ mock_rawconfigparser.return_value.get.return_value = 'config'
+ mock_search.return_value.group.return_value = 'odl_ip:odl_port'
+ n1.run_cmd.side_effect = ['/etc', '', '']
+
+ result = odl_utils.get_odl_ip_port(nodes)
+ self.assertEqual(('odl_ip', 'odl_port'), result)
+ n1.run_cmd.assert_has_calls(cmd_calls)
+ n1.is_controller.assert_called_once_with()
+ mock_getcwd.assert_called_once_with()
+ mock_join.assert_called_once_with('/etc', 'ml2_conf.ini')
+ n1.get_file.assert_called_once_with('/etc/ml2_conf.ini',
+ '/etc/ml2_conf.ini')
+ mock_rawconfigparser.return_value.read.assert_called_once_with(
+ '/etc/ml2_conf.ini')
+ mock_rawconfigparser.return_value.get.assert_called_with(
+ 'ml2_odl', 'url')
+ mock_search.assert_called_once_with(r'[0-9]+(?:\.[0-9]+){3}\:[0-9]+',
+ 'config')
+
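The regex asserted above reduces the ml2_odl url from ml2_conf.ini to a bare ip:port pair. Standalone sketch of that extraction:

    import re

    url = 'http://192.0.2.10:8080/controller/nb/v2/neutron'
    match = re.search(r'[0-9]+(?:\.[0-9]+){3}\:[0-9]+', url)
    ip, port = match.group().split(':')
    # ip == '192.0.2.10', port == '8080'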
+ @patch('re.search', autospec=True)
+ @patch('ConfigParser.RawConfigParser', autospec=True)
+ @patch('os.getcwd', autospec=True, return_value='/etc')
+ @patch('os.path.join', autospec=True, return_value='/etc/ml2_conf.ini')
+ def test_get_odl_username_password(self, mock_join,
+ mock_getcwd,
+ mock_rawconfigparser,
+ mock_search):
+ """
+ Check the proper functionality of the get_odl_username_password
+ function
+ """
+
+ mock_rawconfigparser.return_value.get.return_value = 'odl_username'
+ result = odl_utils.get_odl_username_password()
+ self.assertEqual(('odl_username'), result[0])
+ mock_getcwd.assert_called_once_with()
+ mock_join.assert_called_once_with('/etc', 'ml2_conf.ini')
+ mock_rawconfigparser.return_value.read.assert_called_once_with(
+ '/etc/ml2_conf.ini')
+ mock_rawconfigparser.return_value.get.return_value = 'odl_password'
+ result = odl_utils.get_odl_username_password()
+ self.assertEqual(('odl_password'), result[1])
+
+ def test_pluralize(self):
+ """
+ Checks the proper functionality of pluralize
+ function
+ """
+
+ result = odl_utils.pluralize('service-function-path')
+ self.assertEqual('service-function-paths', result)
+
+ def test_get_module(self):
+ """
+ Checks the proper functionality of get_module
+ function
+ """
+
+ result = odl_utils.get_module('service-function-path')
+ self.assertEqual('service-function-path', result)
+
+ @patch('sfc.lib.odl_utils.get_module',
+ autospec=True, return_value='mocked_module')
+ @patch('sfc.lib.odl_utils.pluralize',
+ autospec=True, return_value='resources')
+ def test_format_odl_resource_list_url(self, mock_pluralize,
+ mock_get_module):
+ """
+ Checks the proper functionality of format_odl_resource_list_url
+ function
+ """
+
+ result = odl_utils.format_odl_resource_list_url('odl_ip',
+ 'odl_port',
+ 'resource')
+ formatted_url = ('http://admin:admin@odl_ip:'
+ 'odl_port/restconf/config/mocked_module:'
+ 'resources')
+ self.assertEqual(formatted_url, result)
+ mock_pluralize.assert_called_once_with('resource')
+ mock_get_module.assert_called_once_with('resource')
+
+ @patch('sfc.lib.odl_utils.format_odl_resource_list_url',
+ autospec=True, return_value='list_u/r/l')
+ def test_format_odl_resource_elem_url(self, mock_odl_resource_list_url):
+ """
+ Checks the proper functionality of format_odl_resource_elem_url
+ function
+ """
+
+ result = odl_utils.format_odl_resource_elem_url('odl_ip', 'odl_port',
+ 'resource',
+ 'elem_name')
+ formatted_url = ('list_u/r/l/resource/elem_name')
+ self.assertEqual(formatted_url, result)
+ mock_odl_resource_list_url.assert_called_once_with('odl_ip',
+ 'odl_port',
+ 'resource',
+ 'config')
+
+ @patch('sfc.lib.odl_utils.pluralize',
+ autospec=True, return_value='resources')
+ def test_odl_resource_list_names_returns_empty_list(self, mock_pluralize):
+ """
+ Checks the proper functionality of odl_resource_list_names
+ function when resources are empty
+ """
+
+ resource_json = {'resources': {}}
+ result = odl_utils.odl_resource_list_names('resource', resource_json)
+ self.assertEqual([], result)
+
+ @patch('sfc.lib.odl_utils.pluralize',
+ autospec=True, return_value='resources')
+ def test_odl_resource_list_names(self, mock_pluralize):
+ """
+ Checks the proper functionality of odl_resource_list_names
+ function
+ """
+
+ resource_json = {'resources': {'resource': [{'name': 'resource_one'},
+ {'name': 'resource_two'}]}}
+ result = odl_utils.odl_resource_list_names('resource', resource_json)
+ self.assertEqual(['resource_one', 'resource_two'], result)
+
+ @patch('requests.get', autospec=True)
+ @patch('sfc.lib.odl_utils.format_odl_resource_list_url', autospec=True)
+ def test_get_odl_resource_list(self,
+ mock_odl_resource_list_url,
+ mock_get):
+ """
+ Checks the proper functionality of get_odl_resource_list
+ function
+ """
+
+ mock_odl_resource_list_url.return_value = 'u/r/l'
+ mock_get.return_value.json.return_value = {'key': 'value'}
+
+ result = odl_utils.get_odl_resource_list('odl_ip',
+ 'odl_port',
+ 'resource')
+
+ self.assertEqual({'key': 'value'}, result)
+ mock_odl_resource_list_url.assert_called_once_with('odl_ip',
+ 'odl_port',
+ 'resource',
+ datastore='config')
+ mock_get.assert_called_once_with('u/r/l')
+
+ @patch('requests.get', autospec=True)
+ @patch('sfc.lib.odl_utils.format_odl_resource_elem_url', autospec=True)
+ def test_get_odl_resource_elem(self,
+ mock_odl_resource_elem_url,
+ mock_get):
+ """
+ Checks the proper functionality of get_odl_resource_elem
+ function
+ """
+
+ mock_response = Mock()
+ mock_response.get.return_value = ['elem_one', 'elem_two']
+ mock_get.return_value.json.return_value = mock_response
+ mock_odl_resource_elem_url.return_value = 'u/r/l'
+
+ result = odl_utils.get_odl_resource_elem(
+ 'odl_ip', 'odl_port', 'resource', 'elem_name')
+
+ self.assertEqual('elem_one', result)
+ mock_odl_resource_elem_url.assert_called_once_with(
+ 'odl_ip', 'odl_port', 'resource', 'elem_name', 'config')
+ mock_get.assert_called_once_with('u/r/l')
+ mock_response.get.assert_called_once_with('resource', [{}])
+
+ @patch('requests.delete', autospec=True)
+ @patch('sfc.lib.odl_utils.format_odl_resource_elem_url',
+ autospec=True, return_value='u/r/l')
+ def test_delete_odl_resource_elem(self,
+ mock_odl_resource_elem_url,
+ mock_delete):
+ """
+ Checks the proper functionality of delete_odl_resource_elem
+ function
+ """
+
+ odl_utils.delete_odl_resource_elem('odl_ip', 'odl_port', 'resource',
+ 'elem_name')
+
+ mock_odl_resource_elem_url('odl_ip', 'odl_port', 'resource',
+ 'elem_name', 'config')
+ mock_delete.assert_called_once_with('u/r/l')
+
+ def test_odl_acl_types_names_returns_empty_list(self):
+ """
+ Checks the proper functionality of odl_acl_types_names
+ function when access lists are empty
+ """
+
+ acl_json = {'access-lists': {}}
+ result = odl_utils.odl_acl_types_names(acl_json)
+ self.assertEqual([], result)
+
+ def test_odl_acl_types_names(self):
+ """
+ Checks the proper functionality of odl_acl_types_names
+ function
+ """
+
+ acl_json = {'access-lists': {'acl': [{'acl-type': 'type-one',
+ 'acl-name': 'name-one'},
+ {'acl-type': 'type-two',
+ 'acl-name': 'name-two'}]}}
+ acl_types = [('type-one', 'name-one'),
+ ('type-two', 'name-two')]
+
+ result = odl_utils.odl_acl_types_names(acl_json)
+ self.assertEqual(acl_types, result)
+
+ def test_format_odl_acl_list_url(self):
+ """
+ Checks the proper functionality of format_odl_acl_list_url
+ function
+ """
+
+ formatted_url = ('http://admin:admin@odl_ip:odl_port/restconf/config/'
+ 'ietf-access-control-list:access-lists')
+ result = odl_utils.format_odl_acl_list_url('odl_ip', 'odl_port')
+ self.assertEqual(formatted_url, result)
+
+ @patch('json.dumps',
+ autospec=True, return_value='{\n "key": "value"\n}')
+ def test_improve_json_layout(self, mock_dumps):
+ """
+ Checks the proper functionality of improve_json_layout
+ function
+ """
+
+ result = odl_utils.improve_json_layout({'key': 'value'})
+
+ self.assertEqual('{\n "key": "value"\n}', result)
+ mock_dumps.assert_called_once_with({'key': 'value'},
+ indent=4,
+ separators=(',', ': '))
+
+ @patch('requests.get', autospec=True)
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('sfc.lib.odl_utils.format_odl_acl_list_url',
+ autospec=True, return_value='acl_list_u/r/l')
+ @patch('sfc.lib.odl_utils.improve_json_layout', autospec=True)
+ @patch('sfc.lib.odl_utils.format_odl_resource_list_url', autospec=True)
+ def test_get_odl_items(self,
+ mock_odl_resource_list_url,
+ mock_json_layout,
+ mock_odl_acl_list_url,
+ mock_log,
+ mock_get):
+ """
+ Checks the proper functionality of get_odl_items
+ function
+ """
+
+ log_calls = [call('Configured ACLs in ODL: r_acl_j_s_o_n'),
+ call('Configured SFs in ODL: r_sf_j_s_o_n'),
+ call('Configured SFFs in ODL: r_sff_j_s_o_n'),
+ call('Configured SFCs in ODL: r_sfc_j_s_o_n'),
+ call('Configured RSPs in ODL: r_sp_j_s_o_n')]
+
+ resource_list_url_calls = [call('odl_ip', 'odl_port',
+ 'service-function'),
+ call('odl_ip', 'odl_port',
+ 'service-function-forwarder'),
+ call('odl_ip', 'odl_port',
+ 'service-function-chain'),
+ call('odl_ip', 'odl_port',
+ 'rendered-service-path',
+ datastore='operational')]
+
+ resource_list_urls = ['sf_list_u/r/l', 'sff_list_u/r/l',
+ 'sfc_list_u/r/l', 'rsp_list_u/r/l']
+
+ get_calls = [call(url) for url in resource_list_urls]
+
+ mock_odl_resource_list_url.side_effect = resource_list_urls
+
+ mock_get.return_value.json.side_effect = ['r_acl_json', 'r_sf_json',
+ 'r_sff_json', 'r_sfc_json',
+ 'r_rsp_json']
+
+ mock_json_layout.side_effect = ['r_acl_j_s_o_n', 'r_sf_j_s_o_n',
+ 'r_sff_j_s_o_n', 'r_sfc_j_s_o_n',
+ 'r_sp_j_s_o_n']
+
+ odl_utils.get_odl_items('odl_ip', 'odl_port')
+
+ mock_odl_acl_list_url.assert_called_once_with('odl_ip', 'odl_port')
+ mock_odl_resource_list_url.assert_has_calls(resource_list_url_calls)
+ mock_get.assert_has_calls(get_calls, any_order=True)
+ mock_log.debug.assert_has_calls(log_calls)
+
+ @patch('requests.get', autospec=True)
+ @patch('sfc.lib.odl_utils.format_odl_acl_list_url', autospec=True)
+ def test_get_odl_acl_list(self,
+ mock_acl_list_url,
+ mock_get):
+ """
+ Checks the proper functionality of get_odl_acl_list
+ function
+ """
+
+ mock_acl_list_url.return_value = 'acl_list/url'
+ mock_get.return_value.json.return_value = {'key': 'value'}
+ result = odl_utils.get_odl_acl_list('odl_ip', 'odl_port')
+ mock_acl_list_url.assert_called_once_with('odl_ip', 'odl_port')
+ mock_get.assert_called_once_with('acl_list/url')
+ self.assertEqual({'key': 'value'}, result)
+
+ @patch('requests.delete', autospec=True)
+ @patch('sfc.lib.odl_utils.format_odl_acl_list_url', autospec=True)
+ def test_delete_odl_acl(self,
+ mock_acl_list_url,
+ mock_delete):
+ """
+ Checks the proper functionality of delete_odl_acl
+ function
+ """
+
+ mock_acl_list_url.return_value = 'acl_list/url'
+
+ odl_utils.delete_odl_acl('odl_ip', 'odl_port', 'acl_type', 'acl_name')
+
+ mock_acl_list_url.assert_called_once_with('odl_ip', 'odl_port')
+ mock_delete.assert_called_once_with(
+ 'acl_list/url/acl/acl_type/acl_name')
+
+ @patch('sfc.lib.odl_utils.delete_odl_acl', autospec=True)
+ def test_delete_acl(self, mock_delete_odl_acl):
+ """
+ Checks the proper functionality of delete_acl
+ function
+ """
+
+ odl_utils.delete_acl('clf_name', 'odl_ip', 'odl_port')
+ mock_delete_odl_acl.assert_called_once_with(
+ 'odl_ip',
+ 'odl_port',
+ 'ietf-access-control-list:ipv4-acl',
+ 'clf_name')
+
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ def test_find_compute_raises_exception(self, mock_log):
+ """
+ Checks the proper functionality of find_compute
+ function when compute was not found in the client
+ """
+
+ ErrorMSG = 'No compute, where the client is, was found'
+ compute_node_one = Mock()
+ compute_node_two = Mock()
+ compute_nodes = [compute_node_one, compute_node_two]
+ compute_node_one.name = 'compute_one'
+ compute_node_two.name = 'compute_two'
+
+ with self.assertRaises(Exception) as cm:
+ odl_utils.find_compute('compute_client', compute_nodes)
+
+ self.assertEqual(ErrorMSG, cm.exception.message)
+ mock_log.debug.assert_called_once_with(ErrorMSG)
+
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ def test_find_compute(self, mock_log):
+ """
+ Checks the proper functionality of find_compute
+ function when the compute node is found among the given nodes
+ """
+
+ compute_node_one = Mock()
+ compute_node_two = Mock()
+ compute_nodes = [compute_node_one, compute_node_two]
+ compute_node_one.name = 'compute_one'
+ compute_node_two.name = 'compute_two'
+
+ result = odl_utils.find_compute('compute_two', compute_nodes)
+
+ self.assertEqual(compute_node_two, result)
+
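From the two tests above, find_compute presumably walks the node list and raises when no name matches (a sketch under that assumption, not the library's verbatim code):

    def find_compute(compute_client_name, compute_nodes):
        for compute_node in compute_nodes:
            if compute_node.name in compute_client_name:
                return compute_node
        # Message and single logger.debug call match the assertions above;
        # 'logger' is assumed to be the module-level logger of odl_utils.
        logger.debug('No compute, where the client is, was found')
        raise Exception('No compute, where the client is, was found')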
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('sfc.lib.odl_utils.get_active_rsps_on_ports', autospec=True)
+ def test_check_vnffg_deletion_returns_false_rsps_still_active(
+ self, mock_active_rsps_on_ports,
+ mock_log, mock_sleep):
+ """
+ Checks the proper functionality of check_vnffg_deletion
+ function to verify that it returns false on the given condition
+ """
+
+ log_calls = [call('RSPs are still active in the MD-SAL')]
+ mock_active_rsps_on_ports.return_value = True
+ result = odl_utils.check_vnffg_deletion('odl_ip', 'odl_port',
+ 'ovs_logger', 'neutron_ports',
+ 'compute_client_name',
+ 'compute_nodes', retries=1)
+ self.assertFalse(result)
+ mock_active_rsps_on_ports.assert_called_once_with('odl_ip', 'odl_port',
+ 'neutron_ports')
+ mock_sleep.assert_called_once_with(3)
+ mock_log.debug.assert_has_calls(log_calls)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('sfc.lib.odl_utils.find_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.actual_rsps_in_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.get_active_rsps_on_ports', autospec=True)
+ def test_check_vnffg_deletion_returns_false_error_getting_compute(
+ self, mock_active_rsps_on_ports, mock_actual_rsps,
+ mock_find_compute, mock_log, mock_sleep):
+ """
+ Checks the proper functionality of check_vnffg_deletion
+ function to verify that it returns false on the given condition
+ """
+
+ log_calls = [call('There was an error getting the compute: ErrorMSG')]
+ mock_compute = Mock()
+ mock_compute.ssh_client = 'mock_ssh_client'
+ mock_find_compute.side_effect = [Exception('ErrorMSG'), mock_compute]
+ mock_active_rsps_on_ports.side_effect = [True, False]
+ result = odl_utils.check_vnffg_deletion('odl_ip', 'odl_port',
+ 'ovs_logger', 'neutron_ports',
+ 'compute_client_name',
+ 'compute_nodes', retries=2)
+ self.assertFalse(result)
+ mock_sleep.assert_called_once_with(3)
+ mock_find_compute.assert_called_once_with('compute_client_name',
+ 'compute_nodes')
+ mock_log.debug.assert_has_calls(log_calls)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('sfc.lib.odl_utils.find_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.actual_rsps_in_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.get_active_rsps_on_ports', autospec=True)
+ def test_check_vnffg_deletion_returns_false_classification_flow_in_compute(
+ self, mock_active_rsps_on_ports, mock_actual_rsps,
+ mock_find_compute, mock_log, mock_sleep):
+ """
+ Checks the proper functionality of check_vnffg_deletion
+ function to verify that it returns false on the given condition
+ """
+
+ log_calls = [call('Classification flows still in the compute')]
+ mock_compute = Mock()
+ mock_compute.ssh_client = 'mock_ssh_client'
+ mock_find_compute.return_value = mock_compute
+ mock_actual_rsps.side_effect = [True, True]
+ mock_active_rsps_on_ports.side_effect = [True, False]
+ result = odl_utils.check_vnffg_deletion('odl_ip', 'odl_port',
+ 'ovs_logger', 'neutron_ports',
+ 'compute_client_name',
+ 'compute_nodes', retries=2)
+ self.assertFalse(result)
+ mock_actual_rsps.assert_called_with('ovs_logger', 'mock_ssh_client')
+ mock_sleep.assert_called_with(3)
+ mock_find_compute.assert_called_once_with('compute_client_name',
+ 'compute_nodes')
+ assert mock_sleep.call_count == 3
+ mock_log.debug.assert_has_calls(log_calls)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('sfc.lib.odl_utils.find_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.actual_rsps_in_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.get_active_rsps_on_ports', autospec=True)
+ def test_check_vnffg_deletion_returns_true(self,
+ mock_active_rsps_on_ports,
+ mock_actual_rsps,
+ mock_find_compute,
+ mock_log, mock_sleep):
+ """
+ Checks the proper functionality of check_vnffg_deletion
+ function to verify that it returns true
+ """
+
+ mock_compute = Mock()
+ mock_compute.ssh_client = 'mock_ssh_client'
+ mock_active_rsps_on_ports.side_effect = [True, False]
+
+ mock_actual_rsps.side_effect = [True, False]
+
+ mock_find_compute.return_value = mock_compute
+
+ result = odl_utils.check_vnffg_deletion('odl_ip', 'odl_port',
+ 'ovs_logger', 'neutron_ports',
+ 'compute_client_name',
+ 'compute_nodes', retries=2)
+ self.assertTrue(result)
+ mock_find_compute.assert_called_once_with('compute_client_name',
+ 'compute_nodes')
+ assert mock_sleep.call_count == 2
+ mock_log.assert_not_called()
diff --git a/sfc/unit_tests/unit/lib/test_openstack_utils.py b/sfc/unit_tests/unit/lib/test_openstack_utils.py
new file mode 100644
index 00000000..bdd53d36
--- /dev/null
+++ b/sfc/unit_tests/unit/lib/test_openstack_utils.py
@@ -0,0 +1,2504 @@
+#!/usr/bin/env python
+
+###############################################################################
+# Copyright (c) 2018 Venkata Harshavardhan Reddy Allu and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+import unittest
+
+from mock import call
+from mock import Mock
+from mock import patch
+from mock import mock_open
+from mock import DEFAULT
+from mock import MagicMock
+
+import sfc.lib.openstack_utils as os_sfc_utils
+from tackerclient.v1_0 import client as tacker_client
+
+__author__ = "Harshavardhan Reddy <venkataharshavardhan_ven@srmuniv.edu.in>"
+
+
+class SfcOpenStackUtilsTesting(unittest.TestCase):
+
+ def setUp(self):
+ self.patcher1 = patch.object(os_sfc_utils.constants,
+ 'ENV_FILE', autospec=True)
+ self.patcher2 = patch.object(os_sfc_utils.openstack_tests,
+ 'get_credentials', autospec=True)
+ self.patcher3 = patch.object(os_sfc_utils.nova_utils,
+ 'nova_client', autospec=True)
+ self.patcher4 = patch.object(os_sfc_utils.neutron_utils,
+ 'neutron_client', autospec=True)
+ self.patcher5 = patch.object(os_sfc_utils.heat_utils,
+ 'heat_client', autospec=True)
+ self.patcher6 = patch.object(os_sfc_utils.keystone_utils,
+ 'keystone_client', autospec=True)
+ self.patcher7 = patch.object(os_sfc_utils.connection,
+ 'from_config', autospec=True,)
+ self.patcher8 = patch.object(os_sfc_utils.neutronclient,
+ 'Client', autospec=True,)
+
+ self.env_file = self.patcher1.start().return_value
+ self.os_creds = self.patcher2.start().return_value
+ self.nova = self.patcher3.start().return_value
+ self.neutron = self.patcher4.start().return_value
+ self.heat = self.patcher5.start().return_value
+ self.keystone = self.patcher6.start().return_value
+ self.conn = self.patcher7.start().return_value
+ self.neutron_client = self.patcher8.start().return_value
+
+ self.os_sfc = os_sfc_utils.OpenStackSFC()
+
+ def tearDown(self):
+ self.patcher1.stop()
+ self.patcher2.stop()
+ self.patcher3.stop()
+ self.patcher4.stop()
+ self.patcher5.stop()
+ self.patcher6.stop()
+ self.patcher7.stop()
+ self.patcher8.stop()
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('os.environ', {'OS_NETWORK_API_VERSION': '1'})
+ def test_get_neutron_client_version(self,
+ mock_log):
+ """
+ Checks the proper functionality of get_neutron_client_version
+ """
+ log_calls = [call("OS_NETWORK_API_VERSION is 1")]
+ result = self.os_sfc.get_neutron_client_version()
+ assert result == '1'
+ mock_log.info.assert_has_calls(log_calls)
+
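The test above replaces os.environ wholesale with a plain dict, which is enough when the code only reads from it. patch.dict is the more surgical tool when other environment variables must stay visible:

    import os
    from mock import patch

    with patch.dict('os.environ', {'OS_NETWORK_API_VERSION': '2'}):
        assert os.environ['OS_NETWORK_API_VERSION'] == '2'
    # previous environment restored on exit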
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_register_glance_image_already_exists(self,
+ mock_log):
+ """
+ Checks the proper functionality of register_glance_image
+ function when the image already exists
+ """
+ image_obj = Mock()
+ image_obj.name = 'name'
+ log_calls = [call('Registering the image...'),
+ call('Image ' + image_obj.name + ' already exists.')]
+
+ self.conn.image.find_image.return_value = image_obj
+ result = self.os_sfc.register_glance_image('name',
+ 'url',
+ 'img_format',
+ 'public')
+
+ self.conn.image.find_image.assert_called_once_with(image_obj.name)
+
+ assert result is image_obj
+
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch("__builtin__.open", autospec=True)
+ def test_register_glance_image_is_local(self,
+ mock_open_fn,
+ mock_log):
+ """
+ Checks the proper functionality of register_glance_image
+ function when the image is local
+ """
+ log_calls = [call('Registering the image...'),
+ call('Image created')]
+
+ image_obj_None = None
+ image_obj_name = 'name'
+ image_obj = Mock()
+ mocked_file = mock_open(read_data='url').return_value
+ mock_open_fn.return_value = mocked_file
+
+ self.conn.image.find_image.return_value = image_obj_None
+ self.conn.image.upload_image.return_value = image_obj
+ result = self.os_sfc.register_glance_image('name',
+ 'url',
+ 'img_format',
+ 'public')
+ assert result is image_obj
+
+ self.conn.image.find_image.assert_called_once_with(image_obj_name)
+
+ self.conn.image.upload_image.\
+ assert_called_once_with(name='name',
+ disk_format='img_format',
+ data='url',
+ is_public='public',
+ container_format='bare')
+
+ self.assertEqual([image_obj], self.os_sfc.creators)
+ mock_log.info.assert_has_calls(log_calls)
+
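mock_open builds a file-like mock whose read() yields the given data; wired into the patched __builtin__.open above, it lets register_glance_image treat 'url' as local file content. Minimal standalone sketch of the pattern (Python 2 builtins path, as in the test):

    from mock import mock_open, patch

    m = mock_open(read_data='url')
    with patch('__builtin__.open', m):
        with open('/tmp/image.qcow2') as f:
            data = f.read()          # 'url'
    m.assert_called_with('/tmp/image.qcow2')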
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.urllib2.urlopen', autospec=True)
+ def test_register_glance_image_is_not_local(self,
+ mock_urlopen,
+ mock_log):
+ """
+ Checks the proper functionality of register_glance_image
+ function when the image is not local
+ """
+ log_calls = [call('Registering the image...'),
+ call('Downloading image'),
+ call('Image created')]
+
+ image_obj_None = None
+ image_obj_name = 'name'
+ image_obj = Mock()
+ mock_file = Mock()
+ mock_file.read.side_effect = ['http://url']
+ mock_urlopen.return_value = mock_file
+
+ self.conn.image.find_image.return_value = image_obj_None
+ self.conn.image.upload_image.return_value = image_obj
+
+ result = self.os_sfc.register_glance_image('name',
+ 'http://url',
+ 'img_format',
+ 'public')
+
+ assert result is image_obj
+
+ self.conn.image.find_image.assert_called_once_with(image_obj_name)
+
+ self.conn.image.upload_image.\
+ assert_called_once_with(name='name',
+ disk_format='img_format',
+ data='http://url',
+ is_public='public',
+ container_format='bare')
+
+ self.assertEqual([image_obj], self.os_sfc.creators)
+ mock_log.info.assert_has_calls(log_calls)
+
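+ # Taken together, the three register_glance_image tests assume the
+ # flow sketched below; this is a hedged reconstruction from the
+ # assertions (e.g. the 'http' prefix check), not the verbatim source:
+ #
+ # def register_glance_image(self, name, url, img_format, public):
+ #     logger.info('Registering the image...')
+ #     image = self.conn.image.find_image(name)
+ #     if image:
+ #         logger.info('Image ' + image.name + ' already exists.')
+ #         return image
+ #     if url.startswith('http'):
+ #         logger.info('Downloading image')
+ #         data = urllib2.urlopen(url).read()   # remote image
+ #     else:
+ #         data = open(url).read()              # local image file
+ #     image = self.conn.image.upload_image(name=name,
+ #                                          disk_format=img_format,
+ #                                          data=data,
+ #                                          is_public=public,
+ #                                          container_format='bare')
+ #     self.creators.append(image)
+ #     logger.info('Image created')
+ #     return image
+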
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_flavor(self,
+ mock_log):
+ """
+ Checks the proper functionality of create_flavor
+ function
+ """
+
+ mock_openstack_flavor_ins = self.conn.compute.\
+ create_flavor.return_value
+ log_calls = [call('Creating flavor...')]
+
+ result = self.os_sfc.create_flavor('name',
+ 'ram',
+ 'disk',
+ 'vcpus')
+ assert result is mock_openstack_flavor_ins
+ self.assertEqual([mock_openstack_flavor_ins],
+ self.os_sfc.creators)
+ self.conn.compute.create_flavor.\
+ assert_called_once_with(name='name',
+ ram='ram',
+ disk='disk',
+ vcpus='vcpus')
+ mock_log.info.assert_has_calls(log_calls)
+
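+ # The behaviour the test above assumes for create_flavor is fully
+ # pinned by its assertions: delegate to conn.compute.create_flavor()
+ # and record the result in self.creators for later cleanup. A minimal
+ # sketch of that behaviour (reconstruction, not the verbatim source):
+ #
+ # def create_flavor(self, name, ram, disk, vcpus):
+ #     logger.info('Creating flavor...')
+ #     flavor = self.conn.compute.create_flavor(
+ #         name=name, ram=ram, disk=disk, vcpus=vcpus)
+ #     self.creators.append(flavor)
+ #     return flavor
+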
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.env.get', autospec=True)
+ def test_create_network_infrastructure(self, mock_env_get, mock_log):
+ log_calls = [call('Creating Networks...'),
+ call('Creating Router...')]
+ network_obj = Mock()
+ network_obj.id = '1'
+ subnet_obj = Mock()
+ subnet_obj.id = '2'
+ ext_network_obj = Mock()
+ ext_network_obj.id = '3'
+ router_obj = Mock()
+ router_obj.id = '4'
+
+ self.conn.network.create_network.return_value = network_obj
+ self.conn.network.create_subnet.return_value = subnet_obj
+ self.conn.network.find_network.return_value = ext_network_obj
+ self.conn.network.create_router.return_value = router_obj
+ self.conn.network.get_router.return_value = router_obj
+ mock_env_get.return_value = 'ext_net_name'
+
+ expected = (network_obj, router_obj)
+ result = self.os_sfc.create_network_infrastructure('net_name',
+ 'sn_name',
+ 'subnet_cidr',
+ 'router_name')
+ self.conn.network.create_network.\
+ assert_called_once_with(name='net_name')
+ self.conn.network.create_subnet.\
+ assert_called_once_with(name='sn_name', cidr='subnet_cidr',
+ network_id=network_obj.id, ip_version='4')
+ self.conn.network.find_network.\
+ assert_called_once_with('ext_net_name')
+ self.conn.network.create_router.\
+ assert_called_once_with(name='router_name')
+ self.conn.network.add_interface_to_router.\
+ assert_called_once_with(router_obj.id, subnet_id=subnet_obj.id)
+ self.conn.network.update_router.\
+ assert_called_once_with(
+ router_obj.id,
+ external_gateway_info={'network_id': ext_network_obj.id})
+ self.conn.network.get_router.assert_called_once_with(router_obj.id)
+
+ self.assertEqual(expected, result)
+ self.assertEqual([network_obj, subnet_obj, router_obj],
+ self.os_sfc.creators)
+ mock_log.info.assert_has_calls(log_calls)
+
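+ # Hedged sketch of the create_network_infrastructure flow implied by
+ # the calls asserted above; the env key passed to env.get() is not
+ # visible in the test, so it is left elided here:
+ #
+ # def create_network_infrastructure(self, net_name, sn_name,
+ #                                   subnet_cidr, router_name):
+ #     logger.info('Creating Networks...')
+ #     network = self.conn.network.create_network(name=net_name)
+ #     subnet = self.conn.network.create_subnet(
+ #         name=sn_name, cidr=subnet_cidr,
+ #         network_id=network.id, ip_version='4')
+ #     ext_network = self.conn.network.find_network(env.get(...))
+ #     logger.info('Creating Router...')
+ #     router = self.conn.network.create_router(name=router_name)
+ #     self.conn.network.add_interface_to_router(
+ #         router.id, subnet_id=subnet.id)
+ #     self.conn.network.update_router(
+ #         router.id,
+ #         external_gateway_info={'network_id': ext_network.id})
+ #     router = self.conn.network.get_router(router.id)
+ #     self.creators.extend([network, subnet, router])
+ #     return network, router
+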
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_security_group(self,
+ mock_log):
+ """
+ Checks the proper functionality of create_security_group
+ function
+ """
+ log_calls = [call('Creating the security groups...')]
+ sec_group_obj = Mock()
+ sec_group_obj.id = '1'
+
+ self.conn.network.create_security_group.return_value = sec_group_obj
+
+ result = self.os_sfc.create_security_group('sec_grp_name')
+ assert result is sec_group_obj
+
+ self.conn.network.create_security_group.\
+ assert_called_once_with(name='sec_grp_name')
+
+ pc_calls = [call(security_group_id=sec_group_obj.id,
+ direction='ingress',
+ protocol='icmp'),
+ call(security_group_id=sec_group_obj.id,
+ direction='ingress',
+ protocol='tcp',
+ port_range_min=22,
+ port_range_max=22),
+ call(security_group_id=sec_group_obj.id,
+ direction='ingress',
+ protocol='tcp',
+ port_range_min=80,
+ port_range_max=80)]
+
+ self.conn.network.create_security_group_rule.\
+ assert_has_calls(pc_calls)
+
+ self.assertEqual([sec_group_obj], self.os_sfc.creators)
+ mock_log.info.assert_has_calls(log_calls)
+
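+ # The rule set asserted above implies create_security_group opens
+ # ingress ICMP plus TCP ports 22 and 80 on the new group; a minimal
+ # sketch of one such rule call (reconstructed from the expected
+ # keyword arguments, not copied from the source):
+ #
+ # self.conn.network.create_security_group_rule(
+ #     security_group_id=sec_group.id, direction='ingress',
+ #     protocol='tcp', port_range_min=22, port_range_max=22)
+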
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_instance_port_security_false(self,
+ mock_log):
+ """
+ Checks the proper functionality of create_instance
+ function
+ """
+
+ keypair_obj = Mock()
+ keypair_obj.name = 'keypair_name'
+ flavor_obj = Mock()
+ flavor_obj.id = '1'
+ port_obj1 = Mock()
+ port_obj1.id = '2'
+ port_obj2 = Mock()
+ port_obj2.id = '3'
+ instance_obj = Mock()
+ instance_obj.name = 'instance_name'
+ secgrp = Mock()
+ secgrp.name = 'sec_grp'
+ secgrp.id = '4'
+ img_cre = Mock()
+ img_cre.id = '5'
+ network = Mock()
+ network.id = '6'
+ ports = ['port1', 'port2']
+ port_security = False
+
+ log_calls = [call('Creating Key Pair vm_name...'),
+ call('Creating Port ' + str(ports) + '...'),
+ call('Creating the instance vm_name...'),
+ call('Waiting for instance_name to become Active'),
+ call('instance_name is active')]
+
+ self.conn.compute.create_keypair.return_value = keypair_obj
+ self.conn.compute.find_flavor.return_value = flavor_obj
+ self.conn.network.create_port.side_effect = [port_obj1, port_obj2]
+ self.conn.compute.create_server.return_value = instance_obj
+
+ port_obj_list = [port_obj1, port_obj2]
+
+ expected = (instance_obj, port_obj_list)
+ result = self.os_sfc.create_instance('vm_name',
+ 'flavor_name',
+ img_cre,
+ network,
+ secgrp,
+ 'av_zone',
+ ports,
+ port_security=port_security)
+ self.assertEqual(expected, result)
+
+ pc_calls = [call(name=ports[0],
+ is_port_security_enabled=port_security,
+ network_id=network.id),
+ call(name=ports[1],
+ is_port_security_enabled=port_security,
+ network_id=network.id)]
+
+ self.conn.compute.create_keypair.\
+ assert_called_once_with(name='vm_name' + "_keypair")
+
+ self.conn.compute.find_flavor.assert_called_once_with('flavor_name')
+
+ self.conn.network.create_port.\
+ assert_has_calls(pc_calls)
+
+ self.conn.compute.create_server.\
+ assert_called_once_with(name='vm_name',
+ image_id=img_cre.id,
+ flavor_id=flavor_obj.id,
+ networks=[{"port": port_obj1.id},
+ {"port": port_obj2.id}],
+ key_name=keypair_obj.name,
+ availability_zone='av_zone')
+
+ self.conn.compute.wait_for_server.\
+ assert_called_once_with(instance_obj)
+
+ self.assertEqual([keypair_obj, port_obj1, port_obj2, instance_obj],
+ self.os_sfc.creators)
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_instance(self,
+ mock_log):
+ """
+ Checks the proper functionality of create_instance
+ function
+ """
+
+ keypair_obj = Mock()
+ keypair_obj.name = 'keypair_name'
+ flavor_obj = Mock()
+ flavor_obj.id = '1'
+ port_obj = Mock()
+ port_obj.id = '2'
+ instance_obj = Mock()
+ instance_obj.name = 'instance_name'
+ secgrp = Mock()
+ secgrp.name = 'sec_grp'
+ secgrp.id = '4'
+ img_cre = Mock()
+ img_cre.id = '5'
+ network = Mock()
+ network.id = '6'
+ ports = ['port1']
+ port_obj_list = [port_obj]
+ port_security = True
+
+ log_calls = [call('Creating Key Pair vm_name...'),
+ call('Creating Port ' + str(ports) + '...'),
+ call('Creating the instance vm_name...'),
+ call('Waiting for instance_name to become Active'),
+ call('instance_name is active')]
+
+ self.conn.compute.create_keypair.return_value = keypair_obj
+ self.conn.compute.find_flavor.return_value = flavor_obj
+ self.conn.network.create_port.return_value = port_obj
+ self.conn.compute.create_server.return_value = instance_obj
+
+ expected = (instance_obj, port_obj_list)
+ result = self.os_sfc.create_instance('vm_name',
+ 'flavor_name',
+ img_cre,
+ network,
+ secgrp,
+ 'av_zone',
+ ports,
+ port_security=port_security)
+ self.assertEqual(expected, result)
+
+ pc_calls = [call(name=ports[0],
+ is_port_security_enabled=port_security,
+ network_id=network.id,
+ security_group_ids=[secgrp.id])]
+
+ self.conn.compute.create_keypair.\
+ assert_called_once_with(name='vm_name' + "_keypair")
+
+ self.conn.compute.find_flavor.assert_called_once_with('flavor_name')
+
+ self.conn.network.create_port.\
+ assert_has_calls(pc_calls)
+
+ self.conn.compute.create_server.\
+ assert_called_once_with(name='vm_name',
+ image_id=img_cre.id,
+ flavor_id=flavor_obj.id,
+ networks=[{"port": port_obj.id}],
+ key_name=keypair_obj.name,
+ availability_zone='av_zone')
+
+ self.conn.compute.wait_for_server.\
+ assert_called_once_with(instance_obj)
+
+ self.assertEqual([keypair_obj, port_obj, instance_obj],
+ self.os_sfc.creators)
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_instance_port_security_false_one_port(self,
+ mock_log):
+ """
+ Checks the proper functionality of create_instance
+ function
+ """
+
+ keypair_obj = Mock()
+ keypair_obj.name = 'keypair_name'
+ flavor_obj = Mock()
+ flavor_obj.id = '1'
+ port_obj = Mock()
+ port_obj.id = '2'
+ instance_obj = Mock()
+ instance_obj.name = 'instance_name'
+ secgrp = Mock()
+ secgrp.name = 'sec_grp'
+ secgrp.id = '4'
+ img_cre = Mock()
+ img_cre.id = '5'
+ network = Mock()
+ network.id = '6'
+ ports = ['port1']
+ port_obj_list = [port_obj]
+ port_security = False
+
+ log_calls = [call('Creating Key Pair vm_name...'),
+ call('Creating Port ' + str(ports) + '...'),
+ call('Creating the instance vm_name...'),
+ call('Waiting for instance_name to become Active'),
+ call('instance_name is active')]
+
+ self.conn.compute.create_keypair.return_value = keypair_obj
+ self.conn.compute.find_flavor.return_value = flavor_obj
+ self.conn.network.create_port.return_value = port_obj
+ self.conn.compute.create_server.return_value = instance_obj
+
+ expected = (instance_obj, port_obj_list)
+ result = self.os_sfc.create_instance('vm_name',
+ 'flavor_name',
+ img_cre,
+ network,
+ secgrp,
+ 'av_zone',
+ ports,
+ port_security=port_security)
+ self.assertEqual(expected, result)
+
+ pc_calls = [call(name=ports[0],
+ is_port_security_enabled=port_security,
+ network_id=network.id)]
+
+ self.conn.compute.create_keypair.\
+ assert_called_once_with(name='vm_name' + "_keypair")
+
+ self.conn.compute.find_flavor.assert_called_once_with('flavor_name')
+
+ self.conn.network.create_port.\
+ assert_has_calls(pc_calls)
+
+ self.conn.compute.create_server.\
+ assert_called_once_with(name='vm_name',
+ image_id=img_cre.id,
+ flavor_id=flavor_obj.id,
+ networks=[{"port": port_obj.id}],
+ key_name=keypair_obj.name,
+ availability_zone='av_zone')
+
+ self.conn.compute.wait_for_server.\
+ assert_called_once_with(instance_obj)
+
+ self.assertEqual([keypair_obj, port_obj, instance_obj],
+ self.os_sfc.creators)
+ mock_log.info.assert_has_calls(log_calls)
+
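+ # The three create_instance variants above jointly pin down the port
+ # creation branch: with port_security enabled the security group ids
+ # are attached to each port, and with it disabled they are omitted.
+ # A hedged sketch of that branch (names reconstructed from the
+ # asserted keyword arguments):
+ #
+ # for port_name in ports:
+ #     if port_security:
+ #         port = self.conn.network.create_port(
+ #             name=port_name,
+ #             is_port_security_enabled=port_security,
+ #             network_id=network.id,
+ #             security_group_ids=[secgrp.id])
+ #     else:
+ #         port = self.conn.network.create_port(
+ #             name=port_name,
+ #             is_port_security_enabled=port_security,
+ #             network_id=network.id)
+ #     port_list.append(port)
+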
+ def test_get_instance(self):
+ """
+ Checks the proper functionality of get_instance function
+ """
+
+ mock_instance_id = 'instance-abyz'
+ mock_instance = Mock()
+ mock_instance.id = mock_instance_id
+ mock_instance.name = 'test-instance'
+ mock_instance.hypervisor_hostname = 'nova-abyz'
+ self.conn.compute.get_server_metadata.return_value = mock_instance
+ result = self.os_sfc.get_instance(mock_instance_id)
+ self.assertEqual(result, mock_instance)
+
+ @patch.object(os_sfc_utils.OpenStackSFC, 'get_hypervisor_hosts')
+ def test_get_av_zones(self, mock_hosts):
+ """
+ Checks the proper functionality of get_av_zones
+ function
+ """
+ mock_hosts.return_value = ['host1', 'host2']
+ result = self.os_sfc.get_av_zones()
+ mock_hosts.assert_called_once()
+ self.assertEqual(['nova::host1', 'nova::host2'], result)
+
+ def test_get_hypervisor_hosts(self):
+ """
+ Checks the proper functionality of get_hypervisor_hosts
+ function
+ """
+
+ hypervisor1 = Mock()
+ hypervisor1.state = 'up'
+ hypervisor1.name = 'compute00'
+ hypervisor2 = Mock()
+ hypervisor2.state = 'up'
+ hypervisor2.name = 'compute01'
+ nodes = [hypervisor1.name, hypervisor2.name]
+ hypervisors_list = MagicMock()
+ self.conn.compute.hypervisors.return_value = hypervisors_list
+ hypervisors_list.__iter__.return_value = [hypervisor1, hypervisor2]
+
+ result = self.os_sfc.get_hypervisor_hosts()
+ self.conn.compute.hypervisors.assert_called_once()
+ self.assertEqual(nodes, result)
+
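+ # Sketch of the hypervisor helpers as the tests above and below
+ # assume them to behave: get_hypervisor_hosts keeps the names of
+ # hypervisors whose state is 'up' and returns None (logging an error)
+ # on failure, while get_av_zones prefixes each host with the
+ # 'nova::' availability-zone notation (reconstruction, not the
+ # actual source):
+ #
+ # def get_hypervisor_hosts(self):
+ #     try:
+ #         return [h.name for h in self.conn.compute.hypervisors()
+ #                 if h.state == 'up']
+ #     except Exception as e:
+ #         logger.error("Error [get_hypervisors(compute)]: %s" % e)
+ #         return None
+ #
+ # def get_av_zones(self):
+ #     return ['nova::' + h for h in self.get_hypervisor_hosts()]
+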
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_get_hypervisor_hosts_exception(self, mock_log):
+ """
+ Checks the proper functionality of get_hypervisor_hosts
+ function when an exception is raised
+ """
+ log_calls = [call('Error [get_hypervisors(compute)]: Error MSG')]
+ self.conn.compute.hypervisors.side_effect = Exception('Error MSG')
+ result = self.os_sfc.get_hypervisor_hosts()
+ mock_log.error.assert_has_calls(log_calls)
+ self.assertIsNone(result)
+
+ @patch('sfc.lib.openstack_utils.OpenStackSFC.get_vm_compute',
+ autospec=True, return_value='mock_client')
+ def test_get_compute_client(self, mock_get_vm_compute):
+ """
+ Checks the proper functionality of get_compute_client
+ function
+ """
+
+ result = self.os_sfc.get_compute_client()
+ self.assertEqual('mock_client', result)
+ mock_get_vm_compute.assert_called_once_with(self.os_sfc, 'client')
+
+ @patch('sfc.lib.openstack_utils.OpenStackSFC.get_vm_compute',
+ autospec=True, return_value='mock_server')
+ def test_get_compute_server(self, mock_get_vm_compute):
+ """
+ Checks the proper functionality of get_compute_server
+ function
+ """
+
+ result = self.os_sfc.get_compute_server()
+ self.assertEqual('mock_server', result)
+ mock_get_vm_compute.assert_called_once_with(self.os_sfc, 'server')
+
+ def test_get_vm_compute_raised_exception(self):
+ """
+ Checks the proper functionality of get_vm_compute
+ function when no VM with the given name is found
+ """
+
+ ErrorMSG = "There is no VM with name 'mock_vm_name'!!"
+ with self.assertRaises(Exception) as cm:
+ self.os_sfc.get_vm_compute('mock_vm_name')
+
+ self.assertEqual(cm.exception.message, ErrorMSG)
+
+ def test_get_vm_compute(self):
+ """
+ Checks the proper functionality of get_vm_compute
+ function
+ """
+
+ mock_cre_obj_1 = Mock()
+ mock_cre_obj_2 = Mock()
+ mock_cre_obj_1.get_vm_inst.return_value.name = 'pro_vm'
+ mock_cre_obj_2.get_vm_inst.return_value.name = 'dev_vm'
+ mock_cre_obj_2.get_vm_inst.return_value.compute_host = 'mock_host'
+ self.os_sfc.creators = [mock_cre_obj_1, mock_cre_obj_2]
+
+ result = self.os_sfc.get_vm_compute('dev_vm')
+ self.assertEqual('mock_host', result)
+
+ def test_get_port_by_ip(self):
+ """
+ Checks the proper functionality of get_port_by_ip function
+ """
+
+ mock_port_ip_address = 'e.f.g.h'
+ mock_port_one, mock_port_two = Mock(), Mock()
+ mock_port_one.id = 'port-abcd'
+ mock_port_two.id = 'port-efgz'
+ mock_port_one.fixed_ips = [{'ip_address': 'a.b.c.d'}]
+ mock_port_two.fixed_ips = [{'ip_address': 'e.f.g.h'}]
+ self.conn.network.ports.return_value = [mock_port_one, mock_port_two]
+ self.conn.network.get_port.return_value = mock_port_two
+ result = self.os_sfc.get_port_by_ip(mock_port_ip_address)
+ self.assertEqual(result, mock_port_two)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.cr_inst.OpenStackVmInstance',
+ autospec=True)
+ def test_get_instance_port_raised_exception(self,
+ mock_os_vm,
+ mock_log):
+ """
+ Checks the proper functionality of get_instance_port
+ function when no port is returned
+ """
+
+ mock_os_vm_ins = mock_os_vm.return_value
+ mock_vm = Mock()
+ mock_vm.name = 'mock_vm_name'
+ mock_os_vm_ins.get_port_by_name.return_value = None
+ ErrorMSG = 'Client VM does not have the desired port'
+ log_calls = [call("The VM mock_vm_name does not have any port"
+ " with name mock_vm_name-port")]
+
+ with self.assertRaises(Exception) as cm:
+ self.os_sfc.get_instance_port(mock_vm, mock_os_vm_ins)
+
+ self.assertEqual(cm.exception.message, ErrorMSG)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.cr_inst.OpenStackVmInstance',
+ autospec=True)
+ def test_get_instance_port(self,
+ mock_os_vm,
+ mock_log):
+ """
+ Checks the proper functionality of get_instance_port
+ function when the port is returned
+ """
+
+ mock_os_vm_ins = mock_os_vm.return_value
+ mock_vm = Mock()
+ mock_vm.name = 'mock_vm_name'
+ mock_os_vm_ins.get_port_by_name.return_value = 'mock_port'
+ result = self.os_sfc.get_instance_port(mock_vm, mock_os_vm_ins)
+ self.assertEqual('mock_port', result)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.env.get', autospec=True)
+ def test_assign_floating_ip(self,
+ mock_env_get,
+ mock_log):
+ """
+ Checks the proper functionality of assign_floating_ip
+ function
+ """
+ ext_network_obj = Mock()
+ ext_network_obj.id = '1'
+ fip_obj = Mock()
+ fip_obj.floating_ip_address = 'floating_ip_address'
+ port_obj = Mock()
+ port_obj.id = '2'
+ instance_obj = Mock()
+ instance_obj.id = '3'
+
+ log_calls = [call(' Creating floating ips '),
+ call(' FLoating IP address '
+ + fip_obj.floating_ip_address
+ + ' created'),
+ call(' Adding Floating IPs to instances ')]
+
+ mock_env_get.return_value = 'ext_net_name'
+ self.conn.network.find_network.return_value = ext_network_obj
+ self.conn.network.create_ip.return_value = fip_obj
+ self.conn.network.get_port.return_value = port_obj
+ self.conn.compute.get_server.return_value = instance_obj
+
+ result = self.os_sfc.assign_floating_ip(instance_obj, port_obj)
+ assert result is fip_obj.floating_ip_address
+
+ self.conn.network.find_network.assert_called_once_with('ext_net_name')
+ self.conn.network.create_ip.\
+ assert_called_once_with(floating_network_id=ext_network_obj.id,
+ port_id=port_obj.id)
+ self.conn.compute.add_floating_ip_to_server.\
+ assert_called_once_with(instance_obj.id,
+ fip_obj.floating_ip_address)
+
+ self.assertEqual([fip_obj],
+ self.os_sfc.creators)
+ mock_log.info.assert_has_calls(log_calls)
+
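+ # Hedged sketch of assign_floating_ip as implied by the assertions
+ # above: create a floating IP on the external network for the given
+ # port, attach it to the server, and track it for cleanup (the env
+ # key for the external network name is elided, as in the test):
+ #
+ # ext_network = self.conn.network.find_network(env.get(...))
+ # fip = self.conn.network.create_ip(
+ #     floating_network_id=ext_network.id, port_id=port.id)
+ # self.conn.compute.add_floating_ip_to_server(
+ #     instance.id, fip.floating_ip_address)
+ # self.creators.append(fip)
+ # return fip.floating_ip_address
+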
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.heat_utils.get_stack_servers',
+ autospec=True)
+ @patch('sfc.lib.openstack_utils.cr_inst.generate_creator', autospec=True)
+ def test_assign_floating_ip_vnfs_raised_exception_ips_provided(
+ self, mock_generate_creator, mock_get_stack_servers, mock_log):
+ """
+ Checks the proper functionality of assign_floating_ip_vnfs
+ function when no port matches any of the provided IPs
+ """
+
+ ErrorMSG = "The VNF server_name-float does not have any suitable" + \
+ " port with ip any of ['floating_ip', 'other_ip'] for" + \
+ " floating IP assignment"
+ log_calls = [call(ErrorMSG)]
+ self.os_sfc.image_settings = 'image_settings'
+ self.heat.stacks.list.return_value = ['stack_obj']
+ mock_ips = ['floating_ip', 'other_ip']
+ mock_server_obj = Mock()
+ mock_port_obj = Mock()
+ mock_server_obj.name = 'server_name'
+ mock_server_obj.ports = [mock_port_obj]
+ mock_port_obj.name = None
+ mock_port_obj.ips = [{'ip_address': 'floating_ip'}]
+ mock_get_stack_servers.return_value = [mock_server_obj]
+
+ with self.assertRaises(Exception) as cm:
+ self.os_sfc.assign_floating_ip_vnfs('router', mock_ips)
+
+ self.assertEqual(cm.exception.message, ErrorMSG)
+ mock_get_stack_servers.assert_called_once_with(self.heat,
+ self.nova,
+ self.neutron_client,
+ self.keystone,
+ 'stack_obj',
+ 'admin')
+ mock_generate_creator.assert_called_once_with(self.os_creds,
+ mock_server_obj,
+ 'image_settings',
+ 'admin')
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.heat_utils.get_stack_servers',
+ autospec=True)
+ @patch('sfc.lib.openstack_utils.cr_inst.generate_creator', autospec=True)
+ def test_assign_floating_ip_vnfs_raised_exception_ips_not_provided(
+ self, mock_generate_creator, mock_get_stack_servers, mock_log):
+ """
+ Checks the proper functionality of assign_floating_ip_vnfs
+ function when the server has no suitable port and no IPs are provided
+ """
+
+ ErrorMSG = "The VNF server_name-float does not have any suitable" + \
+ " port for floating IP assignment"
+ log_calls = [call(ErrorMSG)]
+ self.os_sfc.image_settings = 'image_settings'
+ self.heat.stacks.list.return_value = ['stack_obj']
+ mock_server_obj = Mock()
+ mock_port_obj = Mock()
+ mock_server_obj.name = 'server_name'
+ mock_server_obj.ports = [mock_port_obj]
+ mock_port_obj.name = None
+ mock_port_obj.ips = [{'ip_address': 'floating_ip'}]
+ mock_get_stack_servers.return_value = [mock_server_obj]
+
+ with self.assertRaises(Exception) as cm:
+ self.os_sfc.assign_floating_ip_vnfs('router')
+
+ mock_get_stack_servers.assert_called_once_with(self.heat,
+ self.nova,
+ self.neutron_client,
+ self.keystone,
+ 'stack_obj',
+ 'admin')
+ mock_generate_creator.assert_called_once_with(self.os_creds,
+ mock_server_obj,
+ 'image_settings',
+ 'admin')
+ self.assertEqual(cm.exception.message, ErrorMSG)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.FloatingIpConfig', autospec=True)
+ @patch('sfc.lib.openstack_utils.cr_inst.generate_creator',
+ autospec=True)
+ @patch('sfc.lib.openstack_utils.heat_utils.get_stack_servers',
+ autospec=True)
+ def test_assign_floating_ip_vnfs(self,
+ mock_get_stack_servers,
+ mock_generate_creator,
+ mock_floating_ip_config):
+ """
+ Checks the proper functionality of assign_floating_ip_vnfs
+ function
+ """
+
+ self.os_sfc.image_settings = 'image_settings'
+ self.heat.stacks.list.return_value = ['stack_obj']
+
+ mock_router = Mock()
+ mock_server_obj = Mock()
+ mock_ip_obj = Mock()
+ mock_port_obj = Mock()
+ mock_router.name = 'm_router'
+ mock_server_obj.name = 'serv_obj'
+ mock_server_obj.ports = [mock_port_obj]
+ mock_ips = ['floating_ip', 'other_ip']
+ mock_ip_obj.ip = 'mocked_ip'
+ mock_port_obj.name = 'port_obj'
+ mock_port_obj.ips = [{'ip_address': 'floating_ip'}]
+ mock_get_stack_servers.return_value = [mock_server_obj]
+ mock_os_vm_ins = mock_generate_creator.return_value
+ float_ip_ins = mock_floating_ip_config.return_value
+ mock_os_vm_ins.add_floating_ip.return_value = mock_ip_obj
+
+ result = self.os_sfc.assign_floating_ip_vnfs(mock_router, mock_ips)
+ self.assertEqual(['mocked_ip'], result)
+ self.assertEqual([mock_os_vm_ins], self.os_sfc.creators)
+ mock_get_stack_servers.assert_called_once_with(self.heat,
+ self.nova,
+ self.neutron_client,
+ self.keystone,
+ 'stack_obj', 'admin')
+ mock_generate_creator.assert_called_once_with(self.os_creds,
+ mock_server_obj,
+ 'image_settings',
+ 'admin')
+ mock_floating_ip_config.assert_called_once_with(name='serv_obj-float',
+ port_name='port_obj',
+ router_name='m_router')
+ mock_os_vm_ins.add_floating_ip.assert_called_once_with(float_ip_ins)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_delete_all_security_groups(self, mock_log):
+ """
+ Checks the proper functionality of delete_all_security_groups
+ function
+ """
+
+ log_calls_info = [call('Deleting remaining security groups...')]
+ secgrp1_obj = Mock()
+ secgrp2_obj = Mock()
+ secgrp_list = MagicMock()
+
+ self.conn.network.security_groups.return_value = secgrp_list
+
+ secgrp_list.__iter__.return_value = [secgrp1_obj, secgrp2_obj]
+ del_calls = [call(secgrp1_obj),
+ call(secgrp2_obj)]
+
+ self.os_sfc.delete_all_security_groups()
+ self.conn.network.security_groups.assert_called_once()
+ self.conn.network.delete_security_group.assert_has_calls(del_calls)
+ mock_log.info.assert_has_calls(log_calls_info)
+
+ @patch('sfc.lib.openstack_utils.cr_inst.OpenStackVmInstance',
+ autospec=True)
+ def test_wait_for_vnf(self, mock_os_vm):
+ """
+ Checks the proper functionality of wait_for_vnf function
+ """
+
+ mock_os_vm.vm_active.return_value = "x"
+ result = self.os_sfc.wait_for_vnf(mock_os_vm)
+ self.assertEqual('x', result)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_port_groups_raises_exception(self, mock_log):
+ """
+ Checks the create_port_groups when length of ports is greater than 2
+ """
+ instance_obj = Mock()
+ instance_obj.name = 'name'
+ self.conn.compute.get_server.return_value = instance_obj
+
+ log_calls_info = [call('Creating the port pairs...')]
+ log_calls_err = [call('Only SFs with one or two ports are supported')]
+ exception_message = "Failed to create port pairs"
+ vnf_ports = ['p1', 'p2', 'p3']
+ with self.assertRaises(Exception) as cm:
+ self.os_sfc.create_port_groups(vnf_ports, instance_obj)
+ self.assertEqual(exception_message, cm.exception.message)
+ mock_log.info.assert_has_calls(log_calls_info)
+ mock_log.error.assert_has_calls(log_calls_err)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_port_groups_returns_none_from_pp(self, mock_log):
+ """
+ Checks the create_port_groups when something goes wrong in port pair
+ creation
+ """
+ instance_obj = Mock()
+ instance_obj.name = 'name'
+ port_obj1 = Mock()
+ port_obj2 = Mock()
+ port_obj1.id = '123abc'
+ port_obj2.id = '456def'
+
+ self.conn.compute.get_server.return_value = instance_obj
+ self.conn.network.get_port.side_effect = [port_obj1, port_obj2]
+
+ log_calls_info = [call('Creating the port pairs...')]
+ log_calls_warn = [call('Chain creation failed due to port pair '
+ 'creation failed for vnf %(vnf)s',
+ {'vnf': instance_obj.name})]
+ self.neutron_client.create_sfc_port_pair.return_value = None
+ result = self.os_sfc.create_port_groups(
+ [port_obj1, port_obj2], instance_obj)
+ self.assertIsNone(result)
+ mock_log.info.assert_has_calls(log_calls_info)
+ mock_log.warning.assert_has_calls(log_calls_warn)
+
+ @patch('snaps.domain.network.Port', autospec=True)
+ @patch('snaps.domain.vm_inst.VmInst', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_port_groups_exception_nopp(self, mock_log, mock_osvm,
+ mock_port):
+ """
+ Checks the create_port_groups when openstack does not commit the pp
+ """
+
+ log_calls_info = [call('Creating the port pairs...')]
+ mock_port_ins = mock_port.return_value
+ mock_port_ins.id = '123abc'
+ mock_vm_ins = mock_osvm.return_value
+ mock_vm_ins.name = 'vm'
+ exception_message = "Port pair was not committed in openstack"
+ expected_port_pair = {'name': 'vm-connection-points',
+ 'description': 'port pair for vm',
+ 'ingress': '123abc',
+ 'egress': '123abc'}
+ self.neutron_client.create_sfc_port_pair.return_value = \
+ {'port_pair': {'id': 'pp_id'}}
+ self.neutron_client.list_sfc_port_pairs.return_value = \
+ {'port_pairs': [{'id': 'xxxx'}]}
+ with self.assertRaises(Exception) as cm:
+ self.os_sfc.create_port_groups([mock_port_ins], mock_vm_ins)
+ self.assertEqual(exception_message, cm.exception.message)
+ self.neutron_client.create_sfc_port_pair.assert_has_calls(
+ [call({'port_pair': expected_port_pair})])
+ mock_log.info.assert_has_calls(log_calls_info)
+
+ @patch('snaps.domain.network.Port', autospec=True)
+ @patch('snaps.domain.vm_inst.VmInst', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_port_groups_returns_none_from_ppg(self, mock_log,
+ mock_vm,
+ mock_port):
+ """
+ Checks the create_port_groups when something goes wrong in port pair
+ group creation
+ """
+
+ instance_obj = Mock()
+ instance_obj.name = 'name'
+ port_obj = Mock()
+ port_obj.id = '123abc'
+
+ self.conn.compute.get_server.return_value = instance_obj
+ self.conn.network.get_port.return_value = port_obj
+
+ log_calls_info = [call('Creating the port pairs...'),
+ call('Creating the port pair groups for name')]
+ log_calls_warn = [call('Chain creation failed due to port pair group '
+ 'creation failed for vnf '
+ '{}'.format(instance_obj.name))]
+ self.neutron_client.create_sfc_port_pair.return_value = \
+ {'port_pair': {'id': 'pp_id'}}
+ self.neutron_client.list_sfc_port_pairs.return_value = \
+ {'port_pairs': [{'id': 'pp_id'}]}
+ self.neutron_client.create_sfc_port_pair_group.return_value = None
+ result = self.os_sfc.create_port_groups([port_obj], instance_obj)
+ self.assertIsNone(result)
+ mock_log.info.assert_has_calls(log_calls_info)
+ mock_log.warning.assert_has_calls(log_calls_warn)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_port_groups_returns_id(self, mock_log):
+ """
+ Checks the create_port_groups when everything goes as expected
+ """
+
+ log_calls_info = [call('Creating the port pairs...')]
+
+ instance_obj = Mock()
+ instance_obj.name = 'vm'
+ port_obj = Mock()
+ port_obj.id = '123abc'
+ self.conn.compute.get_server.return_value = instance_obj
+ self.conn.network.get_port.return_value = port_obj
+
+ expected_port_pair = {'name': 'vm-connection-points',
+ 'description': 'port pair for vm',
+ 'ingress': '123abc',
+ 'egress': '123abc'}
+ self.neutron_client.create_sfc_port_pair.return_value = \
+ {'port_pair': {'id': 'pp_id'}}
+ self.neutron_client.list_sfc_port_pairs.return_value = \
+ {'port_pairs': [{'id': 'pp_id'}]}
+ self.neutron_client.create_sfc_port_pair_group.return_value = \
+ {'port_pair_group': {'id': 'pp_id'}}
+ expected_port_pair_gr = {'name': 'vm-port-pair-group',
+ 'description': 'port pair group for vm',
+ 'port_pairs': ['pp_id']}
+
+ self.os_sfc.create_port_groups([port_obj], instance_obj)
+ self.neutron_client.create_sfc_port_pair.assert_has_calls(
+ [call({'port_pair': expected_port_pair})])
+ self.neutron_client.create_sfc_port_pair_group.assert_has_calls(
+ [call({'port_pair_group': expected_port_pair_gr})])
+ mock_log.info.assert_has_calls(log_calls_info)
+
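+ # The happy-path test above fixes the two request bodies
+ # create_port_groups is expected to send through networking-sfc, and
+ # the earlier nopp test suggests the new port pair id is verified via
+ # list_sfc_port_pairs before the group is created. For a single-port
+ # SF the same port serves as ingress and egress; the two-port split
+ # below (ports[0]/ports[-1]) is an assumption, since only the
+ # single-port body is asserted:
+ #
+ # pp = self.neutron_client.create_sfc_port_pair(
+ #     {'port_pair': {'name': vm.name + '-connection-points',
+ #                    'description': 'port pair for ' + vm.name,
+ #                    'ingress': ports[0].id,
+ #                    'egress': ports[-1].id}})
+ # ppg = self.neutron_client.create_sfc_port_pair_group(
+ #     {'port_pair_group': {'name': vm.name + '-port-pair-group',
+ #                          'description': 'port pair group for '
+ #                                         + vm.name,
+ #                          'port_pairs': [pp['port_pair']['id']]}})
+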
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_classifier(self, mock_log):
+ """
+ Checks the create_classifier method
+ """
+
+ log_calls = [call('Creating the classifier...')]
+ neutron_port = 'neutron_port_id'
+ port = 80
+ protocol = 'tcp'
+ fc_name = 'red_http'
+ symmetrical = False
+ self.neutron_client.create_sfc_flow_classifier.return_value = \
+ {'flow_classifier': {'id': 'fc_id'}}
+
+ expected_sfc_classifier_params = {'name': fc_name,
+ 'logical_source_port': neutron_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'protocol': protocol}
+ self.os_sfc.create_classifier(neutron_port, port,
+ protocol, fc_name, symmetrical)
+ self.neutron_client.create_sfc_flow_classifier.assert_has_calls(
+ [call({'flow_classifier': expected_sfc_classifier_params})])
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_classifier_symmetric(self, mock_log):
+ """
+ Checks the create_classifier method when symmetrical is True
+ """
+
+ log_calls = [call('Creating the classifier...')]
+ neutron_port = 'neutron_port_id'
+ port = 80
+ protocol = 'tcp'
+ fc_name = 'red_http'
+ symmetrical = True
+ serv_p = '123'
+ server_ip = '1.1.1.2'
+ self.neutron_client.create_sfc_flow_classifier.return_value = \
+ {'flow_classifier': {'id': 'fc_id'}}
+
+ expected_sfc_classifier_params = {'name': fc_name,
+ 'logical_source_port': neutron_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'destination_ip_prefix': server_ip,
+ 'logical_destination_port': serv_p,
+ 'protocol': protocol}
+ self.os_sfc.create_classifier(neutron_port, port,
+ protocol, fc_name, symmetrical,
+ server_port='123',
+ server_ip='1.1.1.2')
+ self.neutron_client.create_sfc_flow_classifier.assert_has_calls(
+ [call({'flow_classifier': expected_sfc_classifier_params})])
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_chain(self, mock_log):
+ """
+ Checks the create_chain method
+ """
+
+ log_calls = [call('Creating the classifier...'),
+ call('Creating the chain...')]
+ port_groups = ['1a', '2b']
+ neutron_port = 'neutron_port_id'
+ port = 80
+ protocol = 'tcp'
+ vnffg_name = 'red_http'
+ symmetrical = False
+ self.neutron_client.create_sfc_flow_classifier.return_value = \
+ {'flow_classifier': {'id': 'fc_id'}}
+ self.neutron_client.create_sfc_port_chain.return_value = \
+ {'port_chain': {'id': 'pc_id'}}
+
+ expected_sfc_classifier_params = {'name': vnffg_name + '-classifier',
+ 'logical_source_port': neutron_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'protocol': protocol}
+ expected_chain_config = {'name': vnffg_name + '-port-chain',
+ 'description': 'port-chain for SFC',
+ 'port_pair_groups': port_groups,
+ 'flow_classifiers': ['fc_id']}
+
+ self.os_sfc.create_chain(port_groups, neutron_port, port,
+ protocol, vnffg_name, symmetrical)
+
+ self.neutron_client.create_sfc_flow_classifier.assert_has_calls(
+ [call({'flow_classifier': expected_sfc_classifier_params})])
+ self.neutron_client.create_sfc_port_chain.assert_has_calls(
+ [call({'port_chain': expected_chain_config})])
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_chain_symmetric(self, mock_log):
+ """
+ Checks the create_chain method when symmetrical is True
+ """
+
+ log_calls = [call('Creating the classifier...'),
+ call('Creating the chain...')]
+ port_groups = ['1a', '2b']
+ neutron_port = 'neutron_port_id'
+ port = 80
+ protocol = 'tcp'
+ vnffg_name = 'red_http'
+ symmetrical = True
+ serv_p = '123abc'
+ server_ip = '1.1.1.2'
+ self.neutron_client.create_sfc_flow_classifier.return_value = \
+ {'flow_classifier': {'id': 'fc_id'}}
+ self.neutron_client.create_sfc_port_chain.return_value = \
+ {'port_chain': {'id': 'pc_id'}}
+
+ expected_sfc_classifier_params = {'name': vnffg_name + '-classifier',
+ 'logical_source_port': neutron_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'destination_ip_prefix': server_ip,
+ 'logical_destination_port': serv_p,
+ 'protocol': protocol}
+ expected_chain_config = {'name': vnffg_name + '-port-chain',
+ 'description': 'port-chain for SFC',
+ 'port_pair_groups': port_groups,
+ 'flow_classifiers': ['fc_id'],
+ 'chain_parameters': {'symmetric': True}}
+
+ self.os_sfc.create_chain(port_groups, neutron_port, port,
+ protocol, vnffg_name, symmetrical,
+ server_port=serv_p, server_ip=server_ip)
+
+ self.neutron_client.create_sfc_flow_classifier.assert_has_calls(
+ [call({'flow_classifier': expected_sfc_classifier_params})])
+ self.neutron_client.create_sfc_port_chain.assert_has_calls(
+ [call({'port_chain': expected_chain_config})])
+ mock_log.info.assert_has_calls(log_calls)
+
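+ # Comparing the symmetric and non-symmetric chain tests shows what
+ # the symmetrical flag is assumed to add: destination ip/port fields
+ # on the flow classifier and a chain_parameters entry on the port
+ # chain. A sketch of the symmetric additions only (the dict variable
+ # names are reconstructions):
+ #
+ # if symmetrical:
+ #     fc_params['destination_ip_prefix'] = server_ip
+ #     fc_params['logical_destination_port'] = server_port
+ #     chain_config['chain_parameters'] = {'symmetric': True}
+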
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_update_chain_symmetric(self, mock_log):
+ """
+ Checks the update_chain method
+ """
+
+ log_calls = [call('Update the chain...')]
+ vnffg_name = 'red_http'
+ fc_name = 'blue_ssh'
+ symmetrical = True
+ self.neutron_client.find_resource.return_value = \
+ {'id': 'fc_id'}
+ expected_chain_config = {'name': vnffg_name + '-port-chain',
+ 'flow_classifiers': ['fc_id'],
+ 'chain_parameters': {'symmetric': True}}
+ self.os_sfc.update_chain(vnffg_name, fc_name, symmetrical)
+ self.neutron_client.update_sfc_port_chain.assert_has_calls(
+ [call('fc_id', {'port_chain': expected_chain_config})])
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_swap_classifiers(self, mock_log):
+ """
+ Checks the swap_classifiers method
+ """
+
+ log_calls = [call('Swap classifiers...')]
+ vnffg_1_name = 'red_http'
+ vnffg_2_name = 'blue_ssh'
+ symmetrical = False
+ self.os_sfc.swap_classifiers(vnffg_1_name, vnffg_2_name, symmetrical)
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_delete_port_groups(self, mock_log):
+ """
+ Checks the delete_port_groups method
+ """
+ log_calls = [call('Deleting the port groups...'),
+ call('Deleting the port pairs...')]
+ self.neutron_client.list_sfc_port_pair_groups.return_value = \
+ {'port_pair_groups': [{'id': 'id_ppg1'}, {'id': 'id_ppg2'}]}
+ self.neutron_client.list_sfc_port_pairs.return_value = \
+ {'port_pairs': [{'id': 'id_pp1'}, {'id': 'id_pp2'}]}
+ self.os_sfc.delete_port_groups()
+
+ self.neutron_client.delete_sfc_port_pair_group.assert_has_calls(
+ [call('id_ppg1'), call('id_ppg2')])
+ self.neutron_client.delete_sfc_port_pair.assert_has_calls(
+ [call('id_pp1'), call('id_pp2')])
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_delete_chain(self, mock_log):
+ """
+ Checks the delete_chain method
+ """
+ log_calls = [call('Deleting the chain...'),
+ call('Deleting the classifiers...')]
+ self.neutron_client.list_sfc_port_chains.return_value = \
+ {'port_chains': [{'id': 'id_pc1'}]}
+ self.neutron_client.list_sfc_flow_classifiers.return_value = \
+ {'flow_classifiers': [{'id': 'id_fc1'}]}
+ self.os_sfc.delete_chain()
+
+ self.neutron_client.delete_sfc_port_chain.\
+ assert_has_calls([call('id_pc1')])
+ self.neutron_client.delete_sfc_flow_classifier.assert_has_calls(
+ [call('id_fc1')])
+ mock_log.info.assert_has_calls(log_calls)
+
+
+class SfcTackerSectionTesting(unittest.TestCase):
+ def setUp(self):
+ self.patcher = patch.object(tacker_client, 'Client', autospec=True)
+ self.mock_tacker_client = self.patcher.start().return_value
+
+ def tearDown(self):
+ self.patcher.stop()
+
+ @patch('os.getenv', autospec=True, return_value=None)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_get_tacker_client_version_returned_default(self,
+ mock_log,
+ mock_getenv):
+ """
+ Checks the proper functionality of get_tacker_client_version
+ function when the os.getenv returns none
+ """
+ result = os_sfc_utils.get_tacker_client_version()
+ self.assertEqual(result, '1.0')
+ mock_getenv.assert_called_once_with('OS_TACKER_API_VERSION')
+ mock_log.info.assert_not_called()
+
+ @patch('os.getenv', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_get_tacker_client_version(self,
+ mock_log,
+ mock_getenv):
+ """
+ Checks the proper functionality of get_tacker_client_version
+ function when the os.getenv returns version
+ """
+
+ ver = '2.0'
+ mock_getenv.return_value = ver
+ log_calls = [call("OS_TACKER_API_VERSION is set in env as '%s'", ver)]
+
+ result = os_sfc_utils.get_tacker_client_version()
+ self.assertEqual(result, ver)
+ mock_getenv.assert_called_once_with('OS_TACKER_API_VERSION')
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_get_id_from_name_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of get_id_from_name
+ function when tacker_client.list returns None
+ """
+
+ resource_name = 'mock_resource_name'
+ resource_type = 'mock_resource_type'
+ params = {'fields': 'id', 'name': resource_name}
+ collection = resource_type + 's'
+ path = '/' + collection
+ self.mock_tacker_client.list.side_effect = Exception('ErrorMSG')
+ log_calls = [call('Error [get_id_from_name(tacker_client, '
+ 'resource_type, resource_name)]: ErrorMSG')]
+
+ result = os_sfc_utils.get_id_from_name(self.mock_tacker_client,
+ resource_type,
+ resource_name)
+ self.assertIsNone(result)
+ self.mock_tacker_client.list.assert_called_once_with(collection,
+ path,
+ **params)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.openstack_tests.get_credentials',
+ autospec=True, return_value='os_creds')
+ @patch('sfc.lib.openstack_utils.keystone_utils.keystone_session',
+ autospec=True, return_value='keystone_session_obj')
+ @patch('sfc.lib.openstack_utils.constants.ENV_FILE', autospec=True)
+ @patch('sfc.lib.openstack_utils.tackerclient.Client', autospec=True)
+ def test_get_tacker_client(self, mock_tacker_client,
+ mock_env_file,
+ mock_keystone_session,
+ mock_get_credentials):
+ """
+ checks the proper functionality of get_tacker_client
+ function
+ """
+
+ mock_tacker_client_ins = mock_tacker_client.return_value
+ result = os_sfc_utils.get_tacker_client()
+ assert result is mock_tacker_client_ins
+ mock_get_credentials.assert_called_once_with(os_env_file=mock_env_file,
+ overrides=None)
+ mock_keystone_session.assert_called_once_with('os_creds')
+ mock_tacker_client.assert_called_once_with(
+ '1.0', session='keystone_session_obj')
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_get_id_from_name(self, mock_log):
+ """
+ Checks the proper functionality of get_id_from_name
+ function when tacker_client.list returns id
+ """
+
+ resource_name = 'mock_resource_name'
+ resource_type = 'mock_resource_type'
+ params = {'fields': 'id', 'name': resource_name}
+ collection = resource_type + 's'
+ self.mock_tacker_client.list.return_value = {collection: {0: {'id':
+ 'mock_id'}}}
+ path = '/' + collection
+ result = os_sfc_utils.get_id_from_name(self.mock_tacker_client,
+ resource_type,
+ resource_name)
+ self.assertEqual('mock_id', result)
+ self.mock_tacker_client.list.assert_called_once_with(collection,
+ path,
+ **params)
+ mock_log.error.assert_not_called()
+
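+ # Sketch of get_id_from_name as the two tests above assume it to
+ # work: query the pluralised collection for the resource's id and
+ # unwrap the first match, returning None (with an error log) on
+ # failure. The indexing into the response is reconstructed from the
+ # mocked return value, not from the source:
+ #
+ # def get_id_from_name(tacker_client, resource_type, resource_name):
+ #     try:
+ #         params = {'fields': 'id', 'name': resource_name}
+ #         collection = resource_type + 's'
+ #         resp = tacker_client.list(collection, '/' + collection,
+ #                                   **params)
+ #         return resp[collection][0]['id']
+ #     except Exception as e:
+ #         logger.error('Error [get_id_from_name(tacker_client, '
+ #                      'resource_type, resource_name)]: %s' % e)
+ #         return None
+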
+ @patch('sfc.lib.openstack_utils.get_id_from_name', autospec=True)
+ def test_get_vnfd_id(self, mock_get_id):
+ """
+ Checks the proper functionality of get_vnfd_id
+ function
+ """
+
+ mock_get_id.return_value = 'id'
+ result = os_sfc_utils.get_vnfd_id(self.mock_tacker_client,
+ 'vnfd_name')
+ self.assertEqual('id', result)
+ mock_get_id.assert_called_once_with(self.mock_tacker_client,
+ 'vnfd',
+ 'vnfd_name')
+
+ @patch('sfc.lib.openstack_utils.get_id_from_name', autospec=True)
+ def test_get_vim_id(self, mock_get_id):
+ """
+ Checks the proper functionality of get_vim_id
+ function
+ """
+
+ mock_get_id.return_value = 'id'
+ result = os_sfc_utils.get_vim_id(self.mock_tacker_client, 'vim_name')
+ mock_get_id.assert_called_once_with(self.mock_tacker_client,
+ 'vim',
+ 'vim_name')
+ self.assertEqual('id', result)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_id_from_name', autospec=True)
+ def test_get_vnf_id(self,
+ mock_get_id,
+ mock_log,
+ mock_sleep):
+ """
+ Checks the proper functionality of get_vnf_id
+ function
+ """
+
+ vnf_name = 'mock_vnf'
+ log_calls = [call("Could not retrieve ID for vnf with name [%s]."
+ " Retrying." % vnf_name)]
+
+ get_id_calls = [call(self.mock_tacker_client, 'vnf', vnf_name)] * 2
+
+ mock_get_id.side_effect = [None, 'vnf_id']
+
+ result = os_sfc_utils.get_vnf_id(self.mock_tacker_client, vnf_name, 2)
+ self.assertEqual('vnf_id', result)
+ mock_sleep.assert_called_once_with(1)
+ mock_log.info.assert_has_calls(log_calls)
+ mock_get_id.assert_has_calls(get_id_calls)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_id_from_name', autospec=True)
+ def test_get_vnffg_id(self,
+ mock_get_id,
+ mock_log,
+ mock_sleep):
+ """
+ Checks the proper functionality of get_vnffg_id
+ function
+ """
+
+ vnffg_name = 'mock_vnffg'
+ log_calls = [call("Could not retrieve ID for vnffg with name [%s]."
+ " Retrying." % vnffg_name)]
+
+ get_id_calls = [call(self.mock_tacker_client, 'vnffg', vnffg_name)] * 2
+
+ mock_get_id.side_effect = [None, 'vnf_id']
+
+ result = os_sfc_utils.get_vnffg_id(self.mock_tacker_client,
+ vnffg_name,
+ 2)
+ self.assertEqual('vnf_id', result)
+ mock_sleep.assert_called_once_with(1)
+ mock_log.info.assert_has_calls(log_calls)
+ mock_get_id.assert_has_calls(get_id_calls)
+
+ @patch('sfc.lib.openstack_utils.get_id_from_name', autospec=True)
+ def test_get_vnffgd_id(self, mock_get_id):
+ """
+ Checks the proper functionality of get_vnffgd_id
+ function
+ """
+
+ mock_get_id.return_value = 'id'
+ result = os_sfc_utils.get_vnffgd_id(self.mock_tacker_client,
+ 'vnffgd_name')
+ mock_get_id.assert_called_once_with(self.mock_tacker_client,
+ 'vnffgd',
+ 'vnffgd_name')
+ self.assertEqual('id', result)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_list_vnfds_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of list_vnfds
+ function when list_vnfds raises an exception
+ """
+
+ log_calls = [call('Error [list_vnfds(tacker_client)]: ErrorMSG')]
+ self.mock_tacker_client.list_vnfds.side_effect = Exception('ErrorMSG')
+ result = os_sfc_utils.list_vnfds(self.mock_tacker_client)
+ mock_log.error.assert_has_calls(log_calls)
+ self.mock_tacker_client.list_vnfds.assert_called_once_with(
+ retrieve_all=True)
+ self.assertIsNone(result)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_list_vnfds(self, mock_log):
+ """
+ Checks the proper functionality of list_vnfds
+ function when the list_vnfds returns vnfds
+ """
+
+ vnfds = {
+ 'vnfds': [{'id': 1},
+ {'id': 2}]
+ }
+ self.mock_tacker_client.list_vnfds.return_value = vnfds
+ result = os_sfc_utils.list_vnfds(self.mock_tacker_client)
+ self.mock_tacker_client.list_vnfds.assert_called_once_with(
+ retrieve_all=True)
+ mock_log.error.assert_not_called()
+ self.assertEqual([1, 2], result)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnfd_returned_none_tosca_file_not_provided(self, mock_log):
+ """
+ Checks the proper functionality of create_vnfd
+ function when an exception is raised
+ """
+
+ log_calls = [call("Creating the vnfd..."),
+ call("Error [create_vnfd(tacker_client, 'None')]: "
+ "ErrorMSG")]
+
+ self.mock_tacker_client.create_vnfd.side_effect = Exception('ErrorMSG')
+ result = os_sfc_utils.create_vnfd(self.mock_tacker_client,
+ None,
+ 'vnfd_name')
+ self.assertIsNone(result)
+ self.mock_tacker_client.create_vnfd.assert_called_once_with(
+ body={'vnfd': {'attributes': {'vnfd': {}},
+ 'name': 'vnfd_name'}})
+ mock_log.info.assert_has_calls(log_calls[:1])
+ mock_log.error.assert_has_calls(log_calls[1:])
+
+ @patch('yaml.safe_load', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnfd_returned_none_tosca_file_provided(self,
+ mock_log,
+ mock_open,
+ mock_safe_load):
+ """
+ Checks the proper functionality of create_vnfd
+ function when an exception is raised
+ """
+
+ log_calls = [call("Creating the vnfd..."),
+ call("VNFD template:\nmock_vnfd"),
+ call("Error [create_vnfd(tacker_client, 'tosca_file')]: "
+ "ErrorMSG")]
+
+ open_handler = mock_open.return_value.__enter__.return_value
+ open_handler.read.return_value = 'mock_vnfd'
+ mock_safe_load.return_value = 'mock_vnfd_body'
+ self.mock_tacker_client.create_vnfd.side_effect = Exception('ErrorMSG')
+ result = os_sfc_utils.create_vnfd(self.mock_tacker_client,
+ 'tosca_file',
+ 'vnfd_name')
+ self.assertIsNone(result)
+ mock_open.assert_called_once_with('tosca_file')
+ open_handler.read.assert_called_once_with()
+ mock_safe_load.assert_called_once_with('mock_vnfd')
+ mock_log.info.assert_has_calls(log_calls[:2])
+ mock_log.error.assert_has_calls(log_calls[2:])
+
+ @patch('yaml.safe_load', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnfd(self,
+ mock_log,
+ mock_open,
+ mock_safe_load):
+ """
+ Checks the proper functionality of create_vnfd
+ function
+ """
+
+ log_calls = [call("VNFD template:\nmock_vnfd")]
+
+ open_handler = mock_open.return_value.__enter__.return_value
+ open_handler.read.return_value = 'mock_vnfd'
+ mock_safe_load.return_value = 'mock_vnfd_body'
+ result = os_sfc_utils.create_vnfd(self.mock_tacker_client,
+ 'tosca_file',
+ 'vnfd_name')
+ assert result is self.mock_tacker_client.create_vnfd.return_value
+ self.mock_tacker_client.create_vnfd.assert_called_once_with(
+ body={"vnfd": {"attributes": {"vnfd": "mock_vnfd_body"},
+ "name": "vnfd_name"}})
+ mock_open.assert_called_once_with('tosca_file')
+ open_handler.read.assert_called_once_with()
+ mock_safe_load.assert_called_once_with('mock_vnfd')
+ mock_log.info.assert_has_calls(log_calls)
+ mock_log.error.assert_not_called()
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_delete_vnfd_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of delete_vnfd
+ function when neither VNFD id nor name is provided
+ """
+
+ log_calls = [call("Error [delete_vnfd(tacker_client, 'None', 'None')]:"
+ " You need to provide VNFD id or VNFD name")]
+
+ result = os_sfc_utils.delete_vnfd(self.mock_tacker_client)
+ self.assertIsNone(result)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.get_vnfd_id',
+ autospec=True, return_value='vnfd')
+ def test_delete_vnfd(self, mock_get_vnfd_id):
+ """
+ Checks the proper functionality of delete_vnfd
+ function
+ """
+
+ result = os_sfc_utils.delete_vnfd(self.mock_tacker_client,
+ None,
+ 'vnfd_name')
+ assert result is self.mock_tacker_client.delete_vnfd.return_value
+ mock_get_vnfd_id.assert_called_once_with(self.mock_tacker_client,
+ 'vnfd_name')
+ self.mock_tacker_client.delete_vnfd.assert_called_once_with('vnfd')
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_list_vnfs_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of list_vnfs
+ function when an exception is raised
+ """
+
+ log_calls = [call("Error [list_vnfs(tacker_client)]: ErrorMSG")]
+
+ self.mock_tacker_client.list_vnfs.side_effect = Exception('ErrorMSG')
+ result = os_sfc_utils.list_vnfs(self.mock_tacker_client)
+ self.assertIsNone(result)
+ self.mock_tacker_client.list_vnfs.assert_called_once_with(
+ retrieve_all=True)
+ mock_log.error.assert_has_calls(log_calls)
+
+ def test_list_vnfs(self):
+ """
+ Checks the proper functionality of list_vnfs
+ function
+ """
+ vnfs = {'vnfs': [{'id': 1},
+ {'id': 2}]}
+
+ self.mock_tacker_client.list_vnfs.return_value = vnfs
+ result = os_sfc_utils.list_vnfs(self.mock_tacker_client)
+ self.assertEqual([1, 2], result)
+ self.mock_tacker_client.list_vnfs.assert_called_once_with(
+ retrieve_all=True)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnf_returned_none_vnfd_not_provided(self, mock_log):
+ """
+ Checks the proper functionality of create_vnf
+ function when an exception is raised
+ """
+
+ log_calls = [call("Creating the vnf..."),
+ call("error [create_vnf(tacker_client,"
+ " 'vnf_name', 'None', 'None')]: "
+ "vnfd id or vnfd name is required")]
+ result = os_sfc_utils.create_vnf(self.mock_tacker_client, 'vnf_name')
+ self.assertIsNone(result)
+ mock_log.info.assert_has_calls(log_calls[:1])
+ mock_log.error.assert_has_calls(log_calls[1:])
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnf_returned_none_vnfd_provided(self, mock_log):
+ """
+ Checks the proper functionality of create_vnf
+ function when an exception is raised
+ """
+
+ log_calls = [call("Creating the vnf..."),
+ call("error [create_vnf(tacker_client,"
+ " 'vnf_name', 'None', 'vnfd_name')]: "
+ "vim id or vim name is required")]
+ result = os_sfc_utils.create_vnf(self.mock_tacker_client,
+ 'vnf_name',
+ None,
+ 'vnfd_name',
+ None,
+ None)
+ self.assertIsNone(result)
+ mock_log.info.assert_has_calls(log_calls[:1])
+ mock_log.error.assert_has_calls(log_calls[1:])
+
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_vim_id',
+ autospec=True, return_value='vim_id')
+ @patch('sfc.lib.openstack_utils.get_vnfd_id',
+ autospec=True, return_value='vnfd_id')
+ def test_create_vnf_vim_id_not_provided(self,
+ mock_get_vnfd_id,
+ mock_get_vim_id,
+ mock_log,
+ mock_open):
+ """
+ Checks the proper functionality of create_vnf
+ function
+ """
+ mock_body = {'vnf': {'attributes': {'param_values': 'mock_data'},
+ 'vim_id': 'vim_id',
+ 'name': 'vnf_name',
+ 'vnfd_id': 'vnfd_id'}}
+ log_calls = [call('Creating the vnf...')]
+ open_handler = mock_open.return_value.__enter__.return_value
+ open_handler.read.return_value = 'mock_data'
+ result = os_sfc_utils.create_vnf(self.mock_tacker_client,
+ 'vnf_name',
+ None,
+ 'vnfd_name',
+ None,
+ 'vim_name',
+ 'param_file')
+
+ assert result is self.mock_tacker_client.create_vnf.return_value
+ mock_get_vnfd_id.assert_called_once_with(self.mock_tacker_client,
+ 'vnfd_name')
+ mock_get_vim_id.assert_called_once_with(self.mock_tacker_client,
+ 'vim_name')
+ mock_log.info.assert_has_calls(log_calls)
+ self.mock_tacker_client.create_vnf.assert_called_once_with(
+ body=mock_body)
+
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnf_vim_id_provided(self, mock_log, mock_open):
+ """
+ Checks the proper functionality of create_vnf
+ function
+ """
+ mock_body = {'vnf': {'attributes': {},
+ 'vim_id': 'vim_id',
+ 'name': 'vnf_name',
+ 'vnfd_id': 'vnfd_id'}}
+ log_calls = [call('Creating the vnf...')]
+
+ result = os_sfc_utils.create_vnf(self.mock_tacker_client,
+ 'vnf_name',
+ 'vnfd_id',
+ 'vnfd_name',
+ 'vim_id',
+ 'vim_name')
+ assert result is self.mock_tacker_client.create_vnf.return_value
+ mock_log.info.assert_has_calls(log_calls)
+ self.mock_tacker_client.create_vnf.assert_called_once_with(
+ body=mock_body)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_get_vnf_returned_none_vnf_not_provided(self, mock_log):
+ """
+ Checks the proper functionality of get_vnf
+ function when an exception is raised
+ """
+
+ log_calls = [call("Could not retrieve VNF [vnf_id=None, vnf_name=None]"
+ " - You must specify vnf_id or vnf_name")]
+
+ result = os_sfc_utils.get_vnf(self.mock_tacker_client)
+ self.assertIsNone(result)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.get_vnf_id',
+ autospec=True, return_value=None)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_get_vnf_returned_none_vnf_provided(self,
+ mock_log,
+ mock_get_vnf_id):
+ """
+ Checks the proper functionality of get_vnf
+ function when an exception is raised
+ """
+
+ log_calls = [call("Could not retrieve VNF [vnf_id=None, "
+ "vnf_name=vnf_name] - Could not retrieve ID from "
+ "name [vnf_name]")]
+ result = os_sfc_utils.get_vnf(self.mock_tacker_client,
+ None,
+ 'vnf_name')
+ self.assertIsNone(result)
+ mock_get_vnf_id.assert_called_once_with(self.mock_tacker_client,
+ 'vnf_name')
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.list_vnfs', autospec=True)
+ def test_get_vnf(self,
+ mock_list_vnfs,
+ mock_log):
+ """
+ Checks the proper functionality of get_vnf
+ function
+ """
+
+ vnf = {'vnfs': [{'id': 'default'},
+ {'id': 'vnf_id'}]}
+
+ mock_list_vnfs.return_value = vnf
+ result = os_sfc_utils.get_vnf(self.mock_tacker_client, 'vnf_id', None)
+ self.assertDictEqual(vnf['vnfs'][1], result)
+ mock_log.error.assert_not_called()
+
+ @patch('json.loads', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_vnf', autospec=True)
+ def test_get_vnf_ip(self,
+ mock_get_vnf,
+ mock_json_loads):
+ """
+ Checks the proper functionality of get_vnf_ip
+ function
+ """
+
+ vnf = {"mgmt_url": {"VDU1": "192.168.120.3"}}
+ mock_get_vnf.return_value = vnf
+ mock_json_loads.return_value = vnf['mgmt_url']
+ result = os_sfc_utils.get_vnf_ip(self.mock_tacker_client)
+ self.assertEqual("192.168.120.3", result)
+ mock_get_vnf.assert_called_once_with(self.mock_tacker_client,
+ None,
+ None)
+ mock_json_loads.assert_called_once_with(vnf['mgmt_url'])
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_vnf', autospec=True)
+ def test_wait_for_vnf_returned_none_unable_to_retrieve_vnf(self,
+ mock_get_vnf,
+ mock_log):
+ """
+ Checks the proper functionality of wait_for_vnf
+ function when an Exception is raised
+ """
+
+ mock_get_vnf.return_value = None
+ log_calls = [call("error [wait_for_vnf(tacker_client, 'vnf_id', "
+ "'vnf_name')]: Could not retrieve VNF - id='vnf_id',"
+ " name='vnf_name'")]
+
+ result = os_sfc_utils.wait_for_vnf(self.mock_tacker_client,
+ 'vnf_id',
+ 'vnf_name',
+ 0)
+ self.assertIsNone(result)
+ mock_get_vnf.assert_called_once_with(self.mock_tacker_client,
+ 'vnf_id',
+ 'vnf_name')
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_vnf', autospec=True)
+ def test_wait_for_vnf_returned_none_unable_to_boot_vnf(self,
+ mock_get_vnf,
+ mock_log,
+ mock_sleep):
+ """
+ Checks the proper functionality of wait_for_vnf
+ function when an Exception is raised
+ """
+
+ mock_vnf_values = [{'id': 'vnf_id',
+ 'status': 'ERROR'},
+ {'id': 'vnf_id',
+ 'status': 'PENDING_CREATE'}]
+ mock_get_vnf.side_effect = mock_vnf_values
+ log_calls = [call("Waiting for vnf %s" % str(mock_vnf_values[0])),
+ call("error [wait_for_vnf(tacker_client, 'vnf_id', "
+ "'vnf_name')]: Error when booting vnf vnf_id")]
+
+ result = os_sfc_utils.wait_for_vnf(self.mock_tacker_client,
+ 'vnf_id',
+ 'vnf_name',
+ 0)
+ self.assertIsNone(result)
+ mock_get_vnf.assert_called_once_with(self.mock_tacker_client,
+ 'vnf_id',
+ 'vnf_name')
+ mock_log.info.assert_has_calls(log_calls[:1])
+ mock_log.error.assert_has_calls(log_calls[1:])
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_vnf', autospec=True)
+ def test_wait_for_vnf_returned_none_timeout_booting_vnf(self,
+ mock_get_vnf,
+ mock_log,
+ mock_sleep):
+ """
+ Checks the proper functionality of wait_for_vnf
+ function when an Exception is raised
+ """
+
+ mock_vnf_values = [{'id': 'vnf_id',
+ 'status': 'PENDING_CREATE'},
+ {'id': 'vnf_id',
+ 'status': 'PENDING_CREATE'}]
+ mock_get_vnf.side_effect = mock_vnf_values
+ log_calls = [call("Waiting for vnf %s" % str(mock_vnf_values[1])),
+ call("error [wait_for_vnf(tacker_client, 'vnf_id', "
+ "'vnf_name')]: Timeout when booting vnf vnf_id")]
+
+ result = os_sfc_utils.wait_for_vnf(self.mock_tacker_client,
+ 'vnf_id',
+ 'vnf_name',
+ 0)
+ self.assertIsNone(result)
+ mock_get_vnf.assert_called_with(self.mock_tacker_client,
+ 'vnf_id',
+ 'vnf_name')
+ mock_log.info.assert_has_calls(log_calls[:1])
+ mock_log.error.assert_has_calls(log_calls[1:])
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_vnf', autospec=True)
+ def test_wait_for_vnf(self,
+ mock_get_vnf,
+ mock_log,
+ mock_sleep):
+ """
+ Checks the proper functionality of wait_for_vnf
+ function
+ """
+
+ mock_vnf_values = [{'status': 'PENDING_CREATE',
+ 'id': 'vnf_id'},
+ {'status': 'ACTIVE',
+ 'id': 'vnf_id'}]
+
+ log_calls = [call("Waiting for vnf %s" % mock_vnf_values[0])]
+
+ mock_get_vnf.side_effect = mock_vnf_values
+
+ result = os_sfc_utils.wait_for_vnf(self.mock_tacker_client,
+ 'vnf_id',
+ 'vnf_name',
+ 3)
+ self.assertEqual('vnf_id', result)
+ mock_log.info.assert_has_calls(log_calls)
+
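+ # NOTE: a minimal sketch of the polling loop the four wait_for_vnf tests
+ # above exercise (an assumption pieced together from the asserted calls,
+ # not the actual implementation):
+ #
+ #     while True:
+ #         vnf = get_vnf(tacker_client, vnf_id, vnf_name)
+ #         if vnf is None:
+ #             logger.error(...)  # "Could not retrieve VNF"
+ #             return None
+ #         logger.info("Waiting for vnf %s" % str(vnf))
+ #         if vnf['status'] == 'ACTIVE':
+ #             return vnf['id']
+ #         if vnf['status'] == 'ERROR':
+ #             logger.error(...)  # "Error when booting vnf"
+ #             return None
+ #         if timeout <= 0:
+ #             logger.error(...)  # "Timeout when booting vnf"
+ #             return None
+ #         time.sleep(3)
+ #         timeout -= 3
+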
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_delete_vnf_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of delete_vnf
+ function when no VNF id or name is provided
+ """
+
+ log_calls = [call("Error [delete_vnf(tacker_client, 'None', 'None')]:"
+ " You need to provide a VNF id or name")]
+ result = os_sfc_utils.delete_vnf(self.mock_tacker_client)
+ self.assertIsNone(result)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_vnf_id', autospec=True)
+ def test_delete_vnf(self,
+ mock_get_vnf_id,
+ mock_log):
+ """
+ Checks the proper functionality of delete_vnf
+ function
+ """
+
+ mock_get_vnf_id.return_value = 'vnf'
+ result = os_sfc_utils.delete_vnf(self.mock_tacker_client,
+ None,
+ 'vnf_name')
+ assert result is self.mock_tacker_client.delete_vnf.return_value
+ mock_get_vnf_id.assert_called_once_with(self.mock_tacker_client,
+ 'vnf_name')
+ self.mock_tacker_client.delete_vnf.assert_called_once_with('vnf')
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vim_returned_none(self,
+ mock_log):
+ """
+ Checks the proper functionality of create_vim
+ function when create_vim raises an Exception
+ """
+
+ self.mock_tacker_client.create_vim.side_effect = Exception('ErrorMSG')
+ log_calls = [[call("Creating the vim...")],
+ [call("Error [create_vim(tacker_client, 'None')]"
+ ": ErrorMSG")]]
+
+ result = os_sfc_utils.create_vim(self.mock_tacker_client)
+ self.assertIsNone(result)
+ self.mock_tacker_client.create_vim.assert_called_once_with(body={})
+ mock_log.info.assert_has_calls(log_calls[0])
+ mock_log.error.assert_has_calls(log_calls[1])
+
+ @patch('json.load', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vim(self,
+ mock_log,
+ mock_open,
+ mock_json_loads):
+ """
+ Checks the proper functionality of create_vim
+ function
+ """
+
+ log_calls = [call("Creating the vim..."),
+ call("VIM template:\nmock_data")]
+
+ open_handler = mock_open.return_value.__enter__.return_value
+ mock_json_loads.return_value = 'mock_data'
+ result = os_sfc_utils.create_vim(self.mock_tacker_client, 'vim_file')
+ assert result is self.mock_tacker_client.create_vim.return_value
+ mock_log.info.assert_has_calls(log_calls)
+ mock_open.assert_called_once_with('vim_file')
+ mock_json_loads.assert_called_once_with(open_handler)
+ mock_log.error.assert_not_called()
+
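+ # NOTE: as the two create_vim tests above suggest (an inference, not the
+ # implementation itself), create_vim posts an empty body unless a vim_file
+ # is supplied, e.g.:
+ #
+ #     body = {}
+ #     if vim_file is not None:
+ #         with open(vim_file) as f:
+ #             body = json.load(f)
+ #         logger.info("VIM template:\n%s" % body)
+ #     return tacker_client.create_vim(body=body)
+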
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnffgd_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of create_vnffgd
+ function when create_vnffgd raises an Exception
+ """
+
+ self.mock_tacker_client.create_vnffgd.side_effect = Exception(
+ 'ErrorMSG')
+ log_calls = [[call("Creating the vnffgd...")],
+ [call("Error [create_vnffgd(tacker_client, 'None')]"
+ ": ErrorMSG")]]
+
+ result = os_sfc_utils.create_vnffgd(self.mock_tacker_client)
+ self.assertIsNone(result)
+ mock_log.info.assert_has_calls(log_calls[0])
+ mock_log.error.assert_has_calls(log_calls[1])
+
+ @patch('yaml.safe_load', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnffgd(self,
+ mock_log,
+ mock_open,
+ mock_safe_load):
+ """
+ Checks the proper functionality of create_vnffgd
+ function
+ """
+
+ log_calls = [call('Creating the vnffgd...'),
+ call('VNFFGD template:\nmock_data')]
+
+ vnffgd_body = {'id': 0, 'type': 'dict'}
+
+ mock_vim_body = {'vnffgd': {'name': 'vnffgd_name',
+ 'template': {'vnffgd': vnffgd_body}}}
+
+ open_handler = mock_open.return_value.__enter__.return_value
+ open_handler.read.return_value = 'mock_data'
+ mock_safe_load.return_value = {'id': 0, 'type': 'dict'}
+ result = os_sfc_utils.create_vnffgd(self.mock_tacker_client,
+ 'tosca_file',
+ 'vnffgd_name')
+ assert result is self.mock_tacker_client.create_vnffgd.return_value
+ mock_open.assert_called_once_with('tosca_file')
+ mock_safe_load.assert_called_once_with('mock_data')
+ self.mock_tacker_client.create_vnffgd.assert_called_once_with(
+ body=mock_vim_body)
+ mock_log.info.assert_has_calls(log_calls)
+ mock_log.error.assert_not_called()
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnffg_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of create_vnffg
+ function when neither the vnffgd id nor the vnffgd name is provided
+ """
+
+ log_calls = [[call("Creating the vnffg...")],
+ [call("error [create_vnffg(tacker_client,"
+ " 'None', 'None', 'None')]: "
+ "vnffgd id or vnffgd name is required")]]
+
+ result = os_sfc_utils.create_vnffg(self.mock_tacker_client)
+ self.assertIsNone(result)
+ mock_log.info.assert_has_calls(log_calls[0])
+ mock_log.error.assert_has_calls(log_calls[1])
+
+ @patch('yaml.safe_load', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_vnffgd_id', autospec=True)
+ def test_create_vnffg_vnffgd_id_not_provided(self,
+ mock_get_vnffgd_id,
+ mock_log,
+ mock_open,
+ mock_safe_load):
+ """
+ Checks the proper functionality of create_vnffg
+ function when the vnffgd id is not provided and must be
+ resolved from the vnffgd name
+ """
+
+ log_calls = [call('Creating the vnffg...')]
+ vnffg_calls = [call(body={
+ 'vnffg': {
+ 'attributes': {'param_values': {'type': 'dict',
+ 'id': 0}},
+ 'vnffgd_id': 'mocked_vnffg_id',
+ 'name': 'vnffg_name',
+ 'symmetrical': False}})]
+ mock_get_vnffgd_id.return_value = 'mocked_vnffg_id'
+ open_handler = mock_open.return_value.__enter__.return_value
+ open_handler.read.return_value = 'data'
+ mock_safe_load.return_value = {'id': 0, 'type': 'dict'}
+
+ result = os_sfc_utils.create_vnffg(self.mock_tacker_client,
+ 'vnffg_name',
+ None,
+ 'vnffgd_name',
+ 'param_file')
+ assert result is self.mock_tacker_client.create_vnffg.return_value
+ mock_open.assert_called_once_with('param_file')
+ open_handler.read.assert_called_once_with()
+ mock_get_vnffgd_id.assert_called_once_with(self.mock_tacker_client,
+ 'vnffgd_name')
+ mock_safe_load.assert_called_once_with('data')
+ mock_log.info.assert_has_calls(log_calls)
+ self.mock_tacker_client.create_vnffg.assert_has_calls(vnffg_calls)
+
+ @patch('yaml.safe_load', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnffg_vnffgd_id_provided(self,
+ mock_log,
+ mock_open,
+ mock_safe_load):
+ """
+ Checks the proper functionality of create_vnffg
+ function when the vnffgd id is provided
+ """
+
+ log_calls = [call('Creating the vnffg...')]
+ vnffg_calls = [call(body={
+ 'vnffg': {
+ 'attributes': {'param_values': {'type': 'dict',
+ 'id': 0}},
+ 'vnffgd_id': 'vnffgd_id',
+ 'name': 'vnffg_name',
+ 'symmetrical': False}})]
+ open_handler = mock_open.return_value.__enter__.return_value
+ open_handler.read.return_value = 'data'
+ mock_safe_load.return_value = {'id': 0, 'type': 'dict'}
+
+ result = os_sfc_utils.create_vnffg(self.mock_tacker_client,
+ 'vnffg_name',
+ 'vnffgd_id',
+ 'vnffgd_name',
+ 'param_file')
+ assert result is self.mock_tacker_client.create_vnffg.return_value
+ mock_open.assert_called_once_with('param_file')
+ open_handler.read.assert_called_once_with()
+ mock_safe_load.assert_called_once_with('data')
+ mock_log.info.assert_has_calls(log_calls)
+ self.mock_tacker_client.create_vnffg.assert_has_calls(vnffg_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_list_vnffgds_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of list_vnffgds
+ function when list_vnffgds raises an Exception
+ """
+
+ self.mock_tacker_client.list_vnffgds.side_effect = Exception(
+ 'ErrorMSG')
+ log_calls = [call('Error [list_vnffgds(tacker_client)]: ErrorMSG')]
+
+ result = os_sfc_utils.list_vnffgds(self.mock_tacker_client)
+ self.assertIsNone(result)
+ self.mock_tacker_client.list_vnffgds.assert_called_once_with(
+ retrieve_all=True)
+ mock_log.error.assert_has_calls(log_calls)
+
+ def test_list_vnffgds(self):
+ """
+ Checks the proper functionality of list_vnffgds
+ function
+ """
+
+ vnffgds = {'vnffgds': [{'id': 'vnffgd_obj_one'},
+ {'id': 'vnffgd_obj_two'}]}
+
+ mock_vnffgds = ['vnffgd_obj_one', 'vnffgd_obj_two']
+
+ self.mock_tacker_client.list_vnffgds.return_value = vnffgds
+ result = os_sfc_utils.list_vnffgds(self.mock_tacker_client)
+ self.assertEqual(mock_vnffgds, result)
+ self.mock_tacker_client.list_vnffgds.assert_called_once_with(
+ retrieve_all=True)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_list_vnffgs_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of list_vnffgs
+ function when list_vnffgs raises an Exception
+ """
+
+ self.mock_tacker_client.list_vnffgs.side_effect = Exception('ErrorMSG')
+ log_calls = [call('Error [list_vnffgs(tacker_client)]: ErrorMSG')]
+
+ result = os_sfc_utils.list_vnffgs(self.mock_tacker_client)
+ self.assertIsNone(result)
+ self.mock_tacker_client.list_vnffgs.assert_called_once_with(
+ retrieve_all=True)
+ mock_log.error.assert_has_calls(log_calls)
+
+ def test_list_vnffgs(self):
+ """
+ Checks the proper functionality of list_vnffgs
+ function
+ """
+
+ vnffgs = {'vnffgs': [{'id': 'vnffg_obj_one'},
+ {'id': 'vnffg_obj_two'}]}
+
+ mock_vnffgs = ['vnffg_obj_one', 'vnffg_obj_two']
+
+ self.mock_tacker_client.list_vnffgs.return_value = vnffgs
+ result = os_sfc_utils.list_vnffgs(self.mock_tacker_client)
+ self.assertEqual(mock_vnffgs, result)
+ self.mock_tacker_client.list_vnffgs.assert_called_once_with(
+ retrieve_all=True)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_delete_vnffg_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of delete_vnffg
+ function when no VNFFG id or name is provided
+ """
+
+ log_calls = [call("Error [delete_vnffg(tacker_client, 'None', 'None')]"
+ ": You need to provide a VNFFG id or name")]
+
+ result = os_sfc_utils.delete_vnffg(self.mock_tacker_client)
+ self.assertIsNone(result)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.get_vnffg_id',
+ autospec=True, return_value='vnffg')
+ def test_delete_vnffg(self, mock_get_vnffg_id):
+ """
+ Checks the proper functionality of delete_vnffg
+ function
+ """
+
+ self.mock_tacker_client.delete_vnffg.return_value = 'deleted'
+ result = os_sfc_utils.delete_vnffg(self.mock_tacker_client,
+ None,
+ 'vnffg_name')
+ self.assertEqual('deleted', result)
+ mock_get_vnffg_id.assert_called_once_with(self.mock_tacker_client,
+ 'vnffg_name')
+ self.mock_tacker_client.delete_vnffg.assert_called_once_with('vnffg')
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_delete_vnffgd_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of delete_vnffgd
+ function when no VNFFGD id or name is provided
+ """
+
+ log_calls = [call("Error [delete_vnffgd(tacker_client, 'None', 'None')"
+ "]: You need to provide VNFFGD id or VNFFGD name")]
+
+ result = os_sfc_utils.delete_vnffgd(self.mock_tacker_client)
+ self.assertIsNone(result)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.get_vnffgd_id',
+ autospec=True, return_value='vnffgd')
+ def test_delete_vnffgd(self, mock_get_vnffgd_id):
+ """
+ Checks the proper functionality of delete_vnffgd
+ function
+ """
+
+ self.mock_tacker_client.delete_vnffgd.return_value = 'deleted'
+ result = os_sfc_utils.delete_vnffgd(self.mock_tacker_client,
+ None,
+ 'vnffgd_name')
+ self.assertEqual('deleted', result)
+ mock_get_vnffgd_id.assert_called_once_with(self.mock_tacker_client,
+ 'vnffgd_name')
+ self.mock_tacker_client.delete_vnffgd.assert_called_once_with('vnffgd')
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_list_vims_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of list_vims
+ function when list_vims raises an Exception
+ """
+
+ self.mock_tacker_client.list_vims.side_effect = Exception('ErrorMSG')
+ log_calls = [call('Error [list_vims(tacker_client)]: ErrorMSG')]
+
+ result = os_sfc_utils.list_vims(self.mock_tacker_client)
+ self.assertIsNone(result)
+ self.mock_tacker_client.list_vims.assert_called_once_with(
+ retrieve_all=True)
+ mock_log.error.assert_has_calls(log_calls)
+
+ def test_list_vims(self):
+ """
+ Checks the proper functionality of list_vims
+ function
+ """
+
+ vims = {'vims': [{'id': 'vim_obj_1'},
+ {'id': 'vim_obj_2'}]}
+
+ mock_vims = ['vim_obj_1', 'vim_obj_2']
+
+ self.mock_tacker_client.list_vims.return_value = vims
+ result = os_sfc_utils.list_vims(self.mock_tacker_client)
+ self.assertEqual(mock_vims, result)
+ self.mock_tacker_client.list_vims.assert_called_once_with(
+ retrieve_all=True)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_delete_vim_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of delete_vim
+ function when neither the VIM id nor the VIM name is provided
+ """
+
+ log_calls = [call("Error [delete_vim(tacker_client, '%s', '%s')]: %s"
+ % (None, None, 'You need to provide '
+ 'VIM id or VIM name'))]
+
+ result = os_sfc_utils.delete_vim(self.mock_tacker_client)
+ self.assertIsNone(result)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.get_vim_id',
+ autospec=True, return_value='vim_id')
+ def test_delete_vim(self, mock_get_vim_id):
+ """
+ Checks the proper functionality of delete_vim
+ function
+ """
+
+ result = os_sfc_utils.delete_vim(self.mock_tacker_client,
+ None,
+ 'vim_name')
+ assert result is self.mock_tacker_client.delete_vim.return_value
+ mock_get_vim_id.assert_called_once_with(self.mock_tacker_client,
+ 'vim_name')
+ self.mock_tacker_client.delete_vim.assert_called_once_with('vim_id')
+
+ @patch('sfc.lib.openstack_utils.get_tacker_client',
+ autospec=True, return_value='tacker_client_obj')
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_get_tacker_items(self,
+ mock_log,
+ mock_tacker_client):
+ """
+ Checks the proper functionality of get_tacker_items
+ function
+ """
+
+ mock_dict = {'list_vims': DEFAULT,
+ 'list_vnfds': DEFAULT,
+ 'list_vnfs': DEFAULT,
+ 'list_vnffgds': DEFAULT,
+ 'list_vnffgs': DEFAULT}
+ with patch.multiple('sfc.lib.openstack_utils',
+ **mock_dict) as mock_values:
+
+ os_sfc_utils.get_tacker_items()
+
+ mock_tacker_client.assert_called_once_with()
+ self.assertEqual(5, mock_log.debug.call_count)
+ for key in mock_values:
+ mock_values[key].assert_called_once_with('tacker_client_obj')
+
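+ # NOTE: patch.multiple(target, name=DEFAULT) replaces each named attribute
+ # with a MagicMock and, used as a context manager, yields a {name: mock}
+ # dict, which is what lets the loop above assert on every list_* helper
+ # in one pass.
+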
+ @patch('json.dump', autospec=True)
+ @patch('json.load', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.create_vim', autospec=True)
+ def test_register_vim(self,
+ mock_create_vim,
+ mock_open,
+ mock_json_loads,
+ mock_json_dump):
+ """
+ Checks the proper functionality of register_vim
+ function
+ """
+
+ tmp_file = '/tmp/register-vim.json'
+ open_handler = mock_open.return_value.__enter__.return_value
+ open_calls = [call('vim_file'),
+ call(tmp_file, 'w')]
+
+ mock_json_loads.return_value = {'vim': {'auth_cred':
+ {'password': None},
+ 'auth_url': None}}
+
+ json_dict = {'vim': {'auth_cred': {'password': 'os_auth_cred'},
+ 'auth_url': 'os_auth_url'}}
+
+ patch_dict = {'OS_AUTH_URL': 'os_auth_url',
+ 'OS_PASSWORD': 'os_auth_cred'}
+
+ with patch.dict('os.environ', patch_dict):
+ os_sfc_utils.register_vim(self.mock_tacker_client, 'vim_file')
+ mock_json_loads.assert_called_once_with(open_handler)
+ mock_json_dump.assert_called_once_with(json_dict,
+ mock_open(tmp_file, 'w'))
+ mock_open.assert_has_calls(open_calls, any_order=True)
+ mock_create_vim.assert_called_once_with(self.mock_tacker_client,
+ vim_file=tmp_file)
+
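+ # NOTE: a sketch of what register_vim appears to do, assumed from the
+ # asserted calls above (not the actual implementation): fill in the
+ # credentials from the environment and hand the rewritten file to
+ # create_vim.
+ #
+ #     with open(vim_file) as f:
+ #         vim = json.load(f)
+ #     vim['vim']['auth_url'] = os.environ['OS_AUTH_URL']
+ #     vim['vim']['auth_cred']['password'] = os.environ['OS_PASSWORD']
+ #     with open('/tmp/register-vim.json', 'w') as f:
+ #         json.dump(vim, f)
+ #     create_vim(tacker_client, vim_file='/tmp/register-vim.json')
+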
+ @patch('json.dump', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.create_vnf', autospec=True)
+ @patch('os.path.join',
+ autospec=True, return_value='/tmp/param_av_zone.json')
+ def test_create_vnf_in_av_zone(self,
+ mock_path_join,
+ mock_create_vnf,
+ mock_open,
+ mock_json_dump):
+ """
+ Checks the proper functionality of create_vnf_in_av_zone
+ function
+ """
+
+ data = {'zone': 'av::zone'}
+ param_file = '/tmp/param_av_zone.json'
+ os_sfc_utils.create_vnf_in_av_zone(self.mock_tacker_client,
+ 'vnf_name',
+ 'vnfd_name',
+ 'vim_name',
+ 'param_file',
+ 'av::zone')
+ open_handler = mock_open.return_value.__enter__.return_value
+ mock_path_join.assert_called_once_with('/tmp', 'param_av_zone.json')
+ mock_open.assert_called_once_with(param_file, 'w+')
+ mock_json_dump.assert_called_once_with(data, open_handler)
+ mock_create_vnf.assert_called_once_with(self.mock_tacker_client,
+ 'vnf_name',
+ vnfd_name='vnfd_name',
+ vim_name='vim_name',
+ param_file=param_file)
+
+ @patch('json.dump', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.create_vnffg', autospec=True)
+ @patch('os.path.join', autospec=True, return_value='/tmp/param_name.json')
+ def test_create_vnffg_with_param_file(self,
+ mock_path_join,
+ mock_create_vnffg,
+ mock_open,
+ mock_json_dump):
+ """
+ Checks the proper functionality of create_vnffg_with_param_file
+ function
+ """
+
+ data = {
+ 'ip_dst_prefix': 'server_ip',
+ 'net_dst_port_id': 'server_port',
+ 'net_src_port_id': 'client_port'
+ }
+ param_file = '/tmp/param_name.json'
+ os_sfc_utils.create_vnffg_with_param_file(self.mock_tacker_client,
+ 'vnffgd_name',
+ 'vnffg_name',
+ 'default_param_file',
+ 'client_port',
+ 'server_port',
+ 'server_ip')
+ open_handler = mock_open.return_value.__enter__.return_value
+ mock_path_join.assert_called_once_with('/tmp', 'param_vnffg_name.json')
+ mock_open.assert_called_once_with(param_file, 'w+')
+ mock_json_dump.assert_called_once_with(data, open_handler)
+ mock_create_vnffg.assert_called_once_with(self.mock_tacker_client,
+ vnffgd_name='vnffgd_name',
+ vnffg_name='vnffg_name',
+ param_file=param_file,
+ symmetrical=True)
diff --git a/sfc/unit_tests/unit/lib/test_test_utils.py b/sfc/unit_tests/unit/lib/test_test_utils.py
new file mode 100644
index 00000000..a7d2bfde
--- /dev/null
+++ b/sfc/unit_tests/unit/lib/test_test_utils.py
@@ -0,0 +1,543 @@
+#!/usr/bin/env python
+
+###############################################################################
+# Copyright (c) 2018 Venkata Harshavardhan Reddy Allu and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+import unittest
+
+from mock import Mock
+from mock import call
+from mock import patch
+
+import sfc.lib.test_utils as test_utils
+
+__author__ = "Harshavardhan Reddy <venkataharshavardhan_ven@srmuniv.edu.in>"
+
+
+class SfcTestUtilsTesting(unittest.TestCase):
+
+ def setUp(self):
+ self.ip = '10.10.10.10'
+
+ @patch('subprocess.PIPE', autospec=True)
+ @patch('subprocess.Popen', autospec=True)
+ @patch('sfc.lib.test_utils.logger', autospec=True)
+ def test_run_cmd(self,
+ mock_log,
+ mock_popen,
+ mock_pipe):
+ """
+ Checks the proper functionality of run_cmd
+ function
+ """
+
+ cmd = 'mock_command'
+ log_calls = [call('Running [mock_command] returns: [0] ' +
+ '- STDOUT: "output" - STDERR: "output"')]
+
+ pipe_mock = Mock()
+ attrs = {'communicate.return_value': ('output', 'error'),
+ 'returncode': 0}
+ pipe_mock.configure_mock(**attrs)
+ mock_popen.return_value = pipe_mock
+ result = test_utils.run_cmd(cmd)
+ self.assertTupleEqual(result, (0, 'output', 'error'))
+ mock_popen.assert_called_once_with(cmd,
+ shell=True,
+ stdout=mock_pipe,
+ stderr=mock_pipe)
+ mock_popen.return_value.communicate.assert_called_once_with()
+ mock_log.debug.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.test_utils.run_cmd', autospec=True)
+ def test_run_cmd_remote(self, mock_run_cmd):
+ """
+ Checks the proper functionality of the run_cmd_remote
+ function
+ """
+
+ cmd = 'mock_command'
+ mock_rc = 'sshpass -p opnfv ssh -q -o UserKnownHostsFile=/dev/null' + \
+ ' -o StrictHostKeyChecking=no -o ConnectTimeout=50 ' + \
+ ' root@10.10.10.10 mock_command'
+ test_utils.run_cmd_remote(self.ip, cmd)
+ mock_run_cmd.assert_called_once_with(mock_rc)
+
+ @patch('shutil.copyfileobj')
+ @patch('urllib.urlopen', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ def test_download_url_with_exception(self,
+ mock_open,
+ mock_urlopen,
+ mock_copyfileobj):
+ """
+ Checks the proper functionality of download_url
+ function when an exception is raised
+ """
+
+ dest_path = 'mocked_/dest_/path'
+ url = 'mocked_/url'
+ mock_urlopen.side_effect = Exception('HttpError')
+ self.assertFalse(test_utils.download_url(url, dest_path))
+ mock_urlopen.assert_called_once_with(url)
+ mock_open.assert_not_called()
+ mock_copyfileobj.assert_not_called()
+
+ @patch('urllib.urlopen', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('shutil.copyfileobj', autospec=True)
+ def test_download_url_without_exception(self,
+ mock_copyfileobj,
+ mock_open,
+ mock_urlopen):
+ """
+ Checks the proper functionality of download_url
+ function when no exception is raised
+ """
+
+ response = '<mocked_response>'
+ dest_path = 'mocked_/dest_/path'
+ url = 'mocked_/url'
+ open_handler = mock_open.return_value.__enter__.return_value
+ mock_urlopen.return_value = response
+ self.assertTrue(test_utils.download_url(url, dest_path))
+ mock_urlopen.assert_called_once_with(url)
+ mock_open.assert_called_once_with('mocked_/dest_/path/url', 'wb')
+ mock_copyfileobj.assert_called_once_with(response, open_handler)
+
+ @patch('sfc.lib.test_utils.logger', autospec=True)
+ @patch('sfc.lib.test_utils.download_url', autospec=True)
+ @patch('os.path.isfile', autospec=True, return_value=False)
+ @patch('os.path.dirname', autospec=True, return_value='mocked_')
+ @patch('os.path.basename', autospec=True, return_value='image_path')
+ def test_download_image_file_not_found(self,
+ mock_basename,
+ mock_dirname,
+ mock_isfile,
+ mock_download_url,
+ mock_log):
+ """
+ Checks the proper functionality of download_image
+ function when the image file was not found locally
+ """
+
+ url = 'mocked_/url'
+ image_path = 'mocked_/image_path'
+ log_calls = [call('Downloading image')]
+ test_utils.download_image(url, image_path)
+ mock_log.info.assert_has_calls(log_calls)
+ mock_basename.assert_called_once_with(image_path)
+ mock_dirname.assert_called_once_with(image_path)
+ mock_isfile.assert_called_once_with(image_path)
+ mock_download_url.assert_called_once_with('mocked_/url/image_path',
+ 'mocked_')
+
+ @patch('sfc.lib.test_utils.download_url')
+ @patch('sfc.lib.test_utils.logger', autospec=True)
+ @patch('os.path.isfile', autospec=True, return_value=True)
+ @patch('os.path.dirname', autospec=True, return_value='mocked_')
+ @patch('os.path.basename', autospec=True, return_value='image_path')
+ def test_download_image_file_found(self,
+ mock_basename,
+ mock_dirname,
+ mock_isfile,
+ mock_log,
+ mock_download_url):
+ """
+ Checks the proper functionality of download_image
+ function when the image file was found locally
+ """
+
+ url = 'mocked_/url'
+ image_path = 'mocked_/image_path'
+ log_calls = [call('Using old image')]
+ test_utils.download_image(url, image_path)
+ mock_log.info.assert_has_calls(log_calls)
+ mock_basename.assert_called_once_with(image_path)
+ mock_dirname.assert_called_once_with(image_path)
+ mock_isfile.assert_called_once_with(image_path)
+ mock_download_url.assert_not_called()
+
+ @patch('sfc.lib.test_utils.run_cmd', autospec=True)
+ def test_ping_gets_error(self, mock_run_cmd):
+ """
+ Checks the proper functionality of ping
+ function when run_cmd returns non-zero returncode
+ """
+
+ mock_cmd = 'ping -c1 -w1 %s' % self.ip
+ mock_run_cmd.return_value = (1, '', '')
+ self.assertFalse(test_utils.ping(self.ip, 1))
+ mock_run_cmd.assert_called_once_with(mock_cmd)
+
+ @patch('sfc.lib.test_utils.run_cmd', autospec=True)
+ def test_ping_gets_no_error(self, mock_run_cmd):
+ """
+ Checks the proper functionality of ping
+ function when run_cmd returns zero as returncode
+ """
+
+ mock_cmd = 'ping -c1 -w1 %s' % self.ip
+ mock_run_cmd.return_value = (0, '', '')
+ self.assertTrue(test_utils.ping(self.ip, 1))
+ mock_run_cmd.assert_called_once_with(mock_cmd)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.test_utils.logger', autospec=True)
+ @patch('sfc.lib.test_utils.run_cmd_remote', autospec=True)
+ def test_start_http_server_returned_false_failed_to_start(
+ self, mock_run_cmd_remote, mock_log, mock_sleep):
+ """
+ Checks the proper functionality of start_http_server
+ function when the http server fails to start
+ """
+
+ cmd = "\'python -m SimpleHTTPServer 80 " + \
+ "> /dev/null 2>&1 &\'"
+
+ rcr_calls = [call(self.ip, cmd),
+ call(self.ip, 'ps aux | grep SimpleHTTPServer')]
+ log_calls = [call('Failed to start http server')]
+
+ mock_run_cmd_remote.side_effect = [('', '', ''),
+ ('', '', '')]
+
+ result = test_utils.start_http_server(self.ip, 1)
+ self.assertFalse(result)
+ mock_run_cmd_remote.assert_has_calls(rcr_calls)
+ mock_sleep.assert_called_once_with(3)
+ mock_log.error.assert_has_calls(log_calls)
+ mock_log.info.assert_not_called()
+ mock_log.debug.assert_not_called()
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.test_utils.logger', autospec=True)
+ @patch('sfc.lib.test_utils.run_cmd_remote', autospec=True)
+ def test_start_http_server_returned_false_port_is_down(
+ self, mock_run_cmd_remote, mock_log, mock_sleep):
+ """
+ Checks the proper functionality of start_http_server
+ function when port 80 is down
+ """
+
+ cmd = "\'python -m SimpleHTTPServer 80 " + \
+ "> /dev/null 2>&1 &\'"
+
+ rcr_calls = [call(self.ip, cmd),
+ call(self.ip, 'ps aux | grep SimpleHTTPServer'),
+ call(self.ip, 'netstat -pntl | grep :80')]
+
+ log_calls = [call('output'),
+ call('Port 80 is not up yet')]
+
+ mock_run_cmd_remote.side_effect = [('', '', ''),
+ ('', 'output', ''),
+ ('', '', '')]
+
+ result = test_utils.start_http_server(self.ip, 1)
+ self.assertFalse(result)
+ mock_run_cmd_remote.assert_has_calls(rcr_calls)
+ mock_sleep.assert_called_with(5)
+ mock_log.info.assert_has_calls(log_calls[:1])
+ mock_log.debug.assert_has_calls(log_calls[1:])
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.test_utils.logger', autospec=True)
+ @patch('sfc.lib.test_utils.run_cmd_remote', autospec=True)
+ def test_start_http_server_returned_true(self,
+ mock_run_cmd_remote,
+ mock_log,
+ mock_sleep):
+ """
+ Checks the proper functionality of start_http_server
+ function when port 80 is up
+ """
+
+ cmd = "\'python -m SimpleHTTPServer 80 " + \
+ "> /dev/null 2>&1 &\'"
+
+ rcr_calls = [call(self.ip, cmd),
+ call(self.ip, 'ps aux | grep SimpleHTTPServer'),
+ call(self.ip, 'netstat -pntl | grep :80')]
+
+ log_calls = [call('output')]
+
+ mock_run_cmd_remote.side_effect = [('', '', ''),
+ ('', 'output', ''),
+ ('', 'output', '')]
+
+ self.assertTrue(test_utils.start_http_server(self.ip, 1))
+ mock_run_cmd_remote.assert_has_calls(rcr_calls)
+ mock_sleep.assert_called_once_with(3)
+ mock_log.info.assert_has_calls(log_calls)
+ mock_log.debug.assert_not_called()
+
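+ # NOTE: the three start_http_server tests above assume roughly this flow
+ # (an inference from the mocked side effects, not the implementation):
+ #
+ #     run_cmd_remote(ip, cmd)      # launch SimpleHTTPServer detached
+ #     time.sleep(3)
+ #     _, output, _ = run_cmd_remote(ip, 'ps aux | grep SimpleHTTPServer')
+ #     if not output:
+ #         logger.error('Failed to start http server')
+ #         return False
+ #     logger.info(output)
+ #     while timeout > 0:
+ #         _, output, _ = run_cmd_remote(ip, 'netstat -pntl | grep :80')
+ #         if output:
+ #             return True
+ #         logger.debug('Port 80 is not up yet')
+ #         time.sleep(5)
+ #         timeout -= 1
+ #     return False
+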
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.test_utils.logger', autospec=True)
+ @patch('sfc.lib.test_utils.run_cmd_remote', autospec=True)
+ def test_start_vxlan_tool_returned_false(self,
+ mock_run_cmd_remote,
+ mock_log,
+ mock_sleep):
+ """
+ Checks the proper functionality of start_vxlan_tool
+ function when the ps command returns no output
+ """
+
+ mock_run_cmd_remote.side_effect = [('', 'output', ''),
+ ('', '', '')]
+
+ mock_rc = 'nohup python /root/vxlan_tool.py --do ' + \
+ 'forward --interface eth0 > /dev/null 2>&1 &'
+
+ rcr_calls = [call(self.ip, mock_rc),
+ call(self.ip, 'ps aux | grep vxlan_tool')]
+
+ log_calls = [call('Failed to start the vxlan tool')]
+
+ self.assertFalse(test_utils.start_vxlan_tool(self.ip))
+ mock_sleep.assert_called_once_with(3)
+ mock_run_cmd_remote.assert_has_calls(rcr_calls)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.test_utils.logger', autospec=True)
+ @patch('sfc.lib.test_utils.run_cmd_remote', autospec=True)
+ def test_start_vxlan_tool_returned_output(self,
+ mock_run_cmd_remote,
+ mock_log,
+ mock_sleep):
+ """
+ Checks the proper functionality of start_vxlan_tool
+ function when the ps command returns output
+ """
+
+ mock_run_cmd_remote.side_effect = [('', 'output', ''),
+ ('', 'output', '')]
+
+ mock_rc = 'nohup python /root/vxlan_tool.py --do ' + \
+ 'forward --interface eth0 > /dev/null 2>&1 &'
+
+ rcr_calls = [call(self.ip, mock_rc),
+ call(self.ip, 'ps aux | grep vxlan_tool')]
+
+ self.assertIsNotNone(test_utils.start_vxlan_tool(self.ip))
+ mock_sleep.assert_called_once_with(3)
+ mock_run_cmd_remote.assert_has_calls(rcr_calls)
+ mock_log.error.assert_not_called()
+
+ @patch('sfc.lib.test_utils.run_cmd_remote', autospec=True)
+ def test_stop_vxlan_tool(self, mock_run_cmd_remote):
+ """
+ Checks the proper functionality of stop_vxlan_tool
+ function
+ """
+
+ mock_rc = 'pkill -f vxlan_tool.py'
+ test_utils.stop_vxlan_tool(self.ip)
+ mock_run_cmd_remote.assert_called_once_with(self.ip, mock_rc)
+
+ @patch('sfc.lib.test_utils.logger', autospec=True)
+ @patch('sfc.lib.test_utils.run_cmd_remote', autospec=True)
+ def test_netcat(self,
+ mock_run_cmd_remote,
+ mock_log):
+ """
+ Checks the proper functionality of netcat
+ function
+ """
+
+ dest_ip = 'mock_destination_ip'
+ c = 'nc -z -w 5 %s 1234' % dest_ip
+ log_calls = [call('Running [%s] from [%s] returns [0]' % (c, self.ip))]
+ mock_run_cmd_remote.return_value = (0, '', '')
+ result = test_utils.netcat(self.ip, dest_ip, 1234)
+ self.assertEqual(result, 0)
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.test_utils.netcat', autospec=True)
+ def test_is_ssh_blocked_returned_false(self, mock_netcat):
+ """
+ Checks the proper functionality of is_ssh_blocked
+ function when the returncode is zero
+ """
+
+ dest_ip = 'mock_destination_ip'
+ nc_calls = [call('10.10.10.10',
+ 'mock_destination_ip',
+ destination_port='22',
+ source_port=None)]
+
+ mock_netcat.return_value = 0
+ self.assertFalse(test_utils.is_ssh_blocked(self.ip, dest_ip))
+ mock_netcat.assert_has_calls(nc_calls)
+
+ @patch('sfc.lib.test_utils.netcat', autospec=True)
+ def test_is_ssh_blocked_returned_true(self, mock_netcat):
+ """
+ Checks the proper functionality of is_ssh_blocked
+ function when the returncode is a non-zero integer
+ """
+
+ dest_ip = 'mock_destination_ip'
+ nc_calls = [call('10.10.10.10',
+ 'mock_destination_ip',
+ destination_port='22',
+ source_port=None)]
+
+ mock_netcat.return_value = 1
+ self.assertTrue(test_utils.is_ssh_blocked(self.ip, dest_ip))
+ mock_netcat.assert_has_calls(nc_calls)
+
+ @patch('sfc.lib.test_utils.netcat', autospec=True)
+ def test_is_http_blocked_returned_false(self, mock_netcat):
+ """
+ Checks the proper functionality of is_http_blocked
+ function when the returncode is zero
+ """
+
+ dest_ip = 'mock_destination_ip'
+ nc_calls = [call('10.10.10.10',
+ 'mock_destination_ip',
+ destination_port='80',
+ source_port=None)]
+
+ mock_netcat.return_value = 0
+ self.assertFalse(test_utils.is_http_blocked(self.ip, dest_ip))
+ mock_netcat.assert_has_calls(nc_calls)
+
+ @patch('sfc.lib.test_utils.netcat', autospec=True)
+ def test_is_http_blocked_returned_true(self, mock_netcat):
+ """
+ Checks the proper functionality of is_http_blocked
+ function when the returncode is a non-zero integer
+ """
+
+ dest_ip = 'mock_destination_ip'
+ nc_calls = [call('10.10.10.10',
+ 'mock_destination_ip',
+ destination_port='80',
+ source_port=None)]
+
+ mock_netcat.return_value = 1
+ self.assertTrue(test_utils.is_http_blocked(self.ip, dest_ip))
+ mock_netcat.assert_has_calls(nc_calls)
+
+ @patch('time.strftime', autospec=True)
+ @patch('opnfv.utils.ovs_logger.OVSLogger', autospec=True)
+ def test_capture_ovs_logs(self,
+ mock_ovs_log,
+ mock_strftime):
+ """
+ Checks the proper functionality of capture_ovs_logs
+ function
+ """
+
+ log_calls = [call('controller_clients',
+ 'compute_clients',
+ 'error',
+ 'date_time')]
+
+ mock_strftime.return_value = 'date_time'
+ test_utils.capture_ovs_logs(mock_ovs_log,
+ 'controller_clients',
+ 'compute_clients',
+ 'error')
+
+ mock_strftime.assert_called_once_with('%Y%m%d-%H%M%S')
+ mock_ovs_log.dump_ovs_logs.assert_has_calls(log_calls)
+
+ def test_get_ssh_clients(self):
+ """
+ Checks the proper functionality of get_ssh_clients
+ function
+ """
+
+ mock_node_obj_one = Mock()
+ mock_node_obj_two = Mock()
+ mock_node_obj_one.ssh_client = 'ssh_client_one'
+ mock_node_obj_two.ssh_client = 'ssh_client_two'
+ nodes = [mock_node_obj_one, mock_node_obj_two]
+ result = test_utils.get_ssh_clients(nodes)
+ self.assertEqual(result, ['ssh_client_one', 'ssh_client_two'])
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.test_utils.logger', autospec=True)
+ @patch('sfc.lib.test_utils.run_cmd_remote', autospec=True)
+ def test_check_ssh_returned_false(self,
+ mock_run_cmd_remote,
+ mock_log,
+ mock_sleep):
+ """
+ Checks the proper functionality of check_ssh
+ function when some VNFs can't establish SSH connectivity
+ """
+
+ ips = ["ip_address-1",
+ "ip_address-2"]
+
+ rcr_calls = [call(ips[0], 'exit'),
+ call(ips[1], 'exit')]
+
+ log_calls = [call('Checking SSH connectivity ' +
+ 'to the SFs with ips %s' % str(ips))]
+
+ mock_run_cmd_remote.side_effect = [(1, '', ''),
+ (0, '', '')]
+
+ self.assertFalse(test_utils.check_ssh(ips, retries=1))
+ mock_run_cmd_remote.assert_has_calls(rcr_calls)
+ mock_log.info.assert_has_calls(log_calls)
+ mock_sleep.assert_called_once_with(3)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.test_utils.logger', autospec=True)
+ @patch('sfc.lib.test_utils.run_cmd_remote', autospec=True)
+ def test_check_ssh_returned_true(self,
+ mock_run_cmd_remote,
+ mock_log,
+ mock_sleep):
+ """
+ Checks the proper functionality of check_ssh
+ function when all VNFs can establish SSH connectivity
+ """
+
+ ips = ["ip_address-1",
+ "ip_address-2"]
+
+ rcr_calls = [call(ips[0], 'exit'),
+ call(ips[1], 'exit')]
+
+ log_calls = [call('Checking SSH connectivity to ' +
+ 'the SFs with ips %s' % str(ips)),
+ call('SSH connectivity to the SFs established')]
+
+ mock_run_cmd_remote.side_effect = [(0, '', ''),
+ (0, '', '')]
+
+ self.assertTrue(test_utils.check_ssh(ips, retries=1))
+ mock_run_cmd_remote.assert_has_calls(rcr_calls)
+ mock_log.info.assert_has_calls(log_calls)
+ mock_sleep.assert_not_called()
+
+ def test_fill_installer_dict(self):
+ """
+ Checks the proper functionality of fill_installer_dict
+ function
+ """
+
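+ # fill_installer_dict presumably (an assumption, judging by the
+ # expected dict below) expands a template per field, along the lines
+ # of 'defaults.installer.{}.{}'.format(installer_type, field).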
+ installer_type = 'mock_installer'
+ installer_yaml_fields = {
+ 'user': 'defaults.installer.mock_installer.user',
+ 'password': 'defaults.installer.mock_installer.password',
+ 'cluster': 'defaults.installer.mock_installer.cluster',
+ 'pkey_file': 'defaults.installer.mock_installer.pkey_file'
+ }
+ result = test_utils.fill_installer_dict(installer_type)
+ self.assertDictEqual(result, installer_yaml_fields)
diff --git a/test-requirements.txt b/test-requirements.txt
new file mode 100644
index 00000000..363f51dd
--- /dev/null
+++ b/test-requirements.txt
@@ -0,0 +1,8 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+coverage!=4.4 # Apache-2.0
+mock # BSD
+nose # LGPL
+yamllint
+pylint
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 00000000..c359c547
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,58 @@
+[tox]
+envlist = docs,docs-linkcheck,py27,yamllint,pylint
+skipsdist = True
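+# Run the whole matrix with plain `tox`, or a single env at a time,
+# e.g. `tox -e py27` for the unit tests or `tox -e pylint` for the linter.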
+
+[testenv]
+usedevelop = False
+setenv=
+ HOME = {envtmpdir}
+ PYTHONPATH = {toxinidir}
+deps =
+ -chttps://raw.githubusercontent.com/openstack/requirements/stable/queens/upper-constraints.txt
+ -chttps://git.opnfv.org/functest/plain/upper-constraints.txt?h=master
+ -r{toxinidir}/test-requirements.txt
+ -r{toxinidir}/requirements.txt
+install_command = pip install {opts} {packages}
+
+[testenv:docs]
+deps = -r{toxinidir}/docs/requirements.txt
+commands =
+ sphinx-build -b html -n -d {envtmpdir}/doctrees ./docs {toxinidir}/docs/_build/html
+ echo "Generated docs available in {toxinidir}/docs/_build/html"
+whitelist_externals = echo
+
+[testenv:docs-linkcheck]
+deps = -r{toxinidir}/docs/requirements.txt
+commands = sphinx-build -b linkcheck -d {envtmpdir}/doctrees ./docs {toxinidir}/docs/_build/linkcheck
+
+[testenv:py27]
+commands = nosetests --with-xunit \
+ --with-coverage \
+ --cover-tests \
+ --cover-package=sfc \
+ --cover-xml \
+ --cover-html \
+ sfc/unit_tests/unit
+
+[testenv:yamllint]
+basepython = python2.7
+files =
+ docs
+ sfc/tests/functest
+commands =
+ yamllint -s {[testenv:yamllint]files}
+
+[testenv:pylint]
+basepython = python2.7
+commands = pylint --rcfile=tox.ini sfc
+
+# pylintrc
+[MESSAGES CONTROL]
+disable=all
+
+enable=F,E,unreachable,duplicate-key,unnecessary-semicolon,
+ global-variable-not-assigned,unused-variable,binary-op-exception,
+ bad-format-string,anomalous-backslash-in-string,bad-open-mode
+
+[TYPECHECK]
+ignored-classes=Connection