summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.gitignore3
-rw-r--r--INFO4
-rw-r--r--INFO.yaml79
-rw-r--r--docs/_static/favicon.icobin0 -> 15086 bytes
-rw-r--r--docs/_static/logo.pngbin0 -> 2829 bytes
-rw-r--r--docs/conf.py1
-rw-r--r--docs/conf.yaml3
-rw-r--r--docs/development/overview/index.rst468
-rw-r--r--docs/index.rst24
-rw-r--r--docs/release/installation/index.rst167
-rw-r--r--docs/release/release-notes/index.rst4
-rw-r--r--docs/release/release-notes/release-notes.rst86
-rw-r--r--docs/release/scenarios/os-odl-bgpvpn/index.rst17
-rw-r--r--docs/release/scenarios/os-odl-bgpvpn/scenario.description.rst40
-rw-r--r--docs/requirements.txt2
-rwxr-xr-x[-rw-r--r--]odl-pipeline/lib/odl_reinstaller.sh20
-rw-r--r--odl-pipeline/lib/odl_reinstaller/odl_reinstaller.py68
-rwxr-xr-xodl-pipeline/lib/test_environment/test_environment.py1
-rwxr-xr-xodl-pipeline/lib/tripleo_introspector/tripleo_introspector.py1
-rwxr-xr-xodl-pipeline/lib/utils/processutils.py1
-rw-r--r--requirements.txt12
-rw-r--r--scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/README23
-rw-r--r--scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/ansible-role-requirements-pike.yml223
-rw-r--r--scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/ha/openstack_user_config.yml256
-rw-r--r--scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/ha/user_variables_os-odl-bgpvpn-ha.yml22
-rw-r--r--scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/mini/openstack_user_config.yml171
-rw-r--r--scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/noha/openstack_user_config.yml173
-rw-r--r--scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/openstack-ansible/pike/group_vars/quagga_all.yml46
-rw-r--r--scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/openstack-ansible/pike/playbooks/defaults/repo-packages/opendaylight.yml9
-rw-r--r--scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/openstack-ansible/pike/playbooks/inventory/env.d/neutron.yml87
-rw-r--r--scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/openstack-ansible/pike/playbooks/inventory/env.d/quagga.yml19
-rw-r--r--scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/openstack-ansible/pike/playbooks/os-setup-bgp-odl.yml77
-rw-r--r--scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/setup-openstack.yml29
-rw-r--r--scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/user_variables_os-odl-bgpvpn.yml52
-rw-r--r--scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/tasks/add-inventory-files-pike.yml16
-rw-r--r--scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/tasks/add-osa-files-pike.yml16
-rw-r--r--scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/tasks/main.yml39
-rw-r--r--scenarios/os-odl-bgpvpn/xci_overrides7
-rw-r--r--sdnvpn/artifacts/quagga_setup.sh120
-rw-r--r--sdnvpn/artifacts/testcase_1bis.yaml234
-rw-r--r--sdnvpn/artifacts/testcase_2bis.yaml289
-rw-r--r--sdnvpn/artifacts/testcase_4bis.yaml247
-rw-r--r--sdnvpn/artifacts/testcase_8bis.yaml173
-rw-r--r--sdnvpn/artifacts/testcase_8bis_upd.yaml17
-rw-r--r--sdnvpn/lib/config.py24
-rw-r--r--sdnvpn/lib/gather_logs.py11
-rw-r--r--sdnvpn/lib/openstack_utils.py1455
-rw-r--r--sdnvpn/lib/quagga.py33
-rw-r--r--sdnvpn/lib/results.py29
-rw-r--r--sdnvpn/lib/utils.py665
-rwxr-xr-xsdnvpn/sh_utils/fetch-log-script.sh12
-rw-r--r--sdnvpn/test/functest/config.yaml396
-rw-r--r--sdnvpn/test/functest/run_sdnvpn_tests.py118
-rw-r--r--sdnvpn/test/functest/tempest.py92
-rw-r--r--sdnvpn/test/functest/testcase_1.py93
-rw-r--r--sdnvpn/test/functest/testcase_10.py105
-rw-r--r--sdnvpn/test/functest/testcase_11.py39
-rw-r--r--sdnvpn/test/functest/testcase_12.py45
-rw-r--r--sdnvpn/test/functest/testcase_13.py119
-rw-r--r--sdnvpn/test/functest/testcase_1bis.py209
-rw-r--r--sdnvpn/test/functest/testcase_2.py48
-rw-r--r--sdnvpn/test/functest/testcase_2bis.py188
-rw-r--r--sdnvpn/test/functest/testcase_3.py328
-rw-r--r--sdnvpn/test/functest/testcase_4.py99
-rw-r--r--sdnvpn/test/functest/testcase_4bis.py215
-rw-r--r--sdnvpn/test/functest/testcase_7.py43
-rw-r--r--sdnvpn/test/functest/testcase_8.py144
-rw-r--r--sdnvpn/test/functest/testcase_8bis.py176
-rw-r--r--sdnvpn/test/functest/testcase_9.py21
-rw-r--r--setup.cfg5
-rw-r--r--test-requirements.txt5
-rw-r--r--tox.ini53
72 files changed, 6645 insertions, 1471 deletions
diff --git a/.gitignore b/.gitignore
index 332a121..444a0cb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,7 +3,6 @@
*.pyc
*~
.*.sw?
-/docs_build/
/docs_output/
/releng/
*.tar.gz
@@ -13,3 +12,5 @@ odl-pipeline/build/*
odl-pipeline/trash/*
odl-pipeline/lib/tmp
odl-pipeline/disks/*
+.tox
+docs/_build/*
diff --git a/INFO b/INFO
index a13a787..a78f570 100644
--- a/INFO
+++ b/INFO
@@ -2,8 +2,8 @@ Project: SDN Distributed Routing and VPN
Project Creation Date: September 1st, 2015
Project Category: Collaborative Development
Lifecycle State: Incubation
-Primary Contact: Tim Irnich (tim.irnich@ericsson.com)
-Project Lead: Tim Irnich (tim.irnich@ericsson.com)
+Primary Contact: Periyasamy Palanisamy (periyasamy.palanisamy@ericsson.com)
+Project Lead: Periyasamy Palanisamy (periyasamy.palanisamy@ericsson.com)
Jira Project Name: SDN VPN
Jira Project Prefix: sdnvpn
Mailing list tag: [sdnvpn]
diff --git a/INFO.yaml b/INFO.yaml
new file mode 100644
index 0000000..3968ad8
--- /dev/null
+++ b/INFO.yaml
@@ -0,0 +1,79 @@
+---
+project: 'SDN Distributed Routing and VPN'
+project_creation_date: 'September 1st, 2015'
+project_category: 'Collaborative Development'
+lifecycle_state: 'Incubation'
+project_lead: &opnfv_sdnvpn_ptl
+ name: 'Tim Irnich'
+ email: 'tim.irnich@ericsson.com'
+ id: 'timirnich'
+ company: 'ericsson.com'
+ timezone: 'Unknown'
+primary_contact: *opnfv_sdnvpn_ptl
+issue_tracking:
+ type: 'jira'
+ url: 'https://jira.opnfv.org/projects/sdnvpn'
+ key: 'sdnvpn'
+mailing_list:
+ type: 'mailman2'
+ url: 'opnfv-tech-discuss@lists.opnfv.org'
+ tag: '[sdnvpn]'
+realtime_discussion:
+ type: irc
+ server: 'freenode.net'
+ channel: '#opnfv-sdnvpn'
+meetings:
+ - type: 'gotomeeting+irc'
+ agenda: # eg: 'https://wiki.opnfv.org/display/'
+ url: # eg: 'https://global.gotomeeting.com/join/819733085'
+ server: 'freenode.net'
+ channel: '#opnfv-meeting'
+ repeats: 'weekly'
+ time: # eg: '16:00 UTC'
+repositories:
+ - 'sdnvpn'
+committers:
+ - <<: *opnfv_sdnvpn_ptl
+ - name: 'Prem Sankar Gopannan'
+ email: 'prem.sankar.g@ericsson.com'
+ company: 'ericsson.com'
+ id: 'premsankar74'
+ - name: 'Nikolas Hermanns'
+ email: 'nikolas.hermanns@ericsson.com'
+ company: 'ericsson.com'
+ id: 'enikher'
+ - name: 'Jose Lausuch'
+ email: 'jalausuch@suse.com'
+ company: 'suse.com'
+ id: 'jose.lausuch'
+ - name: 'Thomas Morin'
+ email: 'thomas.morin@orange.com'
+ company: 'orange.com'
+ id: 'tmmorin'
+ - name: 'Thomas Sounapoglou'
+ email: 'soth@intracom-telecom.com'
+ company: 'intracom-telecom.com'
+ id: 'tomsou'
+ - name: 'Periyasamy Palanisamy'
+ email: 'periyasamy.palanisamy@ericsson.com'
+ company: 'ericsson.com'
+ id: 'pperiyasamy'
+ - name: 'Periyasamy Palanisamy'
+ email: 'periyasamy.palanisamy@ericsson.com'
+ company: 'ericsson.com'
+ id: 'pperiyasamy'
+ - name: 'Nikos Karandreas'
+ email: 'nick@intracom-telecom.com'
+ company: 'intracom-telecom.com'
+ id: 'nick_kar'
+ - name: 'Dimitrios Tsiolakis'
+ email: 'dmts@intracom-telecom.com'
+ company: 'intracom-telecom.com'
+ id: 'dimitris_'
+tsc:
+ # yamllint disable rule:line-length
+ approval: 'http://meetbot.opnfv.org/meetings/opnfv-meeting/2015/opnfv-meeting.2015-09-01-13.59.html'
+ # yamllint enable rule:line-length
+ changes:
+ - type: 'promotion'
+ link: '(Helpdesk#26575)'
diff --git a/docs/_static/favicon.ico b/docs/_static/favicon.ico
new file mode 100644
index 0000000..bbe55ab
--- /dev/null
+++ b/docs/_static/favicon.ico
Binary files differ
diff --git a/docs/_static/logo.png b/docs/_static/logo.png
new file mode 100644
index 0000000..1519503
--- /dev/null
+++ b/docs/_static/logo.png
Binary files differ
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 0000000..eb12e74
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1 @@
+from docs_conf.conf import * # noqa: F401,F403
diff --git a/docs/conf.yaml b/docs/conf.yaml
new file mode 100644
index 0000000..4175c7c
--- /dev/null
+++ b/docs/conf.yaml
@@ -0,0 +1,3 @@
+---
+project_cfg: opnfv
+project: SDNVPN
diff --git a/docs/development/overview/index.rst b/docs/development/overview/index.rst
index e932f9a..1127130 100644
--- a/docs/development/overview/index.rst
+++ b/docs/development/overview/index.rst
@@ -1,20 +1,14 @@
-.. _sdnvpn-overview:
-
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) Tim Irnich, (tim.irnich@ericsson.com) and others
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) OPNFV, Ericsson AB and others.
=======
SDN VPN
=======
-A high-level description of the scenarios is provided in this section.
-For details of the scenarios and their provided capabilities refer to
-the scenario description document:
-http://artifacts.opnfv.org/danube/sdnpvn/scenarios/os-odl_l2-bgpvpn/index.html
-
The BGPVPN feature enables creation of BGP VPNs on the Neutron API according to the OpenStack
-BGPVPN blueprint at https://blueprints.launchpad.net/neutron/+spec/neutron-bgp-vpn.
+BGPVPN blueprint at `Neutron Extension for BGP Based VPN <https://blueprints.launchpad.net/neutron/+spec/neutron-bgp-vpn>`_.
+
In a nutshell, the blueprint defines a BGPVPN object and a number of ways
how to associate it with the existing Neutron object model, as well as a unique
definition of the related semantics. The BGPVPN framework supports a backend
@@ -26,238 +20,222 @@ implementation through the ODL NetVirt project.
SDNVPN Testing Suite
====================
-An overview of the SDNVPN Test is depicted here. More details for each test case are provided:
-https://wiki.opnfv.org/display/sdnvpn/SDNVPN+Testing
-
- BGPVPN Tempest test cases
- Create BGPVPN passes
- Create BGPVPN as non-admin fails
- Delete BGPVPN as non-admin fails
- Show BGPVPN as non-owner fails
- List BGPVPNs as non-owner fails
- Show network associated BGPVPNs as non-owner fails
- List network associated BGPVPNs as non-owner fails
- Associate/Deassociate a network to a BGPVPN resource passes
- Update route targets on a BGPVPN passes
- Update route targets on a BGPVPN as non-admin fails
- Reject the creation of BGPVPN with invalid route targets passes
- Reject the update of BGPVPN with invalid route targets passes
- Reject the association on an invalid network to a BGPVPN passes
- Reject the diassociation on an invalid network to a BGPVPN passes
- Associate/Deassociate a router to a BGPVPN resource passes
- Attach the subnet of an associated network to an associated router of the same BGVPN passes
-
-
-
- Functest scenario specific tests:
-
- Test Case 1 - VPN provides connectivity between subnets, using network association
- Name: VPN connecting Neutron networks and subnets
- Description: VPNs provide connectivity across Neutron networks and subnets if configured accordingly.
-
- Test setup procedure:
- Set up VM1 and VM2 on Node1 and VM3 on Node2, all having ports in the same Neutron Network N1
- Moreover all ports have 10.10.10/24 addresses (this subnet is denoted SN1 in the following)
- Set up VM4 on Node1 and VM5 on Node2, both having ports in Neutron Network N2
- Moreover all ports have 10.10.11/24 addresses (this subnet is denoted SN2 in the following)
-
- Test execution:
- Create VPN1 with eRT<>iRT (so that connected subnets should not reach each other)
- Associate SN1 to VPN1
- Ping from VM1 to VM2 should work
- Ping from VM1 to VM3 should work
- Ping from VM1 to VM4 should not work
- Associate SN2 to VPN1
- Ping from VM4 to VM5 should work
- Ping from VM1 to VM4 should not work (disabled until isolation fixed upstream)
- Ping from VM1 to VM5 should not work (disabled until isolation fixed upstream)
- Change VPN 1 so that iRT=eRT
- Ping from VM1 to VM4 should work
- Ping from VM1 to VM5 should work
-
- Test Case 2 - tenant separation
- Name: Using VPNs for tenant separation
- Description: Using VPNs to isolate tenants so that overlapping IP address ranges can be used
-
- Test setup procedure:
- Set up VM1 and VM2 on Node1 and VM3 on Node2, all having ports in the same Neutron Network N1.
- VM1 and VM2 have IP addresses in a subnet SN1 with range 10.10.10/24
- VM1: 10.10.10.11, running an HTTP server which returns "I am VM1" for any HTTP request
- (or something else than an HTTP server)
- VM2: 10.10.10.12, running an HTTP server which returns "I am VM2" for any HTTP request
- VM3 has an IP address in a subnet SN2 with range 10.10.11/24
- VM3: 10.10.11.13, running an HTTP server which returns "I am VM3" for any HTTP request
- Set up VM4 on Node1 and VM5 on Node2, both having ports in Neutron Network N2
- VM4 has an address in a subnet SN1b with range 10.10.10/24
- VM4: 10.10.10.12 (the same as VM2), running an HTTP server which returns "I am VM4" for any HTTP request
- VM5 has an address in a subnet SN2b with range 10.10.11/24
- VM5: 10.10.11.13 (the same as VM3), running an HTTP server which returns "I am VM5" for any HTTP request
-
- Test execution:
- Create VPN 1 with iRT=eRT=RT1 and associate N1 to it
- HTTP from VM1 to VM2 and VM3 should work
- It returns "I am VM2" and "I am VM3" respectively
- HTTP from VM1 to VM4 and VM5 should not work
- It never returns "I am VM4" or "I am VM5"
- Create VPN2 with iRT=eRT=RT2 and associate N2 to it
- HTTP from VM4 to VM5 should work
- It returns "I am VM5"
- HTTP from VM4 to VM1 and VM3 should not work
- It never returns "I am VM1" or "I am VM3"
-
-
- Test Case 3 - Data Center Gateway integration
- Name: Data Center Gateway integration
- Description: Investigate the peering functionality of BGP protocol,
- using a Zrpcd/Quagga router and OpenDaylight Controller
-
- Test setup procedure:
- Search in the pool of nodes and find one Compute node and one Controller nodes, that have OpenDaylight controller running
- Start an instance using ubuntu-16.04-server-cloudimg-amd64-disk1.img image and in it run the Quagga setup script
- Start bgp router in the Controller node, using odl:configure-bgp
-
- Test execution:
- Set up a Quagga instance in a nova compute node
- Start a BGP router with OpenDaylight in a controller node
- Add the Quagga running in the instance as a neighbor
- Check that bgpd is running
- Verify that the OpenDaylight and gateway Quagga peer each other
- Start an instance in a second nova compute node and connect it with a new network, (Network 3-3).
- Create a bgpvpn (include parameters route-distinguisher and route-targets) and associate it with the network created
- Define the same route-distinguisher and route-targets on the simulated quagga side
- Check that the routes from the Network 3-3 are advertised towards simulated Quagga VM
-
- Test Case 4 - VPN provides connectivity between subnets using router association
- Functest: variant of Test Case 1.
- Set up a Router R1 with one connected network/subnet N1/S1.
- Set up a second network N2.
- Create VPN1 and associate Router R1 and Network N2 to it.
- Hosts from N2 should be able to reach hosts in N1.
-
- Name: VPN connecting Neutron networks and subnets using router association
- Description: VPNs provide connectivity across Neutron networks and subnets if configured accordingly.
-
- Test setup procedure:
- Set up VM1 and VM2 on Node1 and VM3 on Node2,
- All VMs have ports in the same Neutron Network N1 and 10.10.10/24 addresses
- (this subnet is denoted SN1 in the following).
- N1/SN1 are connected to router R1.
- Set up VM4 on Node1 and VM5 on Node2,
- Both VMs have ports in Neutron Network N2 and having 10.10.11/24 addresses
- (this subnet is denoted SN2 in the following)
-
- Test execution:
- Create VPN1 with eRT<>iRT (so that connected subnets should not reach each other)
- Associate R1 to VPN1
- Ping from VM1 to VM2 should work
- Ping from VM1 to VM3 should work
- Ping from VM1 to VM4 should not work
- Associate SN2 to VPN1
- Ping from VM4 to VM5 should work
- Ping from VM1 to VM4 should not work
- Ping from VM1 to VM5 should not work
- Change VPN1 so that iRT=eRT
- Ping from VM1 to VM4 should work
- Ping from VM1 to VM5 should work
-
- Test Case 7 - Network associate a subnet with a router attached to a VPN and
- verify floating IP functionality (disabled, because of ODL Bug 6962)
-
- A test for https://bugs.opendaylight.org/show_bug.cgi?id=6962
-
- Setup procedure:
- Create VM1 in a subnet with a router attached.
- Create VM2 in a different subnet with another router attached.
- Network associate them to a VPN with iRT=eRT
- Ping from VM1 to VM2 should work
- Assign a floating IP to VM1
- Pinging the floating IP should work
-
- Test Case 8 - Router associate a subnet with a router attached to a VPN and
- verify floating IP functionality
-
- Setup procedure:
- Create VM1 in a subnet with a router which is connected with the gateway
- Create VM2 in a different subnet without a router attached.
- Assoc the two networks in a VPN iRT=eRT
- One with router assoc, other with net assoc
- Try to ping from one VM to the other
- Assign a floating IP to the VM in the router assoc network
- Ping it
-
- Test Case 9 - Check fail mode in OVS br-int interfaces
- This testcase checks if the fail mode is always “secure”.
- To accomplish it, a check is performed on all OVS br-int interfaces, for all OpenStack nodes.
- The testcase is considered as successful if all OVS br-int interfaces have fail_mode=secure
-
-
- Test Case 10 - Check the communication between a group of VMs
- This testcase investigates if communication between a group of VMs is interrupted upon deletion
- and creation of VMs inside this group.
-
- Test case flow:
- Create 3 VMs: VM_1 on compute 1, VM_2 on compute 1, VM_3 on compute 2.
- All VMs ping each other.
- VM_2 is deleted.
- Traffic is still flying between VM_ 1 and VM_3.
- A new VM, VM_ 4 is added to compute 1.
- Traffic is not interrupted and VM_4 can be reached as well.
-
-
- Testcase 11: test Opendaylight resync and group_add_mod feature mechanisms
- This is testcase to test Opendaylight resync and group_add_mod feature functionalities
-
- Sub-testcase 11-1:
- Create and start 2 VMs, connected to a common Network.
- New groups should appear in OVS dump
- OVS disconnects and the VMs and the networks are cleaned.
- The new groups are still in the OVS dump,
- cause OVS is not connected anymore, so it is not notified that the groups are deleted
- OVS re-connects.
- The new groups should be deleted, as Opendaylight has to resync the groups totally and
- should remove the groups since VMS are deleted.
-
- Sub-testcase 11-2:
- Create and start 2 VMs, connected to a common Network.
- New groups should appear in OVS dump
- OVS disconnects.
- The new groups are still in the OVS dump, cause OVS is not connected anymore,
- so it is not notified that the groups are deleted
- OVS re-connects.
- The new groups should be still there, as the topology remains. Opendaylight Carbon's
- group_add_mod mechanism should handle the already existing group.
- OVS re-connects.
- The new groups should be still there, as the topology remains.
- Opendaylight Carbon’ group_add_mod mechanism should handle the already existing group.
-
- Testcase 12: Test Resync mechanism between Opendaylight and OVS
- This is the testcase to validate flows and groups are programmed correctly
- after resync which is triggered by OVS del-controller/set-controller commands
- and adding/remove iptables drop rule on OF port 6653.
-
- Sub-testcase 12-1:
- Create and start 2 VMs, connected to a common Network
- New flows and groups were added to OVS
- Reconnect the OVS by running del-ontroller and set-controller commands
- The flows and groups are still intact and none of the flows/groups
- are removed
- Reconnect the OVS by adding ip tables drop rule and then remove it
- The flows and groups are still intact and none of the flows/groups
- are removed
-
- Testcase 13: Test ECMP (Equal-cost multi-path routing) for the extra route
- This testcase validates spraying behavior in OvS when an extra route is
- configured such that it can be reached from two nova VMs in the
- same network.
-
- Setup procedure:
- Create and start VM1 and VM2 configured with sub interface set to same ip
- address in both VMs, connected to a common network/router.
- Update the VM1 and VM2's Neutron ports with allowed address pairs for sub
- interface ip/mac addresses.
- Create BGPVPN with two route distinguishers.
- Associate router with BGPVPN.
- Update the router with above sub-interface ip address with nexthops set to
- VMs ip addresses.
- Create VM3 and connected to the same network.
- Ping sub-interface IP address from VM3.
+An overview of the SDNVPN Test is depicted here. A more detailed description of each test case can
+be found at `SDNVPN Testing <https://wiki.opnfv.org/display/sdnvpn/SDNVPN+Testing>`_.
+
+Functest scenario specific tests
+""""""""""""""""""""""""""""""""""
+- **Test Case 1**: VPN provides connectivity between subnets, using network association
+
+ Name: VPN connecting Neutron networks and subnets
+ Description: VPNs provide connectivity across Neutron networks and subnets if configured accordingly.
+ Test setup procedure: Set up VM1 and VM2 on Node1 and VM3 on Node2, all having ports in the same Neutron Network N1
+
+ Moreover all ports have 10.10.10/24 addresses (this subnet is denoted SN1 in the following)
+ Set up VM4 on Node1 and VM5 on Node2, both having ports in Neutron Network N2
+ Moreover all ports have 10.10.11/24 addresses (this subnet is denoted SN2 in the following)
+
+ Test execution:
+ * Create VPN1 with eRT<>iRT (so that connected subnets should not reach each other)
+ * Associate SN1 to VPN1
+ * Ping from VM1 to VM2 should work
+ * Ping from VM1 to VM3 should work
+ * Ping from VM1 to VM4 should not work
+ * Associate SN2 to VPN1
+ * Ping from VM4 to VM5 should work
+ * Ping from VM1 to VM4 should not work (disabled until isolation fixed upstream)
+ * Ping from VM1 to VM5 should not work (disabled until isolation fixed upstream)
+ * Change VPN 1 so that iRT=eRT
+ * Ping from VM1 to VM4 should work
+ * Ping from VM1 to VM5 should work
+
+- **Test Case 2**: Tenant separation
+
+ Name: Using VPNs for tenant separation
+ Description: Using VPNs to isolate tenants so that overlapping IP address ranges can be used
+
+ Test setup procedure:
+ * Set up VM1 and VM2 on Node1 and VM3 on Node2, all having ports in the same Neutron Network N1.
+ * VM1 and VM2 have IP addresses in a subnet SN1 with range 10.10.10/24
+ * VM1: 10.10.10.11, running an HTTP server which returns "I am VM1" for any HTTP request (or something else than an HTTP server)
+ * VM2: 10.10.10.12, running an HTTP server which returns "I am VM2" for any HTTP request
+ * VM3 has an IP address in a subnet SN2 with range 10.10.11/24
+ * VM3: 10.10.11.13, running an HTTP server which returns "I am VM3" for any HTTP request
+ * Set up VM4 on Node1 and VM5 on Node2, both having ports in Neutron Network N2
+ * VM4 has an address in a subnet SN1b with range 10.10.10/24
+ * VM4: 10.10.10.12 (the same as VM2), running an HTTP server which returns "I am VM4" for any HTTP request
+ * VM5 has an address in a subnet SN2b with range 10.10.11/24
+ * VM5: 10.10.11.13 (the same as VM3), running an HTTP server which returns "I am VM5" for any HTTP request
+
+ Test execution:
+ * Create VPN 1 with iRT=eRT=RT1 and associate N1 to it
+ * HTTP from VM1 to VM2 and VM3 should work
+ It returns "I am VM2" and "I am VM3" respectively
+ * HTTP from VM1 to VM4 and VM5 should not work
+ It never returns "I am VM4" or "I am VM5"
+ * Create VPN2 with iRT=eRT=RT2 and associate N2 to it
+ * HTTP from VM4 to VM5 should work
+ It returns "I am VM5"
+ * HTTP from VM4 to VM1 and VM3 should not work
+ It never returns "I am VM1" or "I am VM3"
+
+
+- **Test Case 3**: Data Center Gateway integration
+
+ Name: Data Center Gateway integration
+ Description: Investigate the peering functionality of BGP protocol, using a Zrpcd/Quagga router
+ and OpenDaylight Controller
+
+ Test setup procedure:
+ * Search in the pool of nodes and find one Compute node and one Controller nodes, that have OpenDaylight controller running
+ * Start an instance using ubuntu-16.04-server-cloudimg-amd64-disk1.img image and in it run the Quagga setup script
+ * Start bgp router in the Controller node, using odl:configure-bgp
+
+ Test execution:
+ * Set up a Quagga instance in a nova compute node
+ * Start a BGP router with OpenDaylight in a controller node
+ * Add the Quagga running in the instance as a neighbor
+ * Check that bgpd is running
+ * Verify that the OpenDaylight and gateway Quagga peer each other
+ * Start an instance in a second nova compute node and connect it with a new network, (Network 3-3).
+ * Create a bgpvpn (include parameters route-distinguisher and route-targets) and associate it with the network created
+ * Define the same route-distinguisher and route-targets on the simulated quagga side
+ * Check that the routes from the Network 3-3 are advertised towards simulated Quagga VM
+
+- **Test Case 4**: VPN provides connectivity between subnets using router association
+
+ Functest: variant of Test Case 1.
+ * Set up a Router R1 with one connected network/subnet N1/S1.
+ * Set up a second network N2.
+ * Create VPN1 and associate Router R1 and Network N2 to it.
+ * Hosts from N2 should be able to reach hosts in N1.
+
+ Name: VPN connecting Neutron networks and subnets using router association
+ Description: VPNs provide connectivity across Neutron networks and subnets if configured accordingly.
+
+ Test setup procedure:
+ * Set up VM1 and VM2 on Node1 and VM3 on Node2,
+ * All VMs have ports in the same Neutron Network N1 and 10.10.10/24 addresses
+ * (this subnet is denoted SN1 in the following).
+ * N1/SN1 are connected to router R1.
+ * Set up VM4 on Node1 and VM5 on Node2,
+ * Both VMs have ports in Neutron Network N2 and having 10.10.11/24 addresses
+ * (this subnet is denoted SN2 in the following)
+
+ Test execution:
+ * Create VPN1 with eRT<>iRT (so that connected subnets should not reach each other)
+ * Associate R1 to VPN1
+ Ping from VM1 to VM2 should work
+ Ping from VM1 to VM3 should work
+ Ping from VM1 to VM4 should not work
+ * Associate SN2 to VPN1
+ Ping from VM4 to VM5 should work
+ Ping from VM1 to VM4 should not work
+ Ping from VM1 to VM5 should not work
+ * Change VPN1 so that iRT=eRT
+ Ping from VM1 to VM4 should work
+ Ping from VM1 to VM5 should work
+
+- **Test Case 7** - Network associate a subnet with a router attached to a VPN and verify floating IP
+ functionality (disabled, because of ODL Bug 6962)
+
+ A test for https://bugs.opendaylight.org/show_bug.cgi?id=6962
+
+ Setup procedure:
+ * Create VM1 in a subnet with a router attached.
+ * Create VM2 in a different subnet with another router attached.
+ * Network associate them to a VPN with iRT=eRT
+ * Ping from VM1 to VM2 should work
+ * Assign a floating IP to VM1
+ * Pinging the floating IP should work
+
+- **Test Case 8** - Router associate a subnet with a router attached to a VPN and
+ verify floating IP functionality
+
+ Setup procedure:
+ * Create VM1 in a subnet with a router which is connected with the gateway
+ * Create VM2 in a different subnet without a router attached.
+ * Assoc the two networks in a VPN iRT=eRT
+ * One with router assoc, other with net assoc
+ * Try to ping from one VM to the other
+ * Assign a floating IP to the VM in the router assoc network
+ * Ping it
+
+- **Test Case 9** - Check fail mode in OVS br-int interfaces
+
+ This testcase checks if the fail mode is always 'secure'.
+ To accomplish it, a check is performed on all OVS br-int interfaces, for all OpenStack nodes.
+ The testcase is considered as successful if all OVS br-int interfaces have fail_mode=secure
+
+- **Test Case 10** - Check the communication between a group of VMs
+
+ This testcase investigates if communication between a group of VMs is interrupted upon deletion
+ and creation of VMs inside this group.
+
+ Test case flow:
+ * Create 3 VMs: VM_1 on compute 1, VM_2 on compute 1, VM_3 on compute 2.
+ * All VMs ping each other.
+ * VM_2 is deleted.
+ * Traffic is still flying between VM_1 and VM_3.
+ * A new VM, VM_4 is added to compute 1.
+ * Traffic is not interrupted and VM_4 can be reached as well.
+
+
+- **Testcase 11**: test Opendaylight resync and group_add_mod feature mechanisms
+
+ This is testcase to test Opendaylight resync and group_add_mod feature functionalities
+
+ Sub-testcase 11-1:
+ * Create and start 2 VMs, connected to a common Network.
+ New groups should appear in OVS dump
+ * OVS disconnects and the VMs and the networks are cleaned.
+ The new groups are still in the OVS dump,
+ cause OVS is not connected anymore, so it is not notified that the groups are deleted
+ * OVS re-connects.
+ The new groups should be deleted, as Opendaylight has to resync the groups totally and
+ should remove the groups since VMS are deleted.
+
+ Sub-testcase 11-2:
+ * Create and start 2 VMs, connected to a common Network.
+ New groups should appear in OVS dump
+ * OVS disconnects.
+ The new groups are still in the OVS dump, cause OVS is not connected anymore,
+ so it is not notified that the groups are deleted
+ * OVS re-connects.
+ The new groups should be still there, as the topology remains. Opendaylight Carbon's
+ group_add_mod mechanism should handle the already existing group.
+ * OVS re-connects.
+ The new groups should be still there, as the topology remains.
+ Opendaylight Carbon's group_add_mod mechanism should handle the already existing group.
+
+- **Testcase 12**: Test Resync mechanism between Opendaylight and OVS
+ This is the testcase to validate flows and groups are programmed correctly
+ after resync which is triggered by OVS del-controller/set-controller commands
+ and adding/remove iptables drop rule on OF port 6653.
+
+ Sub-testcase 12-1:
+ * Create and start 2 VMs, connected to a common Network
+ New flows and groups were added to OVS
+ * Reconnect the OVS by running del-controller and set-controller commands
+ The flows and groups are still intact and none of the flows/groups
+ are removed
+ * Reconnect the OVS by adding ip tables drop rule and then remove it
+ The flows and groups are still intact and none of the flows/groups
+ are removed
+
+- **Testcase 13**: Test ECMP (Equal-cost multi-path routing) for the extra route
+
+ This testcase validates spraying behavior in OvS when an extra route is
+ configured such that it can be reached from two nova VMs in the
+ same network.
+
+ Setup procedure:
+ * Create and start VM1 and VM2 configured with sub interface set to same ip address in both VMs,
+ connected to a common network/router.
+ * Update the VM1 and VM2's Neutron ports with allowed address pairs for sub interface ip/mac
+ addresses.
+ * Create BGPVPN with two route distinguishers.
+ * Associate router with BGPVPN.
+ * Update the router with above sub-interface ip address with nexthops set to VMs ip addresses.
+ * Create VM3 and connected to the same network.
+ * Ping sub-interface IP address from VM3.
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 0000000..d58d5d5
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,24 @@
+.. _sdnvpn:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+*********************************
+OPNFV SDNVPN
+*********************************
+
+.. toctree::
+ :numbered:
+ :maxdepth: 3
+
+ release/release-notes/index
+ release/configguide/index
+ release/userguide/index
+ release/installation/index
+ release/scenarios/os-odl-bgpvpn/index
+ development/requirements/index
+ development/overview/index
+ development/design/index
+
diff --git a/docs/release/installation/index.rst b/docs/release/installation/index.rst
index 78bdc8d..089bc55 100644
--- a/docs/release/installation/index.rst
+++ b/docs/release/installation/index.rst
@@ -1,8 +1,6 @@
-.. _sdnvpn-installation:
-
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) Tim Irnich, (tim.irnich@ericsson.com) and others
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) OPNFV, Ericsson AB and others.
============================
SDN VPN feature installation
@@ -33,7 +31,9 @@ spec>.
When ODL is used as an SDN Controller in an OPNFV virtual deployment, ODL is
running on the OpenStack Controller VMs. It is therefore recommended to
-increase the amount of resources for these VMs.
+increase the amount of resources for these VMs. In case of Fuel, ODL runs
+in a separate VM; thus, the recommendation below is not applicable when
+deploying the scenario with the Fuel installer.
Our recommendation is to have 2 additional virtual cores and 8GB
additional virtual memory on top of the normally recommended
@@ -52,11 +52,11 @@ Installation using Fuel installer
Preparing the host to install Fuel by script
============================================
-.. Not all of these options are relevant for all scenarios. I advise following the
+.. Not all of these options are relevant for all scenarios. I advise following the
.. instructions applicable to the deploy tool used in the scenario.
-Before starting the installation of the os-odl-bgpnvp scenario some
-preparation of the machine that will host the Fuel VM must be done.
+Before starting the installation of the os-odl-bgpvpn-noha scenario the following
+preparation must be done on the machine that will host the Fuel VM.
Installation of required packages
@@ -66,17 +66,8 @@ Jumphost (or the host which serves the VMs for the virtual deployment) needs to
install the following packages:
::
- sudo apt-get install -y git make curl libvirt-bin libpq-dev qemu-kvm \
- qemu-system tightvncserver virt-manager sshpass \
- fuseiso genisoimage blackbox xterm python-pip \
- python-git python-dev python-oslo.config \
- python-pip python-dev libffi-dev libxml2-dev \
- libxslt1-dev libffi-dev libxml2-dev libxslt1-dev \
- expect curl python-netaddr p7zip-full
-
- sudo pip install GitPython pyyaml netaddr paramiko lxml scp \
- python-novaclient python-neutronclient python-glanceclient \
- python-keystoneclient debtcollector netifaces enum
+ sudo apt-get install -y git make curl libvirt-bin qemu-kvm \
+ python-pip python-dev
Download the source code and artifact
-------------------------------------
@@ -87,158 +78,58 @@ First of all the opnfv-fuel repository needs to be cloned:
git clone ssh://<user>@gerrit.opnfv.org:29418/fuel
-To check out a specific
-version of OPNFV, checkout the appropriate branch:
+To check out a specific version of OPNFV, checkout the appropriate branch:
::
cd fuel
- git checkout stable/<colorado|danube|euphrates>
-
-Now download the corresponding OPNFV Fuel ISO into an appropriate folder from
-the website
-::
- https://www.opnfv.org/software/downloads/release-archives
-
-Have in mind that the fuel repo version needs to map with the downloaded
-artifact. Note: it is also possible to build the Fuel image using the
-tools found in the fuel git repository, but this is out of scope of the
-procedure described here. Check the Fuel project documentation for more
-information on building the Fuel ISO.
-
+ git checkout stable/gambia
Simplified scenario deployment procedure using Fuel
===================================================
-This section describes the installation of the os-odl-bgpvpn-ha or
+This section describes the installation of the
os-odl-bgpvpn-noha OPNFV reference platform stack across a server cluster
or a single host as a virtual deployment.
-Scenario Preparation
---------------------
-dea.yaml and dha.yaml need to be copied and changed according to the lab-name/host
-where you deploy.
-Copy the full lab config from:
-::
-
- cp <path-to-opnfv-fuel-repo>/deploy/config/labs/devel-pipeline/elx \
- <path-to-opnfv-fuel-repo>/deploy/config/labs/devel-pipeline/<your-lab-name>
-
-Add at the bottom of dha.yaml
-::
-
- disks:
- fuel: 100G
- controller: 100G
- compute: 100G
-
- define_vms:
- controller:
- vcpu:
- value: 4
- memory:
- attribute_equlas:
- unit: KiB
- value: 16388608
- currentMemory:
- attribute_equlas:
- unit: KiB
- value: 16388608
-
-
-Check if the default settings in dea.yaml are in line with your intentions
-and make changes as required.
-
Installation procedures
-----------------------
-We describe several alternative procedures in the following.
-First, we describe several methods that are based on the deploy.sh script,
-which is also used by the OPNFV CI system.
-It can be found in the Fuel repository.
-
-In addition, the SDNVPN feature can also be configured manually in the Fuel GUI.
-This is described in the last subsection.
-
-Before starting any of the following procedures, go to
-::
-
- cd <opnfv-fuel-repo>/ci
-
-Full automatic virtual deployment High Availablity Mode
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The following command will deploy the high-availability flavor of SDNVPN scenario os-odl-bgpvpn-ha
-in a fully automatic way, i.e. all installation steps (Fuel server installation, configuration,
-node discovery and platform deployment) will take place without any further prompt for user input.
-::
-
- sudo bash ./deploy.sh -b file://<path-to-opnfv-fuel-repo>/config/ -l devel-pipeline -p <your-lab-name> -s os-odl_l2-bgpvpn-ha -i file://<path-to-fuel-iso>
+This chapter describes how to deploy the scenario with the use of deploy.sh script,
+which is also used by the OPNFV CI system. Script can be found in the Fuel
+repository.
Full automatic virtual deployment NO High Availability Mode
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-The following command will deploy the SDNVPN scenario in its non-high-availability flavor (note the
-different scenario name for the -s switch). Otherwise it does the same as described above.
-::
-
- sudo bash ./deploy.sh -b file://<path-to-opnfv-fuel-repo>/config/ -l devel-pipeline -p <your-lab-name> -s os-odl_l2-bgpvpn-noha -i file://<path-to-fuel-iso>
-
-Automatic Fuel installation and manual scenario deployment
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-A useful alternative to the full automatic procedure is to only autodeploy the Fuel host and to run host selection, role assignment and SDNVPN scenario configuration manually.
-::
-
- sudo bash ./deploy.sh -b file://<path-to-opnfv-fuel-repo>/config/ -l devel-pipeline -p <your-lab-name> -s os-odl_l2-bgpvpn-ha -i file://<path-to-fuel-iso> -e
-
-With -e option the installer does not launch environment deployment, so
-a user can do some modification before the scenario is really deployed.
-Another interesting option is the -f option which deploys the scenario using an existing Fuel host.
-
-The result of this installation is a fuel sever with the right config for
-BGPVPN. Now the deploy button on fuel dashboard can be used to deploy the environment.
-It is as well possible to do the configuration manuell.
-
-Feature configuration on existing Fuel
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-If a Fuel server is already provided but the fuel plugins for Opendaylight, Openvswitch
-and BGPVPN are not provided install them by:
+The following command will deploy the SDNVPN scenario in its non-high-availability flavor.
::
- cd /opt/opnfv/
- fuel plugins --install fuel-plugin-ovs-*.noarch.rpm
- fuel plugins --install opendaylight-*.noarch.rpm
- fuel plugins --install bgpvpn-*.noarch.rpm
-
-If plugins are installed and you want to update them use --force flag.
-
-Now the feature can be configured. Create a new environment with "Neutron with ML2 plugin" and
-in there "Neutron with tunneling segmentation".
-Go to Networks/Settings/Other and check "Assign public network to all nodes". This is required for
-features such as floating IP, which require the Compute hosts to have public interfaces.
-Then go to settings/other and check "OpenDaylight plugin", "Use ODL to manage L3 traffic",
-"BGPVPN plugin" and set the OpenDaylight package version to "5.2.0-1". Then you should
-be able to check "BGPVPN extensions" in OpenDaylight plugin section.
-
-Now the deploy button on fuel dashboard can be used to deploy the environment.
+ ci/deploy.sh -l <lab_name> \
+ -p <pod_name> \
+ -b <URI to configuration repo containing the PDF file> \
+ -s os-odl-bgpvpn-noha \
+ -D \
+ -S <Storage directory for disk images> |& tee deploy.log
Virtual deployment using Apex installer
=======================================
Prerequisites
-^^^^^^^^^^^^^
+-------------
+
For Virtual Apex deployment a host with Centos 7 is needed. This installation
was tested on centos-release-7-2.1511.el7.centos.2.10.x86_64 however any other
Centos 7 version should be fine.
Build and Deploy
-^^^^^^^^^^^^^^^^
-Download the Apex repo from opnfv gerrit and checkout stable/danube:
+----------------
+
+Download the Apex repo from opnfv gerrit and checkout stable/gambia:
::
git clone ssh://<user>@gerrit.opnfv.org:29418/apex
cd apex
- git checkout stable/danube
+ git checkout stable/gambia
In apex/contrib you will find simple_deploy.sh:
::
diff --git a/docs/release/release-notes/index.rst b/docs/release/release-notes/index.rst
index 2b6664a..c7e07ee 100644
--- a/docs/release/release-notes/index.rst
+++ b/docs/release/release-notes/index.rst
@@ -4,9 +4,9 @@
.. http://creativecommons.org/licenses/by/4.0
.. (c) Nikolas Hermanns, (nikolas.hermanns@ericsson.com) and others
-==================
+=====================
SDN VPN release notes
-==================
+=====================
.. toctree::
:maxdepth: 3
diff --git a/docs/release/release-notes/release-notes.rst b/docs/release/release-notes/release-notes.rst
index 9f4ad21..a5b671c 100644
--- a/docs/release/release-notes/release-notes.rst
+++ b/docs/release/release-notes/release-notes.rst
@@ -1,25 +1,28 @@
+.. _-os-odl-bgpvpn-noha:
+
+.. _-os-odl-bgpvpn-ha:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) Periyasamy Palanisamy <periyasamy.palanisamy@ericsson.com> and others
+
=====================
SDN VPN Release Notes
=====================
-License
-=======
-
-This work is licensed under a Creative Commons Attribution 4.0 International
-License. .. http://creativecommons.org/licenses/by/4.0 ..
-(c) Tim Irnich (Ericsson) and others
Abstract
========
-This document comprises the release notes for the SDN VPN feature contained in the Euphrates
+This document comprises the release notes for the SDN VPN feature contained in the Gambia
release of OPNFV.
Important notes
===============
-In the Euphrates release, SDN VPN only supports ODL as a backend. Make sure to always deploy
-SDN VPN and ODL together. Make use of deployment scenarios including the SDNVPN feature such as os_odl_bgpvpn_{ha|noha}.
+In the Gambia release, SDN VPN only supports ODL as a backend. Make sure to always deploy
+SDN VPN and ODL together. Make use of deployment scenarios including the SDNVPN feature such
+as os_odl_bgpvpn_{ha|noha}.
Summary
=======
@@ -36,39 +39,31 @@ Release Data
| **Project** | sdnvpn |
| | |
+--------------------------------------+-------------------------------------------+
-| **Repo/tag** | Euhprates 1.0 |
+| **Repo/tag** | opnfv-7.1.0 |
| | |
+--------------------------------------+-------------------------------------------+
-| **Release designation** | Euphrates 1.0 - initial release |
+| **Release designation** | Gambia 7.1 |
| | |
+--------------------------------------+-------------------------------------------+
-| **Release date** | Oct 20 2017 |
+| **Release date** | Dec 14, 2018 |
| | |
+--------------------------------------+-------------------------------------------+
-| **Purpose of the delivery** | Rebased to new upstream versions |
-| | Removed Fuel deployment scenario |
-| | Couple of new test cases |
+| **Purpose of the delivery** | OPNFV Gambia 7.1 release |
| | |
+--------------------------------------+-------------------------------------------+
Version change
--------------
-Compared to the Colorado release, a new version of ODL including
-several critical bugfixes is deployed. Together with the new BGP
-stack, integration with Apex, the Horizon dashboards and bugfixes the
-user has even more features available. New testcases were added to
-functest to guarantee functionality.
+Compared to the Fraser release, functest testcases were enriched to guarantee functionality.
+Also several enhancements were added to improve testing efficiency.
Module version changes
~~~~~~~~~~~~~~~~~~~~~~
-ODL has been upgraded to Carbon SR2.
+.. ODL has been upgraded to Nitrogen.
Document changes
~~~~~~~~~~~~~~~~
-The previous monolithic user guide, which was also covering install and
-config, has been broken up into multiple documents.
-
Reason for version
------------------
@@ -80,7 +75,7 @@ SDN VPN adds the possibility to create and associate BGP/MPLS based
Virtual Private Networks (VPNs) through the OpenStack Neutron BGPVPN
API extension.
-There has been no functional scope change in the Euphrates release, the
+There has been no functional scope change in the Gambia release, the
main deliverable is newer upstream versions and additional test
coverage.
@@ -96,21 +91,23 @@ Deliverables
Software deliverables
~~~~~~~~~~~~~~~~~~~~~
-- Changes to Apex to enable a BGPVPN deployment and integration of Quagga BGP.
-- Integration of VPN Service functional tests and BGPVPN API tests into Functest framework.
-- Enabling performance tests in Yardstick.
-- Changes to 6Wind Zrpcd to enable integration with Apex.
+- Orchestrate BGPVPN with Openstack HEAT templates
+- Verify BGP route exchange with a peer in both directions
+- Support for ECMP load balancing
+- Consolidate image creation in Apex and Fuel
+- Remove the dependency between not running quagga and created flows
+- Delete ODL configuration after each test case run
+- Add BGPVPN scenarios to XCI and enable SDNVPN tests
+- Enable and test ODL clustering for bgpvpn-ha scenario
+
Documentation deliverables
~~~~~~~~~~~~~~~~~~~~~~~~~~
-- Paragraph on SDN VPN feature for platform overview
-
-- Configuration guide
-
-- User guide
-
+- Installation guide
- Release notes (this document)
+- Overview
+- Test scenario description
Known Limitations, Issues and Workarounds
=========================================
@@ -125,20 +122,19 @@ Known issues
Moving to the new NetVirt has caused a regression in which a subnet
cannot be both attached to a Router and Network associated to a VPN.
This has been worked around in the tests and the upstream bug is being
-tracked [0].
+tracked [0]_ and [2]_.
NAT for a VM which is in a private neutron network does not work. Instances
created in subnets that are connected to the public network via a gateway
should have external connectivity. This does not work and can be worked
-around by assigning a Floating IP to the instance [1].
+around by assigning a Floating IP to the instance [1]_.
Currently we observe non-deterministic failures of individual tests within the
SDNVPN section of the Functest suite, which are not reproducible in the development
environment. In a development environment all Functest tests are successful.
-Sporadic failures have been observed in test cases 1,4 and 8. Furthermore, the
+Sporadic failures have been observed in test cases 4 and 8. Furthermore, the
check of bgpd service running on Controller node, in test case 3, has a constant
-failure trend for Apex environment. Also for Apex environment we observe constant
-failure in refstack, during the server action test_reboot_server_hard [2].
+failure trend for Apex environment.
Workarounds
-----------
@@ -149,9 +145,6 @@ by not network associating subnets attached to routers.
The NAT issues are worked around by assigning floating IPs to VMs that require
external connectivity.
-For the failures observed in CI, no workaround is required since the faults were
-not reproducible in live deployments.[3]
-
Test results
============
@@ -161,7 +154,6 @@ with the exceptions described above.
References
==========
-[0] https://jira.opnfv.org/projects/SDNVPN/issues/SDNVPN-94
-[1] https://jira.opnfv.org/projects/SDNVPN/issues/SDNVPN-99
-[2] https://jira.opnfv.org/projects/SDNVPN/issues/SDNVPN-172
-[3] https://jira.opnfv.org/projects/SDNVPN/issues/SDNVPN-170
+.. [0] https://jira.opnfv.org/projects/SDNVPN/issues/SDNVPN-94
+.. [1] https://jira.opnfv.org/projects/SDNVPN/issues/SDNVPN-99
+.. [2] https://jira.opendaylight.org/browse/NETVIRT-932
diff --git a/docs/release/scenarios/os-odl-bgpvpn/index.rst b/docs/release/scenarios/os-odl-bgpvpn/index.rst
index b50ac21..1c9c74b 100644
--- a/docs/release/scenarios/os-odl-bgpvpn/index.rst
+++ b/docs/release/scenarios/os-odl-bgpvpn/index.rst
@@ -1,14 +1,15 @@
-.. _sdnvpn-os-odl-bgpvpn-noha:
-
-.. _sdnvpn-os-odl-bgpvpn-ha:
-
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. (c) Tim Irnich <tim.irnich@ericsson.com> and others
+.. (c) Periyasamy Palanisamy <periyasamy.palanisamy@ericsson.com> and others
+
+.. _os-odl-bgpvpn-noha:
+
+.. _os-odl-bgpvpn-ha:
+
+================================================================
+os-odl-bgpvpn-noha and os-odl-bgpvpn-ha overview and description
+================================================================
-=========================================
-os-odl_l2-bgpvpn overview and description
-=========================================
.. This document will be used to provide a description of the scenario for an end user.
.. You should explain the purpose of the scenario, the types of capabilities provided and
.. the unique components that make up the scenario including how they are used.
diff --git a/docs/release/scenarios/os-odl-bgpvpn/scenario.description.rst b/docs/release/scenarios/os-odl-bgpvpn/scenario.description.rst
index 2641d82..8d1cb9c 100644
--- a/docs/release/scenarios/os-odl-bgpvpn/scenario.description.rst
+++ b/docs/release/scenarios/os-odl-bgpvpn/scenario.description.rst
@@ -1,6 +1,6 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) Tim Irnich (tim.irnich@ericsson.com) and Nikolas Hermanns (nikolas.hermanns@ericsson.com)
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) Periyasamy Palanisamy <periyasamy.palanisamy@ericsson.com> and others
Introduction
============
@@ -21,9 +21,8 @@ deployment scenarios, which is derived from the baseline
os-odl-nofeature scenario.
The BGPVPN feature enables creation of BGP VPNs on the Neutron API
-according to the OpenStack BGPVPN blueprint at
-https://blueprints.launchpad.net/neutron/+spec/neutron-bgp-vpn. In a
-nutshell, the blueprint defines a BGPVPN object and a number of ways how
+according to the `OpenStack BGPVPN blueprint <https://blueprints.launchpad.net/neutron/+spec/neutron-bgp-vpn>`_.
+In a nutshell, the blueprint defines a BGPVPN object and a number of ways how
to associate it with the existing Neutron object model, as well as a
unique definition of the related semantics. The BGPVPN framework
supports a backend driver model with currently available drivers for
@@ -72,28 +71,23 @@ Scenario usage overview
Configuring SDNVPN features
---------------------------
-Each installer has specific procedures to deploy the OPNFV platform so that the SDNVPN feature is enabled.
+Apex installer has specific procedures to deploy the OPNFV platform so that the SDNVPN feature is
+enabled.
-Fuel installer configuration
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+APEX installer configuration and BGPVPN deployment
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-To install the SDNVPN feature using Fuel, follow the Fuel installation guide ensuring to select the SDNVPN
-feature when prompted <add link to Fuel docs once artifact locations are known>.
+To install the SDNVPN feature using the APEX installer, follow the
+`APEX installation guide <https://wiki.opnfv.org/display/apex/Integration+Guide>`_. When prompted activate the SDNVPN feature based on
+openstack configuration:
-This will trigger installation of the OpenStack BGPVPN API extension for
-Neutron, set up for using the ODL driver, in addition to vanilla Neutron.
-In addition, the required karaf features will be activated when ODL is installed and the compute nodes
-will be configured including the VPN Service internal transport tunnel mesh.
+* For os-odl-bgpvpn-noha deployment:
-No post-deploy configuration is necessary. The Fuel BGPVPN plugin and the ODL plugin
-should set up the cluster ready for BGPVPNs being created.
+ python3 deploy.py -v -n ../config/network/network_settings.yaml -d ../config/deploy/os-odl-bgpvpn-noha.yaml --deploy-dir ../build --lib-dir ../lib --image-dir ../.build --virtual-computes 2 --virtual-default-ram 16
-APEX installer configuration
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+* For os-odl-bgpvpn-ha deployment:
-To install the SDNVPN feature using the APEX installer, follow the APEX installation guide
-(https://wiki.opnfv.org/display/apex/Integration+Guide) and activate the SDNVPN feature when prompted (step "# Now execute a deployment")
-i.e. python3 deploy.py -v -n ../config/network/network_settings.yaml -d ../config/deploy/os-odl-bgpvpn-noha.yaml --deploy-dir ../build --lib-dir ../lib --image-dir ../.build --virtual-computes 2 --virtual-default-ram 16
+ python3 deploy.py -v -n ../config/network/network_settings.yaml -d ../config/deploy/os-odl-bgpvpn-ha.yaml --deploy-dir ../build --lib-dir ../lib --image-dir ../.build --virtual-computes 2 --virtual-default-ram 16
Limitations, Issues and Workarounds
===================================
@@ -112,5 +106,5 @@ Integration with data center gateway will not work due to missing OVS patches fo
References
==========
-For more information on the OPNFV Danube release, please visit
-https://www.opnfv.org/software
+For more information on the OPNFV latest stable release, please visit
+https://www.opnfv.org/software \ No newline at end of file
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 0000000..9fde2df
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,2 @@
+lfdocs-conf
+sphinx_opnfv_theme
diff --git a/odl-pipeline/lib/odl_reinstaller.sh b/odl-pipeline/lib/odl_reinstaller.sh
index cb34489..a55f16c 100644..100755
--- a/odl-pipeline/lib/odl_reinstaller.sh
+++ b/odl-pipeline/lib/odl_reinstaller.sh
@@ -11,4 +11,24 @@ set -e
export PYTHONPATH=$PYTHONPATH:$DIR
mkdir -p $DIR/tmp
cd $DIR
+cat > opendaylight.service << EOF
+[Unit]
+Description=OpenDaylight SDN Controller
+Documentation=https://wiki.opendaylight.org/view/Main_Page http://www.opendaylight.org/
+After=network.service
+
+[Service]
+Type=forking
+ExecStart=/opt/opendaylight/bin/start
+Environment=_JAVA_OPTIONS='-Djava.net.preferIPv4Stack=true'
+User=odl
+Group=odl
+SuccessExitStatus=143
+LimitNOFILE=102400
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
+EOF
+curl --fail --silent -L -O http://artifacts.opnfv.org/apex/random/aaa-cli-jar.jar
python ./odl_reinstaller/odl_reinstaller.py $@
diff --git a/odl-pipeline/lib/odl_reinstaller/odl_reinstaller.py b/odl-pipeline/lib/odl_reinstaller/odl_reinstaller.py
index c0cf075..3d29724 100644
--- a/odl-pipeline/lib/odl_reinstaller/odl_reinstaller.py
+++ b/odl-pipeline/lib/odl_reinstaller/odl_reinstaller.py
@@ -8,6 +8,7 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
#
+import os
import re
import time
@@ -19,13 +20,14 @@ from utils.service import Service
from utils.node_manager import NodeManager
from utils import utils_yaml
+ODL_SYSTEMD = '/usr/lib/systemd/system/opendaylight.service'
+ODL_AAA_JAR = '/opt/opendaylight/bin/aaa-cli-jar.jar'
+
@for_all_methods(log_enter_exit)
class ODLReInstaller(Service):
def __init__(self):
- self.netvirt_url = "restconf/operational/network-topology:" \
- "network-topology/topology/netvirt:1"
self.nodes = None
self.odl_node = None
@@ -46,17 +48,27 @@ class ODLReInstaller(Service):
if 'controller' in node.execute('echo $HOSTNAME')[0]:
first_controller = node
# Check if ODL runs on this node
- rv, _ = node.execute('ps aux |grep -v grep |grep karaf',
- as_root=True, check_exit_code=[0, 1])
- if 'java' in rv:
+ jrv, _ = node.execute('ps aux |grep -v grep |grep karaf',
+ as_root=True, check_exit_code=[0, 1])
+ rv, (_, rc) = node.execute('docker ps | grep opendaylight_api',
+ as_root=True, check_exit_code=[0, 1])
+ if rc == 0:
+ LOG.info("ODL is running as docker container")
+ node.execute('docker stop opendaylight_api', as_root=True)
+ self.odl_node = node
+ elif 'java' in jrv:
+ LOG.info("ODL is running as systemd service")
self.odl_node = node
- LOG.info("ODL node found: {}".format(self.odl_node.name))
node.execute('systemctl stop opendaylight', as_root=True)
+
+ if self.odl_node is not None:
+ LOG.info("ODL node found: {}".format(self.odl_node.name))
# rc 5 means the service is not there.
+ # rc 4 means the service cannot be found
node.execute('systemctl stop bgpd', as_root=True,
- check_exit_code=[0, 5])
+ check_exit_code=[0, 4, 5])
node.execute('systemctl stop zrpcd', as_root=True,
- check_exit_code=[0, 5])
+ check_exit_code=[0, 4, 5])
self.disconnect_ovs(node)
@@ -66,12 +78,12 @@ class ODLReInstaller(Service):
self.reinstall_odl(self.odl_node, odl_artifact)
# Wait for ODL to come back up
- full_netvirt_url = "http://{}:8081/{}".format(
- self.odl_node.config['address'], self.netvirt_url)
+ full_netvirt_url = "http://{}:8081/diagstatus".format(
+ self.odl_node.config['address'])
counter = 1
while counter <= 10:
try:
- self.odl_node.execute("curl --fail -u admin:admin {}".format(
+ self.odl_node.execute("curl --fail {}".format(
full_netvirt_url))
LOG.info("New OpenDaylight NetVirt is Up")
break
@@ -81,7 +93,7 @@ class ODLReInstaller(Service):
LOG.warning("NetVirt not detected as up after 10 "
"attempts...deployment may be unstable!")
counter += 1
- time.sleep(10)
+ time.sleep(15)
# Reconnect OVS instances
LOG.info("Reconnecting OVS instances")
@@ -97,9 +109,11 @@ class ODLReInstaller(Service):
def _start_service_if_enabled(self, node, service):
# rc 3 means service inactive
+ # rc 4 means service cannot be found
# rc 5 mean no service available
status, _ = node.execute('systemctl status {}'.
- format(service), check_exit_code=[0, 3, 5])
+ format(service), check_exit_code=[0, 3,
+ 4, 5])
if 'service; enabled' in status:
LOG.info('Starting {}'.format(service))
node.execute('systemctl start {}'.format(service), as_root=True)
@@ -115,11 +129,34 @@ class ODLReInstaller(Service):
node.execute('rm -rf /opt/opendaylight/', as_root=True)
node.execute('mkdir -p /opt/opendaylight/', as_root=True)
if 'tar.gz' in odl_artifact:
+ # check if systemd service exists (may not if this was a docker
+ # deployment)
+ if not node.is_file(ODL_SYSTEMD):
+ LOG.info("Creating odl user, group, and systemd file")
+ # user/group may already exist so just ignore errors here
+ node.execute('groupadd odl', as_root=True,
+ check_exit_code=False)
+ node.execute('useradd -g odl odl', as_root=True,
+ check_exit_code=False)
+ systemd_file = os.path.join(os.getcwd(),
+ 'opendaylight.service')
+ node.copy('to', systemd_file, '/tmp/opendaylight.service',
+ check_exit_code=True)
+ node.execute('mv /tmp/opendaylight.service %s' % ODL_SYSTEMD,
+ as_root=True)
+ node.execute('systemctl daemon-reload', as_root=True)
LOG.info('Extracting %s to /opt/opendaylight/ on node %s'
% (odl_artifact, node.name))
node.execute('tar -zxf %s --strip-components=1 -C '
'/opt/opendaylight/'
% (tar_tmp_path + odl_artifact), as_root=True)
+ # AAA CLI jar for creating ODL user will be missing in regular
+ # netvirt distro. Only part of full distro.
+ if not node.is_file(ODL_AAA_JAR):
+ LOG.info("ODL AAA CLI jar missing, will copy")
+ aaa_cli_file = os.path.join(os.getcwd(),
+ 'aaa-cli-jar.jar')
+ node.copy('to', aaa_cli_file, ODL_AAA_JAR)
node.execute('chown -R odl:odl /opt/opendaylight', as_root=True)
if '.rpm' in odl_artifact:
LOG.info('Installing %s on node %s'
@@ -129,7 +166,11 @@ class ODLReInstaller(Service):
% (tar_tmp_path + odl_artifact), as_root=True)
node.execute('rm -rf ' + tar_tmp_path, as_root=True)
LOG.info('Starting Opendaylight on node %s' % node.name)
+ # we do not want puppet-odl to install the repo or the package, so we
+ # use tags to ignore those resources
node.execute('puppet apply -e "include opendaylight" '
+ '--tags file,concat,file_line,augeas,odl_user,'
+ 'odl_keystore,service '
'--modulepath=/etc/puppet/modules/ '
'--verbose --debug --trace --detailed-exitcodes',
check_exit_code=[2], as_root=True)
@@ -240,5 +281,6 @@ class ODLReinstallerException(Exception):
def main():
ODLReInstaller().start()
+
if __name__ == '__main__':
main()
diff --git a/odl-pipeline/lib/test_environment/test_environment.py b/odl-pipeline/lib/test_environment/test_environment.py
index 65d40bb..a56c36f 100755
--- a/odl-pipeline/lib/test_environment/test_environment.py
+++ b/odl-pipeline/lib/test_environment/test_environment.py
@@ -157,5 +157,6 @@ def main():
main = TestEnvironment()
main.start()
+
if __name__ == '__main__':
main()
diff --git a/odl-pipeline/lib/tripleo_introspector/tripleo_introspector.py b/odl-pipeline/lib/tripleo_introspector/tripleo_introspector.py
index aa6ebbb..9258e83 100755
--- a/odl-pipeline/lib/tripleo_introspector/tripleo_introspector.py
+++ b/odl-pipeline/lib/tripleo_introspector/tripleo_introspector.py
@@ -122,5 +122,6 @@ class TripleOInspectorException(Exception):
def main():
TripleOIntrospector().start()
+
if __name__ == '__main__':
main()
diff --git a/odl-pipeline/lib/utils/processutils.py b/odl-pipeline/lib/utils/processutils.py
index 98162c8..901e74b 100755
--- a/odl-pipeline/lib/utils/processutils.py
+++ b/odl-pipeline/lib/utils/processutils.py
@@ -29,6 +29,7 @@ def _subprocess_setup():
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
# NOTE(flaper87): The following globals are used by `mask_password`
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
diff --git a/requirements.txt b/requirements.txt
index 63f4ae2..252b214 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,8 +1,12 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-pbr!=2.1.0,>=2.0.0 # Apache-2.0
-requests>=2.14.2 # Apache-2.0
+pbr!=2.1.0 # Apache-2.0
+requests # Apache-2.0
opnfv
-PyYAML>=3.10.0 # MIT
-networking-bgpvpn==6.0.0 # Apache-2.0
+PyYAML # MIT
+networking-bgpvpn>=7.0.0 # Apache-2.0
+python-keystoneclient!=2.1.0 # Apache-2.0
+python-neutronclient # Apache-2.0
+xtesting # Apache-2.0
+openstacksdk # Apache-2.0
diff --git a/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/README b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/README
new file mode 100644
index 0000000..aee72d1
--- /dev/null
+++ b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/README
@@ -0,0 +1,23 @@
+This is the role which deploys the os-odl-bgpvpn scenarios in xci.
+
+# SCENARIOS #
+
+## os-odl-bgpvpn-noha ##
+
+To run os-odl-bgpvpn-noha you should export the following variables before
+running xci-deploy.sh. Note that you should change xxxx by the path where
+your releng-xci code is:
+
+export XCI_FLAVOR=noha
+export DEPLOY_SCENARIO=os-odl-bgpvpn
+export OPENSTACK_OSA_VERSION=master
+
+
+## os-odl-bgpvpn-ha ##
+
+To run os-odl-bgpvpn-ha you should export the following variables before
+running xci-deploy.sh:
+
+export XCI_FLAVOR=ha
+export DEPLOY_SCENARIO=os-odl-bgpvpn
+export OPENSTACK_OSA_VERSION=master
diff --git a/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/ansible-role-requirements-pike.yml b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/ansible-role-requirements-pike.yml
new file mode 100644
index 0000000..d357ed7
--- /dev/null
+++ b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/ansible-role-requirements-pike.yml
@@ -0,0 +1,223 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# This file is borrowed from os-odl-sfc scenario for stable/pike version
+# https://github.com/opnfv/sfc/blob/master/scenarios/os-odl-sfc/role/os-odl-sfc/files/ansible-role-requirements-pike.yml
+- name: ansible-hardening
+ scm: git
+ src: https://git.openstack.org/openstack/ansible-hardening
+ version: c05e36f48de66feb47046a0126d986fa03313f29
+- name: apt_package_pinning
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-apt_package_pinning
+ version: 9403a36513aee54c15890ac96c1f8c455f9c083d
+- name: pip_install
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-pip_install
+ version: df107891bf9fdfa7287bdfe43f3fa0120a80e5ad
+- name: galera_client
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-galera_client
+ version: 52b374547648056b58c544532296599801d501d7
+- name: galera_server
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-galera_server
+ version: b124e06872ebeca7d81cb22fb80ae97a995b07a8
+- name: ceph_client
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-ceph_client
+ version: 5fcbc68fdbd3105d233fd3c03c887f13227b1c3d
+- name: haproxy_server
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-haproxy_server
+ version: a905aaed8627f59d9dc10b9bc031589a7c65828f
+- name: keepalived
+ scm: git
+ src: https://github.com/evrardjp/ansible-keepalived
+ version: 3.0.3
+- name: lxc_container_create
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-lxc_container_create
+ version: c41d3b20da6be07d9bf5db7f7e6a1384c7cfb5eb
+- name: lxc_hosts
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-lxc_hosts
+ version: d974c4db1696027899b28b2cb58800cae9a605e5
+- name: memcached_server
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-memcached_server
+ version: 08c483f3c5d49c236194090534a015b67c8cded6
+- name: openstack_hosts
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-openstack_hosts
+ version: a0d3b9c9756b6e95b0e034f3d0576fbb33607820
+- name: os_keystone
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_keystone
+ version: ffc9c9b5e681748ff3e54e43f22c921e83342a51
+- name: openstack_openrc
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-openstack_openrc
+ version: b27229ef168aed7f2febf6991b2d7459ec8883ee
+- name: os_aodh
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_aodh
+ version: bcd77b1e10a7054e9365da6a20848b393153d025
+- name: os_barbican
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_barbican
+ version: 0797e8bdadd2fcf4696b22f0e18340c8d9539b09
+- name: os_ceilometer
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_ceilometer
+ version: 4b3e0589a0188de885659614ef4e076018af54f7
+- name: os_cinder
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_cinder
+ version: 6f5ab34e5a0694f3fc84e63c912e00e86e3de280
+- name: os_designate
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_designate
+ version: eac6d3c674397097d8adf722635252b1822c8f6c
+- name: os_glance
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_glance
+ version: 47080919c937aace65fc7dc8e9670dbcfd910b88
+- name: os_gnocchi
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_gnocchi
+ version: 5f8950f61ed6b61d1cc06ab73b3b02466bee0db1
+- name: os_heat
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_heat
+ version: 4d1efae631026631fb2af4f43a9fe8ca210d643e
+- name: os_horizon
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_horizon
+ version: 71aa69b1425f5b5b2bdc274357b62a9b4b57ae8f
+- name: os_ironic
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_ironic
+ version: 34205b6b99fc3cfe54eddbcde0380e626976e425
+- name: os_magnum
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_magnum
+ version: 0fdeea886ef4227e02d793f6dbfd54ccd9e6e088
+- name: os_molteniron
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_molteniron
+ version: 58cff32e954ab817d07b8e0a136663c34d7f7b60
+- name: os_neutron
+ scm: git
+ src: https://github.com/pperiyasamy/openstack-ansible-os_neutron
+ version: odl-bgpvpn-support
+- name: os_nova
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_nova
+ version: 80e0d04822f7ddc5b8d574329e4eb8a76aea63ff
+- name: os_octavia
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_octavia
+ version: 5fd1fbae703c17f928cfc00f60aeeed0500c6f2b
+- name: os_rally
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_rally
+ version: b2658fb704fd3a1e8bce794b8bf87ac83931aa46
+- name: os_sahara
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_sahara
+ version: e3e4f1bc8d72dd6fb7e26b8d0d364f9a60e16b0f
+- name: os_swift
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_swift
+ version: 0bb5979de285305f652694cee139390a8102c134
+- name: os_tempest
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_tempest
+ version: 0fb52fcd130bee25f40cd515da69948821d5b504
+- name: os_trove
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-os_trove
+ version: 6596f6b28c88a88c89e293ea8f5f8551eb491fd1
+- name: plugins
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-plugins
+ version: 11aed400f86951593bb60d1e853574b67894b0b3
+- name: rabbitmq_server
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-rabbitmq_server
+ version: fa80dfc0f8129e02f3f3b34bb7205889d3e5696c
+- name: repo_build
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-repo_build
+ version: d0079ff721b0f9c4682d57eccfadb36f365eea2b
+- name: repo_server
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-repo_server
+ version: 8302adcb11cad4e6245fd6bd1bbb4db08d3b60e9
+- name: rsyslog_client
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_client
+ version: f41638370114412b97c6523b4c626ca70f0337f4
+- name: rsyslog_server
+ scm: git
+ src: https://git.openstack.org/openstack/openstack-ansible-rsyslog_server
+ version: 61a3ab251f63c6156f2a6604ee1a822070e19c90
+- name: sshd
+ scm: git
+ src: https://github.com/willshersystems/ansible-sshd
+ version: 0.5.1
+- name: bird
+ scm: git
+ src: https://github.com/logan2211/ansible-bird
+ version: '1.4'
+- name: etcd
+ scm: git
+ src: https://github.com/logan2211/ansible-etcd
+ version: '1.3'
+- name: unbound
+ scm: git
+ src: https://github.com/logan2211/ansible-unbound
+ version: '1.5'
+- name: resolvconf
+ scm: git
+ src: https://github.com/logan2211/ansible-resolvconf
+ version: '1.3'
+- name: ceph-defaults
+ scm: git
+ src: https://github.com/ceph/ansible-ceph-defaults
+ version: v3.0.8
+- name: ceph-common
+ scm: git
+ src: https://github.com/ceph/ansible-ceph-common
+ version: v3.0.8
+- name: ceph-config
+ scm: git
+ src: https://github.com/ceph/ansible-ceph-config
+ version: v3.0.8
+- name: ceph-mon
+ scm: git
+ src: https://github.com/ceph/ansible-ceph-mon
+ version: v3.0.8
+- name: ceph-mgr
+ scm: git
+ src: https://github.com/ceph/ansible-ceph-mgr
+ version: v3.0.8
+- name: ceph-osd
+ scm: git
+ src: https://github.com/ceph/ansible-ceph-osd
+ version: v3.0.8
+- name: os_tacker
+ scm: git
+ src: https://github.com/manuelbuil/openstack-ansible-os_tacker
+ version: pike-suse-support
+- name: opendaylight
+ scm: git
+ src: https://git.opendaylight.org/gerrit/p/integration/packaging/ansible-opendaylight.git
+ version: 2af197bd13f77d2a07878b160c00f8ceeebb3c34 \ No newline at end of file
diff --git a/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/ha/openstack_user_config.yml b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/ha/openstack_user_config.yml
new file mode 100644
index 0000000..2ca5a98
--- /dev/null
+++ b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/ha/openstack_user_config.yml
@@ -0,0 +1,256 @@
+---
+cidr_networks:
+ container: 172.29.236.0/22
+ tunnel: 172.29.240.0/22
+ storage: 172.29.244.0/22
+
+used_ips:
+ - "172.29.236.1,172.29.236.50"
+ - "172.29.240.1,172.29.240.50"
+ - "172.29.244.1,172.29.244.50"
+ - "172.29.248.1,172.29.248.50"
+ - "172.29.236.222"
+
+global_overrides:
+ internal_lb_vip_address: 172.29.236.222
+ external_lb_vip_address: 192.168.122.220
+ tunnel_bridge: "br-vxlan"
+ management_bridge: "br-mgmt"
+ provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ host_bind_override: "eth12"
+ type: "vlan"
+ range: "102:199"
+ net_name: "physnet1"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# rsyslog server
+# log_hosts:
+# log1:
+# ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# cinder api services
+storage-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.14"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+ controller01:
+ ip: 172.29.236.12
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.14"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+ controller02:
+ ip: 172.29.236.13
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.14"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# heat
+orchestration_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# horizon
+dashboard_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# neutron server, agents (L3, etc)
+network_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# nova hypervisors
+compute_hosts:
+ compute00:
+ ip: 172.29.236.14
+ compute01:
+ ip: 172.29.236.15
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.14"
+ share: "/volumes"
+ controller01:
+ ip: 172.29.236.12
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.14"
+ share: "/volumes"
+ controller02:
+ ip: 172.29.236.13
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.14"
+ share: "/volumes"
diff --git a/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/ha/user_variables_os-odl-bgpvpn-ha.yml b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/ha/user_variables_os-odl-bgpvpn-ha.yml
new file mode 100644
index 0000000..25cd683
--- /dev/null
+++ b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/ha/user_variables_os-odl-bgpvpn-ha.yml
@@ -0,0 +1,22 @@
+---
+# Copyright (c) 2017 Ericsson AB and others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ##
+# ## This file contains commonly used overrides for convenience. Please inspect
+# ## the defaults for each role to find additional override options.
+# ##
+
+# Enable clustering for opendaylight
+cluster: true \ No newline at end of file
diff --git a/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/mini/openstack_user_config.yml b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/mini/openstack_user_config.yml
new file mode 100644
index 0000000..0f8ccd1
--- /dev/null
+++ b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/mini/openstack_user_config.yml
@@ -0,0 +1,171 @@
+---
+cidr_networks:
+ container: 172.29.236.0/22
+ tunnel: 172.29.240.0/22
+ storage: 172.29.244.0/22
+
+used_ips:
+ - "172.29.236.1,172.29.236.50"
+ - "172.29.240.1,172.29.240.50"
+ - "172.29.244.1,172.29.244.50"
+ - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+ internal_lb_vip_address: 172.29.236.11
+ external_lb_vip_address: 192.168.122.3
+ tunnel_bridge: "br-vxlan"
+ management_bridge: "br-mgmt"
+ provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ host_bind_override: "eth12"
+ type: "vlan"
+ range: "102:199"
+ net_name: "physnet1"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# rsyslog server
+# log_hosts:
+# log1:
+# ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# cinder api services
+storage-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.12"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# heat
+orchestration_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# horizon
+dashboard_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# neutron server, agents (L3, etc)
+network_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# nova hypervisors
+compute_hosts:
+ compute00:
+ ip: 172.29.236.12
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.12"
+ share: "/volumes"
diff --git a/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/noha/openstack_user_config.yml b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/noha/openstack_user_config.yml
new file mode 100644
index 0000000..7ed9cd3
--- /dev/null
+++ b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/noha/openstack_user_config.yml
@@ -0,0 +1,173 @@
+---
+cidr_networks:
+ container: 172.29.236.0/22
+ tunnel: 172.29.240.0/22
+ storage: 172.29.244.0/22
+
+used_ips:
+ - "172.29.236.1,172.29.236.50"
+ - "172.29.240.1,172.29.240.50"
+ - "172.29.244.1,172.29.244.50"
+ - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+ internal_lb_vip_address: 172.29.236.11
+ external_lb_vip_address: 192.168.122.3
+ tunnel_bridge: "br-vxlan"
+ management_bridge: "br-mgmt"
+ provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ host_bind_override: "eth12"
+ type: "vlan"
+ range: "102:199"
+ net_name: "physnet1"
+ group_binds:
+ - neutron_openvswitch_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# rsyslog server
+# log_hosts:
+# log1:
+# ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# cinder api services
+storage-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.12"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# heat
+orchestration_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# horizon
+dashboard_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# neutron server, agents (L3, etc)
+network_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# nova hypervisors
+compute_hosts:
+ compute00:
+ ip: 172.29.236.12
+ compute01:
+ ip: 172.29.236.13
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.12"
+ share: "/volumes"
diff --git a/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/openstack-ansible/pike/group_vars/quagga_all.yml b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/openstack-ansible/pike/group_vars/quagga_all.yml
new file mode 100644
index 0000000..342a2f2
--- /dev/null
+++ b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/openstack-ansible/pike/group_vars/quagga_all.yml
@@ -0,0 +1,46 @@
+---
+# Copyright (c) 2017-2018 Ericsson AB and others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Distro on which quagga packages to be installed
+quagga_install_method: "{{ (ansible_os_family=='Debian') | ternary('deb_repo', 'rpm_repo') }}"
+
+# Quagga deb packages repository location
+quagga_deb_repo: https://wiki.opnfv.org/download/attachments/6827916/quagga-ubuntu.tar.gz
+
+# Quagga rpm packages repository location
+quagga_rpm_repo: https://wiki.opnfv.org/download/attachments/6827916/quagga-4.tar.gz
+
+# Find out correct quagga package url based on the
+# quagga install method
+quagga_package_url: "{% if quagga_install_method == 'deb_repo' %}{{ quagga_deb_repo }}{% else %}{{ quagga_rpm_repo }}{% endif %}"
+
+# Temporary location on which quagga packages to be
+# retrieved for the installation
+temp_quagga_dir: /tmp/quagga
+
+# The neutron server node on which OSA configures ODL
+# as the BGP speaker
+odl_bgp_speaker_host: "{{ ((groups['neutron_server'] | intersect(ansible_play_hosts)) | list)[0] }}"
+
+# BGP Config Server Port at quagga side for ODL to establish
+# thrift connection.
+bgp_config_server_port: 7644
+
+# The ODL client location which is used to run the karaf
+# commands to configure ODL as BGP speaker
+opendaylight_karaf_client: /opt/opendaylight/bin/client
+
+# The ODL karaf host ip address
+opendaylight_karaf_host: 127.0.0.1
diff --git a/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/openstack-ansible/pike/playbooks/defaults/repo-packages/opendaylight.yml b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/openstack-ansible/pike/playbooks/defaults/repo-packages/opendaylight.yml
new file mode 100644
index 0000000..5316b1b
--- /dev/null
+++ b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/openstack-ansible/pike/playbooks/defaults/repo-packages/opendaylight.yml
@@ -0,0 +1,9 @@
+---
+
+networking_odl_git_repo: https://git.openstack.org/openstack/networking-odl
+networking_odl_git_install_branch: stable/pike
+networking_odl_git_project_group: neutron_all
+
+networking_bgpvpn_git_repo: https://git.openstack.org/openstack/networking-bgpvpn
+networking_bgpvpn_git_install_branch: stable/pike
+networking_bgpvpn_git_project_group: neutron_all
diff --git a/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/openstack-ansible/pike/playbooks/inventory/env.d/neutron.yml b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/openstack-ansible/pike/playbooks/inventory/env.d/neutron.yml
new file mode 100644
index 0000000..f1d85b4
--- /dev/null
+++ b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/openstack-ansible/pike/playbooks/inventory/env.d/neutron.yml
@@ -0,0 +1,87 @@
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+component_skel:
+ neutron_agent:
+ belongs_to:
+ - neutron_all
+ neutron_dhcp_agent:
+ belongs_to:
+ - neutron_all
+ neutron_linuxbridge_agent:
+ belongs_to:
+ - neutron_all
+ neutron_openvswitch_agent:
+ belongs_to:
+ - neutron_all
+ neutron_metering_agent:
+ belongs_to:
+ - neutron_all
+ neutron_l3_agent:
+ belongs_to:
+ - neutron_all
+ neutron_lbaas_agent:
+ belongs_to:
+ - neutron_all
+ neutron_bgp_dragent:
+ belongs_to:
+ - neutron_all
+ neutron_metadata_agent:
+ belongs_to:
+ - neutron_all
+ neutron_sriov_nic_agent:
+ belongs_to:
+ - neutron_all
+ neutron_server:
+ belongs_to:
+ - neutron_all
+ opendaylight:
+ belongs_to:
+ - neutron_all
+
+
+container_skel:
+ neutron_agents_container:
+ belongs_to:
+ - network_containers
+ contains:
+ - neutron_agent
+ - neutron_bgp_dragent
+ - neutron_dhcp_agent
+ - neutron_l3_agent
+ - neutron_lbaas_agent
+ - neutron_linuxbridge_agent
+ - neutron_metadata_agent
+ - neutron_metering_agent
+ - neutron_openvswitch_agent
+ - neutron_sriov_nic_agent
+ properties:
+ is_metal: true
+ neutron_server_container:
+ belongs_to:
+ - network_containers
+ contains:
+ - neutron_server
+ - opendaylight
+ - quagga_server
+
+
+physical_skel:
+ network_containers:
+ belongs_to:
+ - all_containers
+ network_hosts:
+ belongs_to:
+ - hosts
diff --git a/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/openstack-ansible/pike/playbooks/inventory/env.d/quagga.yml b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/openstack-ansible/pike/playbooks/inventory/env.d/quagga.yml
new file mode 100644
index 0000000..19b890c
--- /dev/null
+++ b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/openstack-ansible/pike/playbooks/inventory/env.d/quagga.yml
@@ -0,0 +1,19 @@
+---
+# Copyright (c) 2017-2018 Ericsson AB and others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+component_skel:
+ quagga_server:
+ belongs_to:
+ - quagga_all
diff --git a/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/openstack-ansible/pike/playbooks/os-setup-bgp-odl.yml b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/openstack-ansible/pike/playbooks/os-setup-bgp-odl.yml
new file mode 100644
index 0000000..67a3b2d
--- /dev/null
+++ b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/openstack-ansible/pike/playbooks/os-setup-bgp-odl.yml
@@ -0,0 +1,77 @@
+---
+# Copyright (c) 2017-2018 Ericsson AB and others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+- name: Install and Configure OpenDaylight for BGPVPN
+ hosts: quagga_all
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+ user: root
+
+ tasks:
+ - name: Retrieve checksum for quagga download
+ uri:
+ url: "{{ quagga_package_url | replace('tar.gz', 'checksum') }}"
+ return_content: yes
+ register: quagga_package_checksum
+
+ - name: Attempt quagga download
+ get_url:
+ url: "{{ quagga_package_url }}"
+ dest: "/var/cache/{{ quagga_package_url | basename }}"
+ checksum: "sha1:{{ quagga_package_checksum.content | trim }}"
+ register: osa_get_quagga
+
+ - name: Unarchive pre-built quagga packages
+ unarchive:
+ src: "/var/cache/{{ quagga_package_url | basename }}"
+ dest: "{{ temp_quagga_dir | dirname }}"
+ copy: "no"
+
+ - name: Prerequisite check for quagga
+ apt:
+ name: libglib2.0-0
+ state: present
+ when: quagga_install_method == "deb_repo"
+
+ - name: Prerequisite check for quagga
+ yum:
+ name: glib2,glib2-devel
+ state: present
+ when: quagga_install_method == "rpm_repo"
+
+ - name: Install quagga rpm packages
+ shell: |
+ cd {{ temp_quagga_dir }}
+ packages=$(ls |grep -vE 'debuginfo|devel|contrib')
+ yum -y install $packages
+ when: quagga_install_method == "rpm_repo"
+
+ - name: Install quagga Debian packages
+ shell: |
+ cd {{ temp_quagga_dir }}
+ packages=$(ls |grep -vE 'debuginfo|devel|contrib')
+ dpkg -i $packages
+ when: quagga_install_method == "deb_repo"
+
+ - name: Start Zebra RPC Daemon for Quagga
+ command: /opt/quagga/etc/init.d/zrpcd start
+ when: inventory_hostname == odl_bgp_speaker_host
+
+ - name: Connect OpenDaylight with Quagga
+ command: "{{ opendaylight_karaf_client }} -h {{ opendaylight_karaf_host }} 'bgp-connect --host {{ opendaylight_karaf_host }} --port {{ bgp_config_server_port }} add'"
+ when: inventory_hostname == odl_bgp_speaker_host
+
+ - name: Configure Opendaylight as BGP speaker
+ command: "{{ opendaylight_karaf_client }} -h {{ opendaylight_karaf_host }} 'odl:configure-bgp -op start-bgp-server --as-num 100 --router-id {{ odl_bgp_speaker_host_ip_address }}'"
+ when: inventory_hostname == odl_bgp_speaker_host
diff --git a/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/setup-openstack.yml b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/setup-openstack.yml
new file mode 100644
index 0000000..7ebbe73
--- /dev/null
+++ b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/setup-openstack.yml
@@ -0,0 +1,29 @@
+---
+# Copyright (c) 2018 Ericsson AB and others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+- include: os-keystone-install.yml
+- include: os-glance-install.yml
+- include: os-cinder-install.yml
+- include: os-nova-install.yml
+- include: os-neutron-install.yml
+# TODO: uncomment this playbook after https://review.openstack.org/#/c/523907/ is merged
+#- include: os-setup-bgp-odl.yml
+- include: os-heat-install.yml
+- include: os-horizon-install.yml
+- include: os-swift-install.yml
+- include: os-ironic-install.yml
+- include: os-tacker-install.yml
+- include: os-tempest-install.yml
+ when: (tempest_install | default(False)) | bool or (tempest_run | default(False)) | bool
diff --git a/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/user_variables_os-odl-bgpvpn.yml b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/user_variables_os-odl-bgpvpn.yml
new file mode 100644
index 0000000..47ef29b
--- /dev/null
+++ b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/files/user_variables_os-odl-bgpvpn.yml
@@ -0,0 +1,52 @@
+---
+# Copyright (c) 2017 Ericsson AB and others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ##
+# ## This file contains commonly used overrides for convenience. Please inspect
+# ## the defaults for each role to find additional override options.
+# ##
+
+# Ensure the openvswitch kernel module is loaded
+openstack_host_specific_kernel_modules:
+ - name: "openvswitch"
+ pattern: "CONFIG_OPENVSWITCH"
+ group: "network_hosts"
+
+# Use OpenDaylight SDN Controller
+neutron_plugin_type: "ml2.opendaylight"
+neutron_opendaylight_conf_ini_overrides:
+ ml2_odl:
+ username: "admin"
+ password: "admin"
+ port_binding_controller: "pseudo-agentdb-binding"
+ url: "http://{{ internal_lb_vip_address }}:8180/controller/nb/v2/neutron"
+
+neutron_ml2_drivers_type: "flat,vlan,vxlan"
+
+neutron_plugin_base:
+ - odl-router_v2
+ - bgpvpn
+
+# The neutron server node on which OSA configures ODL
+# as the BGP speaker
+odl_bgp_speaker_host: "{{ ((groups['neutron_server'] | intersect(ansible_play_hosts)) | list)[0] }}"
+
+# The neutron server node ip address (br-admin) on which OSA configures ODL
+# as the BGP speaker
+odl_bgp_speaker_host_ip_address: "{{ hostvars[groups['neutron_server'][0]]['container_address'] }}"
+
+# Configure OpenDaylight with Quagga
+quagga: true
+
diff --git a/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/tasks/add-inventory-files-pike.yml b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/tasks/add-inventory-files-pike.yml
new file mode 100644
index 0000000..11ca33d
--- /dev/null
+++ b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/tasks/add-inventory-files-pike.yml
@@ -0,0 +1,16 @@
+---
+
+- name: Add networking-odl and networking-bgpvpn repos
+ copy:
+ src: openstack-ansible/pike/playbooks/defaults/repo-packages/opendaylight.yml
+ dest: "{{openstack_osa_path}}/playbooks/defaults/repo_packages/opendaylight.yml"
+
+- name: Provide neutron inventory which adds quagga into neutron server
+ copy:
+ src: openstack-ansible/pike/playbooks/inventory/env.d/neutron.yml
+ dest: "{{openstack_osa_path}}/playbooks/inventory/env.d/neutron.yml"
+
+- name: Provide Quagga inventory which adds quagga hosts
+ copy:
+ src: openstack-ansible/pike/playbooks/inventory/env.d/quagga.yml
+ dest: "{{openstack_osa_path}}/playbooks/inventory/env.d/quagga.yml"
diff --git a/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/tasks/add-osa-files-pike.yml b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/tasks/add-osa-files-pike.yml
new file mode 100644
index 0000000..46c3700
--- /dev/null
+++ b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/tasks/add-osa-files-pike.yml
@@ -0,0 +1,16 @@
+---
+
+- name: copy quagga variable file
+ copy:
+ src: openstack-ansible/pike/group_vars/quagga_all.yml
+ dest: "{{openstack_osa_path}}/group_vars/quagga_all.yml"
+
+- name: Add the Quagga configuration playbook
+ copy:
+ src: openstack-ansible/pike/playbooks/os-setup-bgp-odl.yml
+ dest: "{{openstack_osa_path}}/playbooks/os-setup-bgp-odl.yml"
+
+- name: copy OPNFV role requirements
+ copy:
+ src: "ansible-role-requirements-pike.yml"
+ dest: "{{openstack_osa_path}}/ansible-role-requirements.yml"
diff --git a/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/tasks/main.yml b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/tasks/main.yml
new file mode 100644
index 0000000..76ee389
--- /dev/null
+++ b/scenarios/os-odl-bgpvpn/role/os-odl-bgpvpn/tasks/main.yml
@@ -0,0 +1,39 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017-18 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+- name: copy user_variables_os-odl-bgpvpn.yml
+ copy:
+ src: "user_variables_os-odl-bgpvpn.yml"
+ dest: "{{openstack_osa_etc_path}}/user_variables_os-odl-bgpvpn.yml"
+
+- name: copy user_variables_os-odl-bgpvpn-ha.yml
+ copy:
+ src: "{{xci_flavor}}/user_variables_os-odl-bgpvpn-ha.yml"
+ dest: "{{openstack_osa_etc_path}}/user_variables_os-odl-bgpvpn-ha.yml"
+ when:
+ - xci_flavor == "ha"
+
+- name: copy os-odl-bgpvpn scenario specific openstack_user_config.yml
+ copy:
+ src: "{{xci_flavor}}/openstack_user_config.yml"
+ dest: "{{openstack_osa_etc_path}}/openstack_user_config.yml"
+
+- name: copy os-odl-bgpvpn scenario specific setup-openstack.yml
+ copy:
+ src: "setup-openstack.yml"
+ dest: "{{openstack_osa_path}}/playbooks"
+
+- name: Copy the OSA not-yet-upstreamed files for Pike
+ include: add-osa-files-pike.yml
+ when: openstack_osa_version == "stable/pike"
+
+- name: Copy the OSA not-yet-upstreamed inventory files for Pike
+ include: add-inventory-files-pike.yml
+ when: openstack_osa_version == "stable/pike"
diff --git a/scenarios/os-odl-bgpvpn/xci_overrides b/scenarios/os-odl-bgpvpn/xci_overrides
new file mode 100644
index 0000000..8b25c4f
--- /dev/null
+++ b/scenarios/os-odl-bgpvpn/xci_overrides
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+if [[ $DEPLOY_SCENARIO == "os-odl-bgpvpn" ]] && [[ $XCI_FLAVOR == "ha" ]]; then
+ export VM_MEMORY_SIZE=20480
+elif [[ $DEPLOY_SCENARIO == "os-odl-bgpvpn" ]]; then
+ export VM_MEMORY_SIZE=16384
+fi \ No newline at end of file
diff --git a/sdnvpn/artifacts/quagga_setup.sh b/sdnvpn/artifacts/quagga_setup.sh
index a8fe9f6..c6e6a9c 100644
--- a/sdnvpn/artifacts/quagga_setup.sh
+++ b/sdnvpn/artifacts/quagga_setup.sh
@@ -1,27 +1,30 @@
#! /bin/bash
set -xe
-
# change the password because this script is run on a passwordless cloud-image
echo 'ubuntu:opnfv' | chpasswd
# Wait for a floating IP
# as a workaround to NAT breakage
-sleep 20
+sleep 100
# Variables to be filled in with python
-NEIGHBOR_IP=%s
-OWN_IP=%s
+NEIGHBOR_IP={0}
+OWN_IP={1}
# directly access the instance from the external net without NAT
-EXT_NET_MASK=%s
+EXT_NET_MASK={2}
+IP_PREFIX={3}
+RD={4}
+IRT={5}
+ERT={6}
-if [[ $(getent hosts | awk '{print $2}') != *"$(cat /etc/hostname | awk '{print $1}')"* ]]
-then
-echo "127.0.1.1 $(cat /etc/hostname | awk '{print $1}')" | tee -a /etc/hosts
+if [[ $(getent hosts | awk '{{print $2}}') != *"$(cat /etc/hostname | awk '{{print $1}}')"* ]]
+then
+echo "127.0.1.1 $(cat /etc/hostname | awk '{{print $1}}')" | tee -a /etc/hosts
fi
quagga_int=''
-for net_int in $(netstat -ia | awk 'NR>2{print $1}');
+for net_int in $(netstat -ia | awk 'NR>2{{print $1}}');
do
if [ -z "$(ifconfig | grep $net_int)" ]
then
@@ -32,65 +35,58 @@ done
if [ -z "$quagga_int" ]
then
echo 'No available network interface'
-fi
-
+else
ip link set $quagga_int up
ip addr add $OWN_IP/$EXT_NET_MASK dev $quagga_int
+fi
-ZEBRA_CONFIG_LOCATION="/etc/quagga/zebra.conf"
-DAEMONS_FILE_LOCATION="/etc/quagga/daemons"
-BGPD_CONFIG_LOCATION="/etc/quagga/bgpd.conf"
-BGPD_LOG_FILE="/var/log/bgpd.log"
-
-# Quagga is already installed to run as well in setups without inet
-# dns fix
-# echo "nameserver 8.8.8.8" > /etc/resolvconf/resolv.conf.d/head
-# resolvconf -u
-# DEBIAN_FRONTEND=noninteractive apt-get update
-# DEBIAN_FRONTEND=noninteractive apt-get install quagga -y
-
-touch $BGPD_LOG_FILE
-chown quagga:quagga $BGPD_LOG_FILE
-
-chown quagga:quagga $DAEMONS_FILE_LOCATION
-cat <<CATEOF > $DAEMONS_FILE_LOCATION
-zebra=yes
-bgpd=yes
-ospfd=no
-ospf6d=no
-ripd=no
-ripngd=no
-isisd=no
-babeld=no
-CATEOF
-
-touch $ZEBRA_CONFIG_LOCATION
-chown quagga:quagga $ZEBRA_CONFIG_LOCATION
-
-cat <<CATEOF > $BGPD_CONFIG_LOCATION
-! -*- bgp -*-
+# Download quagga/zrpc rpms
+cd /root
+wget http://artifacts.opnfv.org/sdnvpn/quagga4/quagga-ubuntu-updated.tar.gz
+tar -xvf quagga-ubuntu-updated.tar.gz
+cd /root/quagga
+dpkg -i c-capnproto_1.0.2.75f7901.Ubuntu16.04_amd64.deb
+dpkg -i zmq_4.1.3.56b71af.Ubuntu16.04_amd64.deb
+dpkg -i quagga_1.1.0.cd8ab40.Ubuntu16.04_amd64.deb
+dpkg -i thrift_1.0.0.b2a4d4a.Ubuntu16.04_amd64.deb
+dpkg -i zrpc_0.2.0efd19f.thriftv4.Ubuntu16.04_amd64.deb
-hostname bgpd
-password sdncbgpc
+nohup /opt/quagga/sbin/bgpd &
+cat > /tmp/quagga-config << EOF1
+config terminal
router bgp 200
- bgp router-id ${OWN_IP}
- neighbor ${NEIGHBOR_IP} remote-as 100
- no neighbor ${NEIGHBOR_IP} activate
+ bgp router-id $OWN_IP
+ no bgp log-neighbor-changes
+ bgp graceful-restart stalepath-time 90
+ bgp graceful-restart restart-time 900
+ bgp graceful-restart
+ bgp graceful-restart preserve-fw-state
+ bgp bestpath as-path multipath-relax
+ neighbor $NEIGHBOR_IP remote-as 100
+ no neighbor $NEIGHBOR_IP activate
+ vrf $RD
+ rd $RD
+ rt import $IRT
+ rt export $ERT
+ exit
+!
+address-family vpnv4
+neighbor $NEIGHBOR_IP activate
+neighbor $NEIGHBOR_IP attribute-unchanged next-hop
+exit
!
- address-family vpnv4 unicast
- neighbor ${NEIGHBOR_IP} activate
- exit-address-family
+route-map map permit 1
+ set ip next-hop $OWN_IP
+exit
!
-line vty
- exec-timeout 0 0
+router bgp 200
+address-family vpnv4
+network $IP_PREFIX rd $RD tag 100 route-map map
+exit
!
-debug bgp events
-debug bgp updates
-log file ${BGPD_LOG_FILE}
-end
-CATEOF
-chown quagga:quagga $BGPD_CONFIG_LOCATION
-service quagga restart
-pgrep bgpd
-pgrep zebra
+EOF1
+
+sleep 20
+
+(sleep 1;echo "sdncbgpc";sleep 1;cat /tmp/quagga-config;sleep 1; echo "exit") |nc -q1 localhost 2605
diff --git a/sdnvpn/artifacts/testcase_1bis.yaml b/sdnvpn/artifacts/testcase_1bis.yaml
new file mode 100644
index 0000000..f269943
--- /dev/null
+++ b/sdnvpn/artifacts/testcase_1bis.yaml
@@ -0,0 +1,234 @@
+heat_template_version: 2013-05-23
+
+description: >
+ Template for SDNVPN testcase 1
+ VPN provides connectivity between subnets
+
+parameters:
+ flavor:
+ type: string
+ description: flavor for the servers to be created
+ constraints:
+ - custom_constraint: nova.flavor
+ image_n:
+ type: string
+ description: image for the servers to be created
+ constraints:
+ - custom_constraint: glance.image
+ av_zone_1:
+ type: string
+ description: availability zone 1
+ av_zone_2:
+ type: string
+ description: availability zone 2
+
+ net_1_name:
+ type: string
+ description: network 1
+ subnet_1_name:
+ type: string
+ description: subnet 1 name
+ subnet_1_cidr:
+ type: string
+ description: subnet 1 cidr
+ net_2_name:
+ type: string
+ description: network 2
+ subnet_2_name:
+ type: string
+ description: subnet 2 name
+ subnet_2_cidr:
+ type: string
+ description: subnet 1 cidr
+
+ secgroup_name:
+ type: string
+ description: security group name
+ secgroup_descr:
+ type: string
+ description: security group slogan
+
+ instance_1_name:
+ type: string
+ description: instance name
+ instance_2_name:
+ type: string
+ description: instance name
+ instance_3_name:
+ type: string
+ description: instance name
+ instance_4_name:
+ type: string
+ description: instance name
+ instance_5_name:
+ type: string
+ description: instance name
+
+ ping_count:
+ type: string
+ description: ping count for user data script
+ default: 10
+
+resources:
+ net_1:
+ type: OS::Neutron::Net
+ properties:
+ name: { get_param: net_1_name }
+ subnet_1:
+ type: OS::Neutron::Subnet
+ properties:
+ name: { get_param: subnet_1_name }
+ network: { get_resource: net_1 }
+ cidr: { get_param: subnet_1_cidr }
+ net_2:
+ type: OS::Neutron::Net
+ properties:
+ name: { get_param: net_2_name }
+ subnet_2:
+ type: OS::Neutron::Subnet
+ properties:
+ name: { get_param: subnet_2_name }
+ network: { get_resource: net_2 }
+ cidr: { get_param: subnet_2_cidr }
+
+ sec_group:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name: { get_param: secgroup_name }
+ description: { get_param: secgroup_descr }
+ rules:
+ - protocol: icmp
+ remote_ip_prefix: 0.0.0.0/0
+ - protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ remote_ip_prefix: 0.0.0.0/0
+
+ vm1:
+ type: OS::Nova::Server
+ depends_on: [ vm2, vm3, vm4, vm5 ]
+ properties:
+ name: { get_param: instance_1_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_1 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - subnet: { get_resource: subnet_1 }
+ config_drive: True
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/sh
+ set $IP_VM2 $IP_VM3 $IP_VM4 $IP_VM5
+ while true; do
+ for i do
+ ip=$i
+ ping -c $COUNT $ip 2>&1 >/dev/null
+ RES=$?
+ if [ \"Z$RES\" = \"Z0\" ] ; then
+ echo ping $ip OK
+ else echo ping $ip KO
+ fi
+ done
+ sleep 1
+ done
+ params:
+ $IP_VM2: { get_attr: [vm2, addresses, { get_resource: net_1}, 0, addr] }
+ $IP_VM3: { get_attr: [vm3, addresses, { get_resource: net_1}, 0, addr] }
+ $IP_VM4: { get_attr: [vm4, addresses, { get_resource: net_2}, 0, addr] }
+ $IP_VM5: { get_attr: [vm5, addresses, { get_resource: net_2}, 0, addr] }
+ $COUNT: { get_param: ping_count }
+ vm2:
+ type: OS::Nova::Server
+ properties:
+ name: { get_param: instance_2_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_1 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - subnet: { get_resource: subnet_1 }
+ vm3:
+ type: OS::Nova::Server
+ properties:
+ name: { get_param: instance_3_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_2 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - subnet: { get_resource: subnet_1 }
+ vm4:
+ type: OS::Nova::Server
+ depends_on: vm5
+ properties:
+ name: { get_param: instance_4_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_1 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - subnet: { get_resource: subnet_2 }
+ config_drive: True
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/sh
+ set $IP_VM5
+ while true; do
+ for i do
+ ip=$i
+ ping -c $COUNT $ip 2>&1 >/dev/null
+ RES=$?
+ if [ \"Z$RES\" = \"Z0\" ] ; then
+ echo ping $ip OK
+ else echo ping $ip KO
+ fi
+ done
+ sleep 1
+ done
+ params:
+ $IP_VM5: { get_attr: [vm5, addresses, { get_resource: net_2}, 0, addr] }
+ $COUNT: { get_param: ping_count }
+
+ vm5:
+ type: OS::Nova::Server
+ properties:
+ name: { get_param: instance_5_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_2 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - subnet: { get_resource: subnet_2 }
+
+outputs:
+ net_1_o:
+ description: the id of network 1
+ value: { get_attr: [net_1, show, id] }
+ net_2_o:
+ description: the id of network 2
+ value: { get_attr: [net_2, show, id] }
+ vm1_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm1, show, name] }
+ vm2_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm2, show, name] }
+ vm3_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm3, show, name] }
+ vm4_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm4, show, name] }
+ vm5_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm5, show, name] }
diff --git a/sdnvpn/artifacts/testcase_2bis.yaml b/sdnvpn/artifacts/testcase_2bis.yaml
new file mode 100644
index 0000000..0319a6d
--- /dev/null
+++ b/sdnvpn/artifacts/testcase_2bis.yaml
@@ -0,0 +1,289 @@
+heat_template_version: 2013-05-23
+
+description: >
+ Template for SDNVPN testcase 2
+ tenant separation
+
+parameters:
+ flavor:
+ type: string
+ description: flavor for the servers to be created
+ constraints:
+ - custom_constraint: nova.flavor
+ image_n:
+ type: string
+ description: image for the servers to be created
+ constraints:
+ - custom_constraint: glance.image
+ av_zone_1:
+ type: string
+ description: availability zone 1
+ id_rsa_key:
+ type: string
+ description: id_rsa file contents for the vms
+
+ net_1_name:
+ type: string
+ description: network 1
+ subnet_1a_name:
+ type: string
+ description: subnet 1a name
+ subnet_1a_cidr:
+ type: string
+ description: subnet 1a cidr
+ subnet_1b_name:
+ type: string
+ description: subnet 1b name
+ subnet_1b_cidr:
+ type: string
+ description: subnet 1b cidr
+ router_1_name:
+ type: string
+ description: router 1 name
+ net_2_name:
+ type: string
+ description: network 2
+ subnet_2a_name:
+ type: string
+ description: subnet 2a name
+ subnet_2a_cidr:
+ type: string
+ description: subnet 2a cidr
+ subnet_2b_name:
+ type: string
+ description: subnet 2b name
+ subnet_2b_cidr:
+ type: string
+ description: subnet 2b cidr
+ router_2_name:
+ type: string
+ description: router 2 name
+
+ secgroup_name:
+ type: string
+ description: security group name
+ secgroup_descr:
+ type: string
+ description: security group slogan
+
+ instance_1_name:
+ type: string
+ description: instance name
+ instance_2_name:
+ type: string
+ description: instance name
+ instance_3_name:
+ type: string
+ description: instance name
+ instance_4_name:
+ type: string
+ description: instance name
+ instance_5_name:
+ type: string
+ description: instance name
+
+ instance_1_ip:
+ type: string
+ description: instance fixed ip
+ instance_2_ip:
+ type: string
+ description: instance fixed ip
+ instance_3_ip:
+ type: string
+ description: instance fixed ip
+ instance_4_ip:
+ type: string
+ description: instance fixed ip
+ instance_5_ip:
+ type: string
+ description: instance fixed ip
+
+resources:
+ net_1:
+ type: OS::Neutron::Net
+ properties:
+ name: { get_param: net_1_name }
+ subnet_1a:
+ type: OS::Neutron::Subnet
+ properties:
+ name: { get_param: subnet_1a_name }
+ network: { get_resource: net_1 }
+ cidr: { get_param: subnet_1a_cidr }
+ net_2:
+ type: OS::Neutron::Net
+ properties:
+ name: { get_param: net_2_name }
+ subnet_2b:
+ type: OS::Neutron::Subnet
+ properties:
+ name: { get_param: subnet_2b_name }
+ network: { get_resource: net_2 }
+ cidr: { get_param: subnet_2b_cidr }
+
+ sec_group:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name: { get_param: secgroup_name }
+ description: { get_param: secgroup_descr }
+ rules:
+ - protocol: icmp
+ remote_ip_prefix: 0.0.0.0/0
+ - protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ remote_ip_prefix: 0.0.0.0/0
+
+ vm1:
+ type: OS::Nova::Server
+ depends_on: [ vm2, vm4 ]
+ properties:
+ name: { get_param: instance_1_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_1 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - network: { get_resource: net_1 }
+ fixed_ip: { get_param: instance_1_ip }
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/sh
+ sudo mkdir -p /home/cirros/.ssh/
+ sudo chown cirros:cirros /home/cirros/.ssh/
+ sudo echo $ID_RSA > /home/cirros/.ssh/id_rsa.enc
+ sudo base64 -d /home/cirros/.ssh/id_rsa.enc > /home/cirros/.ssh/id_rsa
+ sudo chown cirros:cirros /home/cirros/.ssh/id_rsa
+ sudo echo $AUTH_KEYS > /home/cirros/.ssh/authorized_keys
+ sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys
+ chmod 700 /home/cirros/.ssh
+ chmod 644 /home/cirros/.ssh/authorized_keys
+ chmod 600 /home/cirros/.ssh/id_rsa
+ echo gocubsgo > cirros_passwd
+ set $IP_VM2 $IP_VM4
+ echo will try to ssh to $IP_VM2 and $IP_VM4
+ while true; do
+ for i do
+ ip=$i
+ hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa cirros@$ip 'hostname' </dev/zero 2>/dev/null)
+ RES=$?
+ echo $RES
+ if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;
+ else echo $ip 'not reachable';fi;
+ done
+ sleep 1
+ done
+ params:
+ $IP_VM2: { get_param: instance_2_ip }
+ $IP_VM4: { get_param: instance_4_ip }
+ $ID_RSA: { get_param: id_rsa_key }
+ $AUTH_KEYS: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e\
+ stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9\
+ sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU\
+ ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= cirros@test1"
+ vm2:
+ type: OS::Nova::Server
+ properties:
+ name: { get_param: instance_2_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_1 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - network: { get_resource: net_1 }
+ fixed_ip: { get_param: instance_2_ip }
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/sh
+ sudo mkdir -p /home/cirros/.ssh/
+ sudo chown cirros:cirros /home/cirros/.ssh/
+ sudo echo $ID_RSA > /home/cirros/.ssh/id_rsa.enc
+ sudo base64 -d /home/cirros/.ssh/id_rsa.enc > /home/cirros/.ssh/id_rsa
+ sudo chown cirros:cirros /home/cirros/.ssh/id_rsa
+ sudo echo $AUTH_KEYS > /home/cirros/.ssh/authorized_keys
+ sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys
+ chmod 700 /home/cirros/.ssh
+ chmod 644 /home/cirros/.ssh/authorized_keys
+ chmod 600 /home/cirros/.ssh/id_rsa
+ params:
+ $ID_RSA: { get_param: id_rsa_key }
+ $AUTH_KEYS: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e\
+ stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9\
+ sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU\
+ ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= cirros@test1"
+ vm4:
+ type: OS::Nova::Server
+ depends_on: vm2
+ properties:
+ name: { get_param: instance_4_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_1 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - network: { get_resource: net_2 }
+ fixed_ip: { get_param: instance_4_ip }
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/sh
+ sudo mkdir -p /home/cirros/.ssh/
+ sudo chown cirros:cirros /home/cirros/.ssh/
+ sudo echo $ID_RSA > /home/cirros/.ssh/id_rsa.enc
+ sudo base64 -d /home/cirros/.ssh/id_rsa.enc > /home/cirros/.ssh/id_rsa
+ sudo chown cirros:cirros /home/cirros/.ssh/id_rsa
+ sudo echo $AUTH_KEYS > /home/cirros/.ssh/authorized_keys
+ sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys
+ chmod 700 /home/cirros/.ssh
+ chmod 644 /home/cirros/.ssh/authorized_keys
+ chmod 600 /home/cirros/.ssh/id_rsa
+ set $IP_VM1
+ echo will try to ssh to $IP_VM1
+ while true; do
+ for i do
+ ip=$i
+ hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa cirros@$ip 'hostname' </dev/zero 2>/dev/null)
+ RES=$?
+ if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;
+ else echo $ip 'not reachable';fi;
+ done
+ sleep 1
+ done
+ params:
+ $IP_VM1: { get_param: instance_1_ip }
+ $ID_RSA: { get_param: id_rsa_key }
+ $AUTH_KEYS: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e\
+ stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9\
+ sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU\
+ ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= cirros@test1"
+ $DROPBEAR_PASSWORD: gocubsgo
+outputs:
+ net_1_o:
+ description: the id of network 1
+ value: { get_attr: [net_1, show, id] }
+ net_2_o:
+ description: the id of network 2
+ value: { get_attr: [net_2, show, id] }
+
+ vm1_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm1, show, name] }
+ vm2_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm2, show, name] }
+ vm3_o:
+ description: dummy
+ value: { get_attr: [vm2, show, name] }
+ vm4_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm4, show, name] }
+ vm5_o:
+ description: dummy
+ value: { get_attr: [vm2, show, name] }
diff --git a/sdnvpn/artifacts/testcase_4bis.yaml b/sdnvpn/artifacts/testcase_4bis.yaml
new file mode 100644
index 0000000..ee59e1d
--- /dev/null
+++ b/sdnvpn/artifacts/testcase_4bis.yaml
@@ -0,0 +1,247 @@
+heat_template_version: 2013-05-23
+
+description: >
+ Template for SDNVPN testcase 4
+ VPN provides connectivity between subnets using router association
+
+parameters:
+ flavor:
+ type: string
+ description: flavor for the servers to be created
+ constraints:
+ - custom_constraint: nova.flavor
+ image_n:
+ type: string
+ description: image for the servers to be created
+ constraints:
+ - custom_constraint: glance.image
+ av_zone_1:
+ type: string
+ description: availability zone 1
+ av_zone_2:
+ type: string
+ description: availability zone 2
+
+ net_1_name:
+ type: string
+ description: network 1
+ subnet_1_name:
+ type: string
+ description: subnet 1 name
+ subnet_1_cidr:
+ type: string
+ description: subnet 1 cidr
+ router_1_name:
+ type: string
+ description: router 1 cidr
+ net_2_name:
+ type: string
+ description: network 2
+ subnet_2_name:
+ type: string
+ description: subnet 2 name
+ subnet_2_cidr:
+ type: string
+ description: subnet 1 cidr
+
+ secgroup_name:
+ type: string
+ description: security group name
+ secgroup_descr:
+ type: string
+ description: security group slogan
+
+ instance_1_name:
+ type: string
+ description: instance name
+ instance_2_name:
+ type: string
+ description: instance name
+ instance_3_name:
+ type: string
+ description: instance name
+ instance_4_name:
+ type: string
+ description: instance name
+ instance_5_name:
+ type: string
+ description: instance name
+
+ ping_count:
+ type: string
+ description: ping count for user data script
+ default: 10
+
+resources:
+ net_1:
+ type: OS::Neutron::Net
+ properties:
+ name: { get_param: net_1_name }
+ subnet_1:
+ type: OS::Neutron::Subnet
+ properties:
+ name: { get_param: subnet_1_name }
+ network: { get_resource: net_1 }
+ cidr: { get_param: subnet_1_cidr }
+ router_1:
+ type: OS::Neutron::Router
+ properties:
+ name: { get_param: router_1_name }
+ routerinterface_1:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router_1 }
+ subnet_id: { get_resource: subnet_1 }
+
+ net_2:
+ type: OS::Neutron::Net
+ properties:
+ name: { get_param: net_2_name }
+ subnet_2:
+ type: OS::Neutron::Subnet
+ properties:
+ name: { get_param: subnet_2_name }
+ network: { get_resource: net_2 }
+ cidr: { get_param: subnet_2_cidr }
+
+ sec_group:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name: { get_param: secgroup_name }
+ description: { get_param: secgroup_descr }
+ rules:
+ - protocol: icmp
+ remote_ip_prefix: 0.0.0.0/0
+ - protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ remote_ip_prefix: 0.0.0.0/0
+
+ vm1:
+ type: OS::Nova::Server
+ depends_on: [ vm2, vm3, vm4, vm5 ]
+ properties:
+ name: { get_param: instance_1_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_1 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - subnet: { get_resource: subnet_1 }
+ config_drive: True
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/sh
+ set $IP_VM2 $IP_VM3 $IP_VM4 $IP_VM5
+ while true; do
+ for i do
+ ip=$i
+ ping -c $COUNT $ip 2>&1 >/dev/null
+ RES=$?
+ if [ \"Z$RES\" = \"Z0\" ] ; then
+ echo ping $ip OK
+ else echo ping $ip KO
+ fi
+ done
+ sleep 1
+ done
+ params:
+ $IP_VM2: { get_attr: [vm2, addresses, { get_resource: net_1}, 0, addr] }
+ $IP_VM3: { get_attr: [vm3, addresses, { get_resource: net_1}, 0, addr] }
+ $IP_VM4: { get_attr: [vm4, addresses, { get_resource: net_2}, 0, addr] }
+ $IP_VM5: { get_attr: [vm5, addresses, { get_resource: net_2}, 0, addr] }
+ $COUNT: { get_param: ping_count }
+ vm2:
+ type: OS::Nova::Server
+ properties:
+ name: { get_param: instance_2_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_1 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - subnet: { get_resource: subnet_1 }
+ vm3:
+ type: OS::Nova::Server
+ properties:
+ name: { get_param: instance_3_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_2 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - subnet: { get_resource: subnet_1 }
+ vm4:
+ type: OS::Nova::Server
+ depends_on: vm5
+ properties:
+ name: { get_param: instance_4_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_1 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - subnet: { get_resource: subnet_2 }
+ config_drive: True
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/sh
+ set $IP_VM5
+ while true; do
+ for i do
+ ip=$i
+ ping -c $COUNT $ip 2>&1 >/dev/null
+ RES=$?
+ if [ \"Z$RES\" = \"Z0\" ] ; then
+ echo ping $ip OK
+ else echo ping $ip KO
+ fi
+ done
+ sleep 1
+ done
+ params:
+ $IP_VM5: { get_attr: [vm5, addresses, { get_resource: net_2}, 0, addr] }
+ $COUNT: { get_param: ping_count }
+
+ vm5:
+ type: OS::Nova::Server
+ properties:
+ name: { get_param: instance_5_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_2 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - subnet: { get_resource: subnet_2 }
+
+outputs:
+ router_1_o:
+ description: the id of network 1
+ value: { get_attr: [router_1, show, id] }
+ net_2_o:
+ description: the id of network 2
+ value: { get_attr: [net_2, show, id] }
+ vm1_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm1, show, name] }
+ vm2_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm2, show, name] }
+ vm3_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm3, show, name] }
+ vm4_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm4, show, name] }
+ vm5_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm5, show, name] }
diff --git a/sdnvpn/artifacts/testcase_8bis.yaml b/sdnvpn/artifacts/testcase_8bis.yaml
new file mode 100644
index 0000000..94853c3
--- /dev/null
+++ b/sdnvpn/artifacts/testcase_8bis.yaml
@@ -0,0 +1,173 @@
+heat_template_version: 2013-05-23
+
+description: >
+ Template for SDNVPN testcase 8bis
+ Test floating IP and router assoc coexistence
+
+parameters:
+ flavor:
+ type: string
+ description: flavor for the servers to be created
+ constraints:
+ - custom_constraint: nova.flavor
+ image_n:
+ type: string
+ description: image for the servers to be created
+ constraints:
+ - custom_constraint: glance.image
+ av_zone_1:
+ type: string
+ description: availability zone 1
+
+ external_nw:
+ type: string
+ description: the external network
+ net_1_name:
+ type: string
+ description: network 1
+ subnet_1_name:
+ type: string
+ description: subnet 1 name
+ subnet_1_cidr:
+ type: string
+ description: subnet 1 cidr
+ router_1_name:
+ type: string
+ description: router 1 name
+ net_2_name:
+ type: string
+ description: network 2
+ subnet_2_name:
+ type: string
+ description: subnet 2 name
+ subnet_2_cidr:
+ type: string
+ description: subnet 2 cidr
+
+ secgroup_name:
+ type: string
+ description: security group name
+ secgroup_descr:
+ type: string
+ description: security group slogan
+
+ instance_1_name:
+ type: string
+ description: instance name
+ instance_2_name:
+ type: string
+ description: instance name
+
+ ping_count:
+ type: string
+ description: ping count for user data script
+ default: 10
+
+resources:
+ router_1:
+ type: OS::Neutron::Router
+ properties:
+ name: { get_param: router_1_name }
+ external_gateway_info:
+ network: { get_param: external_nw }
+
+ net_1:
+ type: OS::Neutron::Net
+ properties:
+ name: { get_param: net_1_name }
+ subnet_1:
+ type: OS::Neutron::Subnet
+ properties:
+ name: { get_param: subnet_1_name }
+ network: { get_resource: net_1 }
+ cidr: { get_param: subnet_1_cidr }
+ routerinterface_1:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router_1 }
+ subnet_id: { get_resource: subnet_1 }
+
+ net_2:
+ type: OS::Neutron::Net
+ properties:
+ name: { get_param: net_2_name }
+ subnet_2:
+ type: OS::Neutron::Subnet
+ properties:
+ name: { get_param: subnet_2_name }
+ network: { get_resource: net_2 }
+ cidr: { get_param: subnet_2_cidr }
+
+ sec_group:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name: { get_param: secgroup_name }
+ description: { get_param: secgroup_descr }
+ rules:
+ - protocol: icmp
+ remote_ip_prefix: 0.0.0.0/0
+ - protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ remote_ip_prefix: 0.0.0.0/0
+
+ vm1:
+ type: OS::Nova::Server
+ depends_on: [ vm2 ]
+ properties:
+ name: { get_param: instance_1_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_1 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - subnet: { get_resource: subnet_1 }
+ config_drive: True
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/sh
+ set $IP_VM2
+ while true; do
+ for i do
+ ip=$i
+ ping -c $COUNT $ip 2>&1 >/dev/null
+ RES=$?
+ if [ \"Z$RES\" = \"Z0\" ] ; then
+ echo ping $ip OK
+ else echo ping $ip KO
+ fi
+ done
+ sleep 1
+ done
+ params:
+ $IP_VM2: { get_attr: [vm2, addresses, { get_resource: net_1}, 0, addr] }
+ $COUNT: { get_param: ping_count }
+ vm2:
+ type: OS::Nova::Server
+ properties:
+ name: { get_param: instance_2_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_1 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - subnet: { get_resource: subnet_2 }
+
+
+outputs:
+ router_1_o:
+ description: the id of router 1
+ value: { get_attr: [router_1, show, id] }
+ net_2_o:
+ description: the id of network 2
+ value: { get_attr: [net_2, show, id] }
+ vm1_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm1, show, name] }
+ vm2_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm2, show, name] }
diff --git a/sdnvpn/artifacts/testcase_8bis_upd.yaml b/sdnvpn/artifacts/testcase_8bis_upd.yaml
new file mode 100644
index 0000000..4661e8a
--- /dev/null
+++ b/sdnvpn/artifacts/testcase_8bis_upd.yaml
@@ -0,0 +1,17 @@
+heat_template_version: 2013-05-23
+
+resources:
+ fip_1:
+ type: OS::Neutron::FloatingIP
+ properties:
+ floating_network: { get_param: external_nw }
+ fip_1_assoc:
+ type: OS::Neutron::FloatingIPAssociation
+ properties:
+ floatingip_id: { get_resource: fip_1 }
+ port_id: {get_attr: [vm1, addresses, {get_resource: net_1}, 0, port]}
+
+outputs:
+ fip_1_o:
+ description: the floating IP for vm1
+ value: { get_attr: [fip_1, show, floating_ip_address] }
diff --git a/sdnvpn/lib/config.py b/sdnvpn/lib/config.py
index 9659fc3..847b41c 100644
--- a/sdnvpn/lib/config.py
+++ b/sdnvpn/lib/config.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
#
# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
@@ -7,11 +7,11 @@
#
# http://www.apache.org/licenses/LICENSE-2.0
-import yaml
import logging
+import yaml
import pkg_resources
-from functest.utils.constants import CONST
+from functest.utils import config
import functest.utils.functest_utils as ft_utils
logger = logging.getLogger('sdnvpn_test_config')
@@ -31,24 +31,29 @@ class CommonConfig(object):
'sdnvpn', 'test/functest/config.yaml')
self.keyfile_path = pkg_resources.resource_filename(
'sdnvpn', 'artifacts/id_rsa')
- self.test_db = CONST.results_test_db_url
self.quagga_setup_script_path = pkg_resources.resource_filename(
'sdnvpn', 'artifacts/quagga_setup.sh')
self.line_length = 90 # length for the summary table
self.vm_boot_timeout = 180
self.default_flavor = ft_utils.get_parameter_from_yaml(
"defaults.flavor", self.config_file)
- self.image_filename = CONST.openstack_image_file_name
- self.image_format = CONST.openstack_image_disk_format
- self.image_path = '{0}/{1}'.format(CONST.dir_functest_images,
- self.image_filename)
+ self.default_flavor_ram = 512
+ self.default_flavor_disk = 1
+ self.default_flavor_vcpus = 1
+ self.image_filename = getattr(
+ config.CONF, 'openstack_image_file_name')
+ self.image_format = getattr(
+ config.CONF, 'openstack_image_disk_format')
+ self.image_path = '{0}/{1}'.format(
+ getattr(config.CONF, 'dir_functest_images'),
+ self.image_filename)
# This is the ubuntu image used by sfc
# Basically vanilla ubuntu + some scripts in there
# We can use it to setup a quagga instance
# TODO does functest have an ubuntu image somewhere?
self.ubuntu_image_name = "sdnvpn-ubuntu"
self.ubuntu_image_path = '{0}/{1}'.format(
- CONST.dir_functest_images,
+ getattr(config.CONF, 'dir_functest_data'),
"ubuntu-16.04-server-cloudimg-amd64-disk1.img")
self.custom_flavor_name = 'm1.custom'
self.custom_flavor_ram = 1024
@@ -57,6 +62,7 @@ class CommonConfig(object):
self.neutron_nw_quota = -1
self.neutron_subnet_quota = -1
self.neutron_port_quota = -1
+ self.neutron_router_quota = -1
self.nova_instances_quota_class = -1
commonCfgInstance = None
diff --git a/sdnvpn/lib/gather_logs.py b/sdnvpn/lib/gather_logs.py
index ed95fac..cf37acf 100644
--- a/sdnvpn/lib/gather_logs.py
+++ b/sdnvpn/lib/gather_logs.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+#
# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
@@ -19,7 +21,7 @@ import inspect
import sdnvpn.lib.utils as test_utils
import functest.utils.functest_utils as ft_utils
-from functest.utils.constants import CONST
+from functest.utils import config
LIB_PATH = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe())))
@@ -44,10 +46,9 @@ def gather_logs(name):
'tar -xzvf /tmp/log_output-%s.tar.gz --strip-components=1'
% node.get_dict()['name'])
- ft_utils.execute_command_raise('cd %s;tar czvf sdnvpn-logs-%s.tar.gz'
- ' /tmp/sdnvpn-logs/'
- % (CONST.__getattribute__('dir_results'),
- name))
+ ft_utils.execute_command_raise(
+ 'cd %s;tar czvf sdnvpn-logs-%s.tar.gz /tmp/sdnvpn-logs/' % (
+ getattr(config.CONF, 'dir_results'), name))
if __name__ == '__main__':
diff --git a/sdnvpn/lib/openstack_utils.py b/sdnvpn/lib/openstack_utils.py
new file mode 100644
index 0000000..5fc1e49
--- /dev/null
+++ b/sdnvpn/lib/openstack_utils.py
@@ -0,0 +1,1455 @@
+#!/usr/bin/env python
+#
+# jose.lausuch@ericsson.com
+# valentin.boucher@orange.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+import base64
+import logging
+import os.path
+import shutil
+import sys
+import time
+import urllib
+
+from keystoneauth1 import loading
+from keystoneauth1 import session
+from keystoneclient import client as keystoneclient
+from neutronclient.neutron import client as neutronclient
+from openstack import connection
+from openstack import cloud as os_cloud
+from openstack.exceptions import ResourceNotFound
+
+from functest.utils import env
+
+logger = logging.getLogger(__name__)
+
+DEFAULT_API_VERSION = '2'
+
+
+# *********************************************
+# CREDENTIALS
+# *********************************************
+class MissingEnvVar(Exception):
+
+ def __init__(self, var):
+ self.var = var
+
+ def __str__(self):
+ return str.format("Please set the mandatory env var: {}", self.var)
+
+
+def get_os_connection():
+ return connection.from_config()
+
+
+def get_os_cloud():
+ return os_cloud.openstack_cloud()
+
+
+def is_keystone_v3():
+ keystone_api_version = os.getenv('OS_IDENTITY_API_VERSION')
+ if (keystone_api_version is None or
+ keystone_api_version == '2'):
+ return False
+ else:
+ return True
+
+
+def get_rc_env_vars():
+ env_vars = ['OS_AUTH_URL', 'OS_USERNAME', 'OS_PASSWORD']
+ if is_keystone_v3():
+ env_vars.extend(['OS_PROJECT_NAME',
+ 'OS_USER_DOMAIN_NAME',
+ 'OS_PROJECT_DOMAIN_NAME'])
+ else:
+ env_vars.extend(['OS_TENANT_NAME'])
+ return env_vars
+
+
+def check_credentials():
+ """
+ Check if the OpenStack credentials (openrc) are sourced
+ """
+ env_vars = get_rc_env_vars()
+ return all(map(lambda v: v in os.environ and os.environ[v], env_vars))
+
+
+def get_env_cred_dict():
+ env_cred_dict = {
+ 'OS_USERNAME': 'username',
+ 'OS_PASSWORD': 'password',
+ 'OS_AUTH_URL': 'auth_url',
+ 'OS_TENANT_NAME': 'tenant_name',
+ 'OS_USER_DOMAIN_NAME': 'user_domain_name',
+ 'OS_PROJECT_DOMAIN_NAME': 'project_domain_name',
+ 'OS_PROJECT_NAME': 'project_name',
+ 'OS_ENDPOINT_TYPE': 'endpoint_type',
+ 'OS_REGION_NAME': 'region_name',
+ 'OS_CACERT': 'https_cacert',
+ 'OS_INSECURE': 'https_insecure'
+ }
+ return env_cred_dict
+
+
+def get_credentials(other_creds={}):
+ """Returns a creds dictionary filled with parsed from env
+ """
+ creds = {}
+ env_vars = get_rc_env_vars()
+ env_cred_dict = get_env_cred_dict()
+
+ for envvar in env_vars:
+ if os.getenv(envvar) is None:
+ raise MissingEnvVar(envvar)
+ else:
+ creds_key = env_cred_dict.get(envvar)
+ creds.update({creds_key: os.getenv(envvar)})
+
+ if 'tenant' in other_creds.keys():
+ if is_keystone_v3():
+ tenant = 'project_name'
+ else:
+ tenant = 'tenant_name'
+ other_creds[tenant] = other_creds.pop('tenant')
+
+ creds.update(other_creds)
+
+ return creds
+
+
+def get_session_auth(other_creds={}):
+ loader = loading.get_plugin_loader('password')
+ creds = get_credentials(other_creds)
+ auth = loader.load_from_options(**creds)
+ return auth
+
+
+def get_endpoint(service_type, interface='public'):
+ auth = get_session_auth()
+ return get_session().get_endpoint(auth=auth,
+ service_type=service_type,
+ interface=interface)
+
+
+def get_session(other_creds={}):
+ auth = get_session_auth(other_creds)
+ https_cacert = os.getenv('OS_CACERT', '')
+ https_insecure = os.getenv('OS_INSECURE', '').lower() == 'true'
+ return session.Session(auth=auth,
+ verify=(https_cacert or not https_insecure))
+
+
+# *********************************************
+# CLIENTS
+# *********************************************
+def get_keystone_client_version():
+ api_version = os.getenv('OS_IDENTITY_API_VERSION')
+ if api_version is not None:
+ logger.info("OS_IDENTITY_API_VERSION is set in env as '%s'",
+ api_version)
+ return api_version
+ return DEFAULT_API_VERSION
+
+
+def get_keystone_client(other_creds={}):
+ sess = get_session(other_creds)
+ return keystoneclient.Client(get_keystone_client_version(),
+ session=sess,
+ interface=os.getenv('OS_INTERFACE', 'admin'))
+
+
+def get_neutron_client_version():
+ api_version = os.getenv('OS_NETWORK_API_VERSION')
+ if api_version is not None:
+ logger.info("OS_NETWORK_API_VERSION is set in env as '%s'",
+ api_version)
+ return api_version
+ return DEFAULT_API_VERSION
+
+
+def get_neutron_client(other_creds={}):
+ sess = get_session(other_creds)
+ return neutronclient.Client(get_neutron_client_version(), session=sess)
+
+
+def download_url(url, dest_path):
+ """
+ Download a file to a destination path given a URL
+ """
+ name = url.rsplit('/')[-1]
+ dest = dest_path + "/" + name
+ try:
+ response = urllib.urlopen(url)
+ except Exception:
+ return False
+
+ with open(dest, 'wb') as lfile:
+ shutil.copyfileobj(response, lfile)
+ return True
+
+
+def download_and_add_image_on_glance(conn, image_name, image_url, data_dir):
+ try:
+ dest_path = data_dir
+ if not os.path.exists(dest_path):
+ os.makedirs(dest_path)
+ file_name = image_url.rsplit('/')[-1]
+ if not download_url(image_url, dest_path):
+ return False
+ except Exception:
+ raise Exception("Impossible to download image from {}".format(
+ image_url))
+
+ try:
+ image = create_glance_image(
+ conn, image_name, dest_path + file_name)
+ if not image:
+ return False
+ else:
+ return image
+ except Exception:
+ raise Exception("Impossible to put image {} in glance".format(
+ image_name))
+
+
+# *********************************************
+# NOVA
+# *********************************************
+def get_instances(conn):
+ try:
+ instances = conn.compute.servers(all_tenants=1)
+ return instances
+ except Exception as e:
+ logger.error("Error [get_instances(compute)]: %s" % e)
+ return None
+
+
+def get_instance_status(conn, instance):
+ try:
+ instance = conn.compute.get_server(instance.id)
+ return instance.status
+ except Exception as e:
+ logger.error("Error [get_instance_status(compute)]: %s" % e)
+ return None
+
+
+def get_instance_by_name(conn, instance_name):
+ try:
+ instance = conn.compute.find_server(instance_name,
+ ignore_missing=False)
+ return instance
+ except Exception as e:
+ logger.error("Error [get_instance_by_name(compute, '%s')]: %s"
+ % (instance_name, e))
+ return None
+
+
+def get_flavor_id(conn, flavor_name):
+ flavors = conn.compute.flavors()
+ id = ''
+ for f in flavors:
+ if f.name == flavor_name:
+ id = f.id
+ break
+ return id
+
+
+def get_flavor_id_by_ram_range(conn, min_ram, max_ram):
+ flavors = conn.compute.flavors()
+ id = ''
+ for f in flavors:
+ if min_ram <= f.ram and f.ram <= max_ram:
+ id = f.id
+ break
+ return id
+
+
+def get_aggregates(cloud):
+ try:
+ aggregates = cloud.list_aggregates()
+ return aggregates
+ except Exception as e:
+ logger.error("Error [get_aggregates(compute)]: %s" % e)
+ return None
+
+
+def get_aggregate_id(cloud, aggregate_name):
+ try:
+ aggregates = get_aggregates(cloud)
+ _id = [ag.id for ag in aggregates if ag.name == aggregate_name][0]
+ return _id
+ except Exception as e:
+ logger.error("Error [get_aggregate_id(compute, %s)]:"
+ " %s" % (aggregate_name, e))
+ return None
+
+
+def get_availability_zones(conn):
+ try:
+ availability_zones = conn.compute.availability_zones()
+ return availability_zones
+ except Exception as e:
+ logger.error("Error [get_availability_zones(compute)]: %s" % e)
+ return None
+
+
+def get_availability_zone_names(conn):
+ try:
+ az_names = [az.zoneName for az in get_availability_zones(conn)]
+ return az_names
+ except Exception as e:
+ logger.error("Error [get_availability_zone_names(compute)]:"
+ " %s" % e)
+ return None
+
+
+def create_flavor(conn, flavor_name, ram, disk, vcpus, public=True):
+ try:
+ flavor = conn.compute.create_flavor(
+ name=flavor_name, ram=ram, disk=disk, vcpus=vcpus,
+ is_public=public)
+ except Exception as e:
+ logger.error("Error [create_flavor(compute, '%s', '%s', '%s', "
+ "'%s')]: %s" % (flavor_name, ram, disk, vcpus, e))
+ return None
+ return flavor.id
+
+
+def get_or_create_flavor(flavor_name, ram, disk, vcpus, public=True):
+ flavor_exists = False
+ conn = get_os_connection()
+
+ flavor_id = get_flavor_id(conn, flavor_name)
+ if flavor_id != '':
+ logger.info("Using existing flavor '%s'..." % flavor_name)
+ flavor_exists = True
+ else:
+ logger.info("Creating flavor '%s' with '%s' RAM, '%s' disk size, "
+ "'%s' vcpus..." % (flavor_name, ram, disk, vcpus))
+ flavor_id = create_flavor(
+ conn, flavor_name, ram, disk, vcpus, public=public)
+ if not flavor_id:
+ raise Exception("Failed to create flavor '%s'..." % (flavor_name))
+ else:
+ logger.debug("Flavor '%s' with ID=%s created successfully."
+ % (flavor_name, flavor_id))
+
+ return flavor_exists, flavor_id
+
+
+def get_floating_ips(conn):
+ try:
+ floating_ips = conn.network.ips()
+ return floating_ips
+ except Exception as e:
+ logger.error("Error [get_floating_ips(network)]: %s" % e)
+ return None
+
+
+def get_hypervisors(conn):
+ try:
+ nodes = []
+ hypervisors = conn.compute.hypervisors()
+ for hypervisor in hypervisors:
+ if hypervisor.state == "up":
+ nodes.append(hypervisor.name)
+ return nodes
+ except Exception as e:
+ logger.error("Error [get_hypervisors(compute)]: %s" % e)
+ return None
+
+
+def create_aggregate(cloud, aggregate_name, av_zone):
+ try:
+ cloud.create_aggregate(aggregate_name, av_zone)
+ return True
+ except Exception as e:
+ logger.error("Error [create_aggregate(compute, %s, %s)]: %s"
+ % (aggregate_name, av_zone, e))
+ return None
+
+
+def add_host_to_aggregate(cloud, aggregate_name, compute_host):
+ try:
+ aggregate_id = get_aggregate_id(cloud, aggregate_name)
+ cloud.add_host_to_aggregate(aggregate_id, compute_host)
+ return True
+ except Exception as e:
+ logger.error("Error [add_host_to_aggregate(compute, %s, %s)]: %s"
+ % (aggregate_name, compute_host, e))
+ return None
+
+
+def create_aggregate_with_host(
+ cloud, aggregate_name, av_zone, compute_host):
+ try:
+ create_aggregate(cloud, aggregate_name, av_zone)
+ add_host_to_aggregate(cloud, aggregate_name, compute_host)
+ return True
+ except Exception as e:
+ logger.error("Error [create_aggregate_with_host("
+ "compute, %s, %s, %s)]: %s"
+ % (aggregate_name, av_zone, compute_host, e))
+ return None
+
+
+def create_instance(flavor_name,
+ image_id,
+ network_id,
+ instance_name="functest-vm",
+ confdrive=True,
+ userdata=None,
+ av_zone=None,
+ fixed_ip=None,
+ files=[]):
+ conn = get_os_connection()
+ try:
+ flavor = conn.compute.find_flavor(flavor_name, ignore_missing=False)
+ except Exception:
+ flavors = [flavor.name for flavor in conn.compute.flavors()]
+ logger.error("Error: Flavor '%s' not found. Available flavors are: "
+ "\n%s" % (flavor_name, flavors))
+ return None
+ if fixed_ip is not None:
+ networks = {"uuid": network_id, "fixed_ip": fixed_ip}
+ else:
+ networks = {"uuid": network_id}
+
+ server_attrs = {
+ 'name': instance_name,
+ 'flavor_id': flavor.id,
+ 'image_id': image_id,
+ 'networks': [networks],
+ 'personality': files
+ }
+ if userdata is not None:
+ server_attrs['config_drive'] = confdrive
+ server_attrs['user_data'] = base64.b64encode(userdata.encode())
+ if av_zone is not None:
+ server_attrs['availability_zone'] = av_zone
+
+ instance = conn.compute.create_server(**server_attrs)
+ return instance
+
+
+def create_instance_and_wait_for_active(flavor_name,
+ image_id,
+ network_id,
+ instance_name="",
+ config_drive=False,
+ userdata="",
+ av_zone=None,
+ fixed_ip=None,
+ files=[]):
+ SLEEP = 3
+ VM_BOOT_TIMEOUT = 180
+ conn = get_os_connection()
+ instance = create_instance(flavor_name,
+ image_id,
+ network_id,
+ instance_name,
+ config_drive,
+ userdata,
+ av_zone=av_zone,
+ fixed_ip=fixed_ip,
+ files=files)
+ count = VM_BOOT_TIMEOUT / SLEEP
+ for n in range(count, -1, -1):
+ status = get_instance_status(conn, instance)
+ if status is None:
+ time.sleep(SLEEP)
+ continue
+ elif status.lower() == "active":
+ return instance
+ elif status.lower() == "error":
+ logger.error("The instance %s went to ERROR status."
+ % instance_name)
+ return None
+ time.sleep(SLEEP)
+ logger.error("Timeout booting the instance %s." % instance_name)
+ return None
+
+
+def create_floating_ip(conn):
+ extnet_id = get_external_net_id(conn)
+ try:
+ fip = conn.network.create_ip(floating_network_id=extnet_id)
+ fip_addr = fip.floating_ip_address
+ fip_id = fip.id
+ except Exception as e:
+ logger.error("Error [create_floating_ip(network)]: %s" % e)
+ return None
+ return {'fip_addr': fip_addr, 'fip_id': fip_id}
+
+
+def attach_floating_ip(conn, port_id):
+ extnet_id = get_external_net_id(conn)
+ try:
+ return conn.network.create_ip(floating_network_id=extnet_id,
+ port_id=port_id)
+ except Exception as e:
+ logger.error("Error [Attach_floating_ip(network), %s]: %s"
+ % (port_id, e))
+ return None
+
+
+def add_floating_ip(conn, server_id, floatingip_addr):
+ try:
+ conn.compute.add_floating_ip_to_server(server_id, floatingip_addr)
+ return True
+ except Exception as e:
+ logger.error("Error [add_floating_ip(compute, '%s', '%s')]: %s"
+ % (server_id, floatingip_addr, e))
+ return False
+
+
+def delete_instance(conn, instance_id):
+ try:
+ conn.compute.delete_server(instance_id, force=True)
+ return True
+ except Exception as e:
+ logger.error("Error [delete_instance(compute, '%s')]: %s"
+ % (instance_id, e))
+ return False
+
+
+def delete_floating_ip(conn, floatingip_id):
+ try:
+ conn.network.delete_ip(floatingip_id)
+ return True
+ except Exception as e:
+ logger.error("Error [delete_floating_ip(network, '%s')]: %s"
+ % (floatingip_id, e))
+ return False
+
+
+def remove_host_from_aggregate(cloud, aggregate_name, compute_host):
+ try:
+ aggregate_id = get_aggregate_id(cloud, aggregate_name)
+ cloud.remove_host_from_aggregate(aggregate_id, compute_host)
+ return True
+ except Exception as e:
+ logger.error("Error [remove_host_from_aggregate(compute, %s, %s)]:"
+ " %s" % (aggregate_name, compute_host, e))
+ return False
+
+
+def remove_hosts_from_aggregate(cloud, aggregate_name):
+ aggregate_id = get_aggregate_id(cloud, aggregate_name)
+ hosts = cloud.get_aggregate(aggregate_id).hosts
+ assert(
+ all(remove_host_from_aggregate(cloud, aggregate_name, host)
+ for host in hosts))
+
+
+def delete_aggregate(cloud, aggregate_name):
+ try:
+ remove_hosts_from_aggregate(cloud, aggregate_name)
+ cloud.delete_aggregate(aggregate_name)
+ return True
+ except Exception as e:
+ logger.error("Error [delete_aggregate(compute, %s)]: %s"
+ % (aggregate_name, e))
+ return False
+
+
+# *********************************************
+# NEUTRON
+# *********************************************
+def get_network_list(conn):
+ return conn.network.networks()
+
+
+def get_router_list(conn):
+ return conn.network.routers()
+
+
+def get_port_list(conn):
+ return conn.network.ports()
+
+
+def get_network_id(conn, network_name):
+ networks = conn.network.networks()
+ id = ''
+ for n in networks:
+ if n.name == network_name:
+ id = n.id
+ break
+ return id
+
+
+def get_subnet_id(conn, subnet_name):
+ subnets = conn.network.subnets()
+ id = ''
+ for s in subnets:
+ if s.name == subnet_name:
+ id = s.id
+ break
+ return id
+
+
+def get_router_id(conn, router_name):
+ routers = conn.network.routers()
+ id = ''
+ for r in routers:
+ if r.name == router_name:
+ id = r.id
+ break
+ return id
+
+
+def get_private_net(conn):
+ # Checks if there is an existing shared private network
+ networks = conn.network.networks()
+ for net in networks:
+ if (net.is_router_external is False) and (net.is_shared is True):
+ return net
+ return None
+
+
+def get_external_net(conn):
+ if (env.get('EXTERNAL_NETWORK')):
+ return env.get('EXTERNAL_NETWORK')
+ for network in conn.network.networks():
+ if network.is_router_external:
+ return network.name
+ return None
+
+
+def get_external_net_id(conn):
+ if (env.get('EXTERNAL_NETWORK')):
+ networks = conn.network.networks(name=env.get('EXTERNAL_NETWORK'))
+ net_id = networks.next().id
+ return net_id
+ for network in conn.network.networks():
+ if network.is_router_external:
+ return network.id
+ return None
+
+
+def check_neutron_net(conn, net_name):
+ for network in conn.network.networks():
+ if network.name == net_name:
+ for subnet in network.subnet_ids:
+ return True
+ return False
+
+
+def create_neutron_net(conn, name):
+ try:
+ network = conn.network.create_network(name=name)
+ return network.id
+ except Exception as e:
+ logger.error("Error [create_neutron_net(network, '%s')]: %s"
+ % (name, e))
+ return None
+
+
+def create_neutron_subnet(conn, name, cidr, net_id,
+ dns=['8.8.8.8', '8.8.4.4']):
+ try:
+ subnet = conn.network.create_subnet(name=name,
+ cidr=cidr,
+ ip_version='4',
+ network_id=net_id,
+ dns_nameservers=dns)
+ return subnet.id
+ except Exception as e:
+ logger.error("Error [create_neutron_subnet(network, '%s', "
+ "'%s', '%s')]: %s" % (name, cidr, net_id, e))
+ return None
+
+
+def create_neutron_router(conn, name):
+ try:
+ router = conn.network.create_router(name=name)
+ return router.id
+ except Exception as e:
+ logger.error("Error [create_neutron_router(network, '%s')]: %s"
+ % (name, e))
+ return None
+
+
+def create_neutron_port(conn, name, network_id, ip):
+ try:
+ port = conn.network.create_port(name=name,
+ network_id=network_id,
+ fixed_ips=[{'ip_address': ip}])
+ return port.id
+ except Exception as e:
+ logger.error("Error [create_neutron_port(network, '%s', '%s', "
+ "'%s')]: %s" % (name, network_id, ip, e))
+ return None
+
+
+def update_neutron_net(conn, network_id, shared=False):
+ try:
+ conn.network.update_network(network_id, is_shared=shared)
+ return True
+ except Exception as e:
+ logger.error("Error [update_neutron_net(network, '%s', '%s')]: "
+ "%s" % (network_id, str(shared), e))
+ return False
+
+
+def update_neutron_port(conn, port_id, device_owner):
+ try:
+ port = conn.network.update_port(port_id, device_owner=device_owner)
+ return port.id
+ except Exception as e:
+ logger.error("Error [update_neutron_port(network, '%s', '%s')]:"
+ " %s" % (port_id, device_owner, e))
+ return None
+
+
+def add_interface_router(conn, router_id, subnet_id):
+ try:
+ conn.network.add_interface_to_router(router_id, subnet_id=subnet_id)
+ return True
+ except Exception as e:
+ logger.error("Error [add_interface_router(network, '%s', "
+ "'%s')]: %s" % (router_id, subnet_id, e))
+ return False
+
+
+def add_gateway_router(conn, router_id):
+ ext_net_id = get_external_net_id(conn)
+ router_dict = {'network_id': ext_net_id}
+ try:
+ conn.network.update_router(router_id,
+ external_gateway_info=router_dict)
+ return True
+ except Exception as e:
+ logger.error("Error [add_gateway_router(network, '%s')]: %s"
+ % (router_id, e))
+ return False
+
+
+def delete_neutron_net(conn, network_id):
+ try:
+ conn.network.delete_network(network_id, ignore_missing=False)
+ return True
+ except Exception as e:
+ logger.error("Error [delete_neutron_net(network, '%s')]: %s"
+ % (network_id, e))
+ return False
+
+
+def delete_neutron_subnet(conn, subnet_id):
+ try:
+ conn.network.delete_subnet(subnet_id, ignore_missing=False)
+ return True
+ except Exception as e:
+ logger.error("Error [delete_neutron_subnet(network, '%s')]: %s"
+ % (subnet_id, e))
+ return False
+
+
+def delete_neutron_router(conn, router_id):
+ try:
+ conn.network.delete_router(router_id, ignore_missing=False)
+ return True
+ except Exception as e:
+ logger.error("Error [delete_neutron_router(network, '%s')]: %s"
+ % (router_id, e))
+ return False
+
+
+def delete_neutron_port(conn, port_id):
+ try:
+ conn.network.delete_port(port_id, ignore_missing=False)
+ return True
+ except Exception as e:
+ logger.error("Error [delete_neutron_port(network, '%s')]: %s"
+ % (port_id, e))
+ return False
+
+
+def remove_interface_router(conn, router_id, subnet_id):
+ try:
+ conn.network.remove_interface_from_router(router_id,
+ subnet_id=subnet_id)
+ return True
+ except Exception as e:
+ logger.error("Error [remove_interface_router(network, '%s', "
+ "'%s')]: %s" % (router_id, subnet_id, e))
+ return False
+
+
+def remove_gateway_router(conn, router_id):
+ try:
+ conn.network.update_router(router_id, external_gateway_info=None)
+ return True
+ except Exception as e:
+ logger.error("Error [remove_gateway_router(network, '%s')]: %s"
+ % (router_id, e))
+ return False
+
+
+def create_network_full(conn,
+ net_name,
+ subnet_name,
+ router_name,
+ cidr,
+ dns=['8.8.8.8', '8.8.4.4']):
+
+ # Check if the network already exists
+ network_id = get_network_id(conn, net_name)
+ subnet_id = get_subnet_id(conn, subnet_name)
+ router_id = get_router_id(conn, router_name)
+
+ if network_id != '' and subnet_id != '' and router_id != '':
+ logger.info("A network with name '%s' already exists..." % net_name)
+ else:
+ logger.info('Creating neutron network %s...' % net_name)
+ if network_id == '':
+ network_id = create_neutron_net(conn, net_name)
+ if not network_id:
+ return False
+ logger.debug("Network '%s' created successfully" % network_id)
+
+ logger.debug('Creating Subnet....')
+ if subnet_id == '':
+ subnet_id = create_neutron_subnet(conn, subnet_name, cidr,
+ network_id, dns)
+ if not subnet_id:
+ return None
+ logger.debug("Subnet '%s' created successfully" % subnet_id)
+
+ logger.debug('Creating Router...')
+ if router_id == '':
+ router_id = create_neutron_router(conn, router_name)
+ if not router_id:
+ return None
+ logger.debug("Router '%s' created successfully" % router_id)
+
+ logger.debug('Adding router to subnet...')
+
+ if not add_interface_router(conn, router_id, subnet_id):
+ return None
+ logger.debug("Interface added successfully.")
+
+ logger.debug('Adding gateway to router...')
+ if not add_gateway_router(conn, router_id):
+ return None
+ logger.debug("Gateway added successfully.")
+
+ network_dic = {'net_id': network_id,
+ 'subnet_id': subnet_id,
+ 'router_id': router_id}
+ return network_dic
+
+
+def create_shared_network_full(net_name, subnt_name, router_name, subnet_cidr):
+ conn = get_os_connection()
+
+ network_dic = create_network_full(conn,
+ net_name,
+ subnt_name,
+ router_name,
+ subnet_cidr)
+ if network_dic:
+ if not update_neutron_net(conn,
+ network_dic['net_id'],
+ shared=True):
+ logger.error("Failed to update network %s..." % net_name)
+ return None
+ else:
+ logger.debug("Network '%s' is available..." % net_name)
+ else:
+ logger.error("Network %s creation failed" % net_name)
+ return None
+ return network_dic
+
+
+# *********************************************
+# SEC GROUPS
+# *********************************************
+
+
+def get_security_groups(conn):
+ return conn.network.security_groups()
+
+
+def get_security_group_id(conn, sg_name):
+ security_groups = get_security_groups(conn)
+ id = ''
+ for sg in security_groups:
+ if sg.name == sg_name:
+ id = sg.id
+ break
+ return id
+
+
+def create_security_group(conn, sg_name, sg_description):
+ try:
+ secgroup = conn.network.\
+ create_security_group(name=sg_name, description=sg_description)
+ return secgroup
+ except Exception as e:
+ logger.error("Error [create_security_group(network, '%s', "
+ "'%s')]: %s" % (sg_name, sg_description, e))
+ return None
+
+
+def create_secgroup_rule(conn, sg_id, direction, protocol,
+                         port_range_min=None, port_range_max=None):
+    """Create one security group rule on sg_id.
+
+    :param direction: 'ingress' or 'egress'
+    :param protocol: e.g. 'tcp', 'icmp'
+    :param port_range_min: lower port bound; must be given together with
+                           port_range_max or not at all
+    :param port_range_max: upper port bound (same pairing rule)
+    :return: True on success, False on bad arguments or API failure
+    """
+    # We create a security group in 2 steps
+    # 1 - we check the format and set the secgroup rule attributes accordingly
+    # 2 - we call openstacksdk to create the security group
+
+    # Format check
+    secgroup_rule_attrs = {'direction': direction,
+                           'security_group_id': sg_id,
+                           'protocol': protocol}
+    # parameters may be
+    # - both None => we do nothing
+    # - both Not None => we add them to the secgroup rule attributes
+    # but one cannot be None is the other is not None
+    if (port_range_min is not None and port_range_max is not None):
+        # add port_range in secgroup rule attributes
+        secgroup_rule_attrs['port_range_min'] = port_range_min
+        secgroup_rule_attrs['port_range_max'] = port_range_max
+        logger.debug("Security_group format set (port range included)")
+    else:
+        # either both port range are set to None => do nothing
+        # or one is set but not the other => log it and return False
+        if port_range_min is None and port_range_max is None:
+            logger.debug("Security_group format set (no port range mentioned)")
+        else:
+            logger.error("Bad security group format."
+                         "One of the port range is not properly set:"
+                         "range min: {},"
+                         "range max: {}".format(port_range_min,
+                                                port_range_max))
+            return False
+
+    # Create security group using neutron client
+    try:
+        conn.network.create_security_group_rule(**secgroup_rule_attrs)
+        return True
+    except Exception:
+        # A duplicate rule raises; treated as a soft failure on purpose.
+        logger.exception("Impossible to create_security_group_rule,"
+                         "security group rule probably already exists")
+        return False
+
+
+def get_security_group_rules(conn, sg_id):
+    """Return the list of rules belonging to security group sg_id.
+
+    :return: list of rule objects (possibly empty), or None on error
+    """
+    try:
+        security_rules = conn.network.security_group_rules()
+        # The SDK call lists every rule; filter down to this group only.
+        security_rules = [rule for rule in security_rules
+                          if rule.security_group_id == sg_id]
+        return security_rules
+    except Exception as e:
+        logger.error("Error [get_security_group_rules(network, sg_id)]:"
+                     " %s" % e)
+        return None
+
+
+def check_security_group_rules(conn, sg_id, direction, protocol,
+                               port_min=None, port_max=None):
+    """Check whether a rule with the given spec is absent from sg_id.
+
+    Despite the name, True means "no identical rule exists yet" (i.e. it is
+    safe to create one), False means an identical rule is already present,
+    and None means the lookup itself failed.
+    """
+    try:
+        security_rules = get_security_group_rules(conn, sg_id)
+        # NOTE(review): rule.protocol may be None for all-protocol rules;
+        # .lower() would then raise and be logged as an error below --
+        # confirm whether such rules should count as non-matching instead.
+        security_rules = [rule for rule in security_rules
+                          if (rule.direction.lower() == direction and
+                              rule.protocol.lower() == protocol and
+                              rule.port_range_min == port_min and
+                              rule.port_range_max == port_max)]
+        if len(security_rules) == 0:
+            return True
+        else:
+            return False
+    except Exception as e:
+        logger.error("Error [check_security_group_rules("
+                     " network, sg_id, direction,"
+                     " protocol, port_min=None, port_max=None)]: "
+                     "%s" % e)
+        return None
+
+
+def create_security_group_full(conn,
+                               sg_name, sg_description):
+    """Get or create a security group preloaded with ICMP and SSH rules.
+
+    Reuses an existing group with the same name; otherwise creates it and
+    adds ingress ICMP plus ingress/egress TCP/22 rules.
+
+    :return: the security group id, or None on any failure
+    """
+    sg_id = get_security_group_id(conn, sg_name)
+    if sg_id != '':
+        logger.info("Using existing security group '%s'..." % sg_name)
+    else:
+        logger.info("Creating security group '%s'..." % sg_name)
+        SECGROUP = create_security_group(conn,
+                                         sg_name,
+                                         sg_description)
+        if not SECGROUP:
+            logger.error("Failed to create the security group...")
+            return None
+
+        sg_id = SECGROUP.id
+
+        logger.debug("Security group '%s' with ID=%s created successfully."
+                     % (SECGROUP.name, sg_id))
+
+        logger.debug("Adding ICMP rules in security group '%s'..."
+                     % sg_name)
+        if not create_secgroup_rule(conn, sg_id,
+                                    'ingress', 'icmp'):
+            logger.error("Failed to create the security group rule...")
+            return None
+
+        logger.debug("Adding SSH rules in security group '%s'..."
+                     % sg_name)
+        if not create_secgroup_rule(
+                conn, sg_id, 'ingress', 'tcp', '22', '22'):
+            logger.error("Failed to create the security group rule...")
+            return None
+
+        if not create_secgroup_rule(
+                conn, sg_id, 'egress', 'tcp', '22', '22'):
+            logger.error("Failed to create the security group rule...")
+            return None
+    return sg_id
+
+
+def add_secgroup_to_instance(conn, instance_id, secgroup_id):
+    """Attach security group secgroup_id to server instance_id.
+
+    :return: True on success, False on error
+    """
+    try:
+        conn.compute.add_security_group_to_server(instance_id, secgroup_id)
+        return True
+    except Exception as e:
+        logger.error("Error [add_secgroup_to_instance(compute, '%s', "
+                     "'%s')]: %s" % (instance_id, secgroup_id, e))
+        return False
+
+
+def update_sg_quota(conn, tenant_id, sg_quota, sg_rule_quota):
+    """Set the security-group and rule quotas for a tenant.
+
+    :return: True on success, False on error
+    """
+    try:
+        conn.network.update_quota(tenant_id,
+                                  security_group_rules=sg_rule_quota,
+                                  security_groups=sg_quota)
+        return True
+    except Exception as e:
+        logger.error("Error [update_sg_quota(network, '%s', '%s', "
+                     "'%s')]: %s" % (tenant_id, sg_quota, sg_rule_quota, e))
+        return False
+
+
+def delete_security_group(conn, secgroup_id):
+    """Delete security group secgroup_id (missing group is an error).
+
+    :return: True on success, False on error
+    """
+    try:
+        conn.network.delete_security_group(secgroup_id, ignore_missing=False)
+        return True
+    except Exception as e:
+        logger.error("Error [delete_security_group(network, '%s')]: %s"
+                     % (secgroup_id, e))
+        return False
+
+
+# *********************************************
+# GLANCE
+# *********************************************
+def get_images(conn):
+    """Return the generator of all Glance images, or None on error."""
+    try:
+        images = conn.image.images()
+        return images
+    except Exception as e:
+        logger.error("Error [get_images]: %s" % e)
+        return None
+
+
+def get_image_id(conn, image_name):
+ images = conn.image.images()
+ id = ''
+ for i in images:
+ if i.name == image_name:
+ id = i.id
+ break
+ return id
+
+
+def create_glance_image(conn,
+ image_name,
+ file_path,
+ disk="qcow2",
+ extra_properties={},
+ container="bare",
+ public="public"):
+ if not os.path.isfile(file_path):
+ logger.error("Error: file %s does not exist." % file_path)
+ return None
+ try:
+ image_id = get_image_id(conn, image_name)
+ if image_id != '':
+ logger.info("Image %s already exists." % image_name)
+ else:
+ logger.info("Creating image '%s' from '%s'..." % (image_name,
+ file_path))
+ with open(file_path) as image_data:
+ image = conn.image.upload_image(name=image_name,
+ is_public=public,
+ disk_format=disk,
+ container_format=container,
+ data=image_data,
+ **extra_properties)
+ image_id = image.id
+ return image_id
+ except Exception as e:
+ logger.error("Error [create_glance_image(image, '%s', '%s', "
+ "'%s')]: %s" % (image_name, file_path, public, e))
+ return None
+
+
+def get_or_create_image(name, path, format, extra_properties):
+    """Reuse an existing Glance image by name, or create it from path.
+
+    :return: tuple (image_exists, image_id) where image_exists is True when
+             a pre-existing image was reused; image_id may be None/'' on
+             creation failure
+    """
+    image_exists = False
+    conn = get_os_connection()
+
+    image_id = get_image_id(conn, name)
+    if image_id != '':
+        logger.info("Using existing image '%s'..." % name)
+        image_exists = True
+    else:
+        logger.info("Creating image '%s' from '%s'..." % (name, path))
+        image_id = create_glance_image(conn,
+                                       name,
+                                       path,
+                                       format,
+                                       extra_properties)
+        if not image_id:
+            logger.error("Failed to create a Glance image...")
+        else:
+            logger.debug("Image '%s' with ID=%s created successfully."
+                         % (name, image_id))
+
+    return image_exists, image_id
+
+
+def delete_glance_image(conn, image_id):
+    """Delete Glance image image_id; True on success, False on error."""
+    try:
+        conn.image.delete_image(image_id)
+        return True
+    except Exception as e:
+        logger.error("Error [delete_glance_image(image, '%s')]: %s"
+                     % (image_id, e))
+        return False
+
+
+# *********************************************
+# CINDER
+# *********************************************
+def get_volumes(conn):
+    """Return all Cinder volumes across tenants, or None on error."""
+    try:
+        volumes = conn.block_store.volumes(all_tenants=1)
+        return volumes
+    except Exception as e:
+        logger.error("Error [get_volumes(volume)]: %s" % e)
+        return None
+
+
+def update_cinder_quota(cloud, tenant_id, vols_quota,
+                        snapshots_quota, gigabytes_quota):
+    """Set the volume, snapshot and gigabyte quotas for a tenant.
+
+    :return: True on success, False on error
+    """
+    quotas_values = {"volumes": vols_quota,
+                     "snapshots": snapshots_quota,
+                     "gigabytes": gigabytes_quota}
+
+    try:
+        cloud.set_volume_quotas(tenant_id, **quotas_values)
+        return True
+    except Exception as e:
+        logger.error("Error [update_cinder_quota(volume, '%s', '%s', "
+                     "'%s' '%s')]: %s" % (tenant_id, vols_quota,
+                                          snapshots_quota, gigabytes_quota, e))
+        return False
+
+
+def delete_volume(cloud, volume_id, forced=False):
+    """Delete a Cinder volume, optionally detaching it from servers first.
+
+    :param forced: when True, best-effort detach from every attached server
+                   and pass force=True to the delete call
+    :return: True on success, False on error
+    """
+    try:
+        if forced:
+            try:
+                volume = cloud.get_volume(volume_id)
+                for attachment in volume.attachments:
+                    server = cloud.get_server(attachment.server_id)
+                    cloud.detach_volume(server, volume)
+            except Exception:
+                # Detach is best-effort; log the exception type and go on.
+                logger.error(sys.exc_info()[0])
+            cloud.delete_volume(volume_id, force=True)
+        else:
+            cloud.delete_volume(volume_id)
+        return True
+    except Exception as e:
+        logger.error("Error [delete_volume(volume, '%s', '%s')]: %s"
+                     % (volume_id, str(forced), e))
+        return False
+
+
+# *********************************************
+# KEYSTONE
+# *********************************************
+def get_tenants(keystone_client):
+    """List Keystone projects (v3) or tenants (v2); None on error."""
+    try:
+        if is_keystone_v3():
+            tenants = keystone_client.projects.list()
+        else:
+            tenants = keystone_client.tenants.list()
+        return tenants
+    except Exception as e:
+        logger.error("Error [get_tenants(keystone_client)]: %s" % e)
+        return None
+
+
+def get_users(keystone_client):
+    """List Keystone users; None on error."""
+    try:
+        users = keystone_client.users.list()
+        return users
+    except Exception as e:
+        logger.error("Error [get_users(keystone_client)]: %s" % e)
+        return None
+
+
+def get_tenant_id(keystone_client, tenant_name):
+ tenants = get_tenants(keystone_client)
+ id = ''
+ for t in tenants:
+ if t.name == tenant_name:
+ id = t.id
+ break
+ return id
+
+
+def get_user_id(keystone_client, user_name):
+ users = get_users(keystone_client)
+ id = ''
+ for u in users:
+ if u.name == user_name:
+ id = u.id
+ break
+ return id
+
+
+def get_role_id(keystone_client, role_name):
+ roles = keystone_client.roles.list()
+ id = ''
+ for r in roles:
+ if r.name == role_name:
+ id = r.id
+ break
+ return id
+
+
+def get_domain_id(keystone_client, domain_name):
+ domains = keystone_client.domains.list()
+ id = ''
+ for d in domains:
+ if d.name == domain_name:
+ id = d.id
+ break
+ return id
+
+
+def create_tenant(keystone_client, tenant_name, tenant_description):
+    """Create a Keystone project (v3) or tenant (v2).
+
+    Under v3 the project is created in the domain named by the
+    OS_PROJECT_DOMAIN_NAME environment variable.
+
+    :return: the new tenant/project id, or None on error
+    """
+    try:
+        if is_keystone_v3():
+            domain_name = os.environ['OS_PROJECT_DOMAIN_NAME']
+            domain_id = get_domain_id(keystone_client, domain_name)
+            tenant = keystone_client.projects.create(
+                name=tenant_name,
+                description=tenant_description,
+                domain=domain_id,
+                enabled=True)
+        else:
+            tenant = keystone_client.tenants.create(tenant_name,
+                                                    tenant_description,
+                                                    enabled=True)
+        return tenant.id
+    except Exception as e:
+        logger.error("Error [create_tenant(keystone_client, '%s', '%s')]: %s"
+                     % (tenant_name, tenant_description, e))
+        return None
+
+
+def get_or_create_tenant(keystone_client, tenant_name, tenant_description):
+    """Return the id of tenant_name, creating the tenant if needed.
+
+    :return: the tenant id, or None when creation fails
+    """
+    tenant_id = get_tenant_id(keystone_client, tenant_name)
+    if not tenant_id:
+        tenant_id = create_tenant(keystone_client, tenant_name,
+                                  tenant_description)
+
+    return tenant_id
+
+
+def get_or_create_tenant_for_vnf(keystone_client, tenant_name,
+                                 tenant_description):
+    """Get or Create a Tenant
+
+    Args:
+        keystone_client: keystone client reference
+        tenant_name: the name of the tenant
+        tenant_description: the description of the tenant
+
+    return False if tenant retrieved though get
+    return True if tenant created
+    raise Exception if error during processing
+    """
+    try:
+        tenant_id = get_tenant_id(keystone_client, tenant_name)
+        if not tenant_id:
+            tenant_id = create_tenant(keystone_client, tenant_name,
+                                      tenant_description)
+            return True
+        else:
+            return False
+    except Exception:
+        # The original error is swallowed; only the tenant name is reported.
+        raise Exception("Impossible to create a Tenant for the VNF {}".format(
+            tenant_name))
+
+
+def create_user(keystone_client, user_name, user_password,
+                user_email, tenant_id):
+    """Create a Keystone user in the given tenant/project.
+
+    :return: the new user id, or None on error
+    """
+    try:
+        if is_keystone_v3():
+            user = keystone_client.users.create(name=user_name,
+                                                password=user_password,
+                                                email=user_email,
+                                                project_id=tenant_id,
+                                                enabled=True)
+        else:
+            user = keystone_client.users.create(user_name,
+                                                user_password,
+                                                user_email,
+                                                tenant_id,
+                                                enabled=True)
+        return user.id
+    except Exception as e:
+        logger.error("Error [create_user(keystone_client, '%s', '%s', '%s'"
+                     "'%s')]: %s" % (user_name, user_password,
+                                     user_email, tenant_id, e))
+        return None
+
+
+def get_or_create_user(keystone_client, user_name, user_password,
+                       tenant_id, user_email=None):
+    """Return the id of user_name, creating the user if needed.
+
+    :return: the user id, or None when creation fails
+    """
+    user_id = get_user_id(keystone_client, user_name)
+    if not user_id:
+        user_id = create_user(keystone_client, user_name, user_password,
+                              user_email, tenant_id)
+    return user_id
+
+
+def get_or_create_user_for_vnf(keystone_client, vnf_ref):
+    """Get or Create user for VNF
+
+    Args:
+        keystone_client: keystone client reference
+        vnf_ref: VNF reference used as user name & password, tenant name
+
+    return False if user retrieved through get
+    return True if user created
+    raise Exception if error during processing
+    """
+    try:
+        user_id = get_user_id(keystone_client, vnf_ref)
+        tenant_id = get_tenant_id(keystone_client, vnf_ref)
+        created = False
+        if not user_id:
+            # User name and password are both the VNF reference; email empty.
+            user_id = create_user(keystone_client, vnf_ref, vnf_ref,
+                                  "", tenant_id)
+            created = True
+        try:
+            # Best-effort: grant the admin role on the VNF tenant.
+            role_id = get_role_id(keystone_client, 'admin')
+            tenant_id = get_tenant_id(keystone_client, vnf_ref)
+            add_role_user(keystone_client, user_id, role_id, tenant_id)
+        except Exception:
+            logger.warn("Cannot associate user to role admin on tenant")
+        return created
+    except Exception:
+        raise Exception("Impossible to create a user for the VNF {}".format(
+            vnf_ref))
+
+
+def add_role_user(keystone_client, user_id, role_id, tenant_id):
+    """Grant role_id to user_id on tenant/project tenant_id.
+
+    :return: True on success, False on error
+    """
+    try:
+        if is_keystone_v3():
+            keystone_client.roles.grant(role=role_id,
+                                        user=user_id,
+                                        project=tenant_id)
+        else:
+            keystone_client.roles.add_user_role(user_id, role_id, tenant_id)
+        return True
+    except Exception as e:
+        logger.error("Error [add_role_user(keystone_client, '%s', '%s'"
+                     "'%s')]: %s " % (user_id, role_id, tenant_id, e))
+        return False
+
+
+def delete_tenant(keystone_client, tenant_id):
+    """Delete a project (v3) or tenant (v2); True on success."""
+    try:
+        if is_keystone_v3():
+            keystone_client.projects.delete(tenant_id)
+        else:
+            keystone_client.tenants.delete(tenant_id)
+        return True
+    except Exception as e:
+        logger.error("Error [delete_tenant(keystone_client, '%s')]: %s"
+                     % (tenant_id, e))
+        return False
+
+
+def delete_user(keystone_client, user_id):
+    """Delete Keystone user user_id; True on success, False on error."""
+    try:
+        keystone_client.users.delete(user_id)
+        return True
+    except Exception as e:
+        logger.error("Error [delete_user(keystone_client, '%s')]: %s"
+                     % (user_id, e))
+        return False
+
+
+# *********************************************
+# HEAT
+# *********************************************
+def create_stack(conn, **kwargs):
+    """Start creation of a Heat stack.
+
+    :param kwargs: forwarded to orchestration.create_stack
+    :return: the new stack id, or None on error (a None id from the API is
+             raised internally and then logged/converted to None)
+    """
+    try:
+        stack = conn.orchestration.create_stack(**kwargs)
+        stack_id = stack.id
+        if stack_id is None:
+            logger.error("Stack create start failed")
+            raise SystemError("Stack create start failed")
+        return stack_id
+    except Exception as e:
+        logger.error("Error [create_stack(orchestration)]: %s" % e)
+        return None
+
+
+def update_stack(conn, stack_id, **kwargs):
+    """Update Heat stack stack_id; True on success, False on error."""
+    try:
+        conn.orchestration.update_stack(stack_id, **kwargs)
+        return True
+    except Exception as e:
+        logger.error("Error [update_stack(orchestration)]: %s" % e)
+        return False
+
+
+def delete_stack(conn, stack_id):
+    """Delete Heat stack stack_id; True on success, False on error."""
+    try:
+        conn.orchestration.delete_stack(stack_id)
+        return True
+    except Exception as e:
+        logger.error("Error [delete_stack(orchestration)]: %s" % e)
+        return False
+
+
+def list_stacks(conn, **kwargs):
+    """Return the generator of Heat stacks matching kwargs, or None."""
+    try:
+        result = conn.orchestration.stacks(**kwargs)
+        return result
+    except Exception as e:
+        logger.error("Error [list_stack(orchestration)]: %s" % e)
+        return None
+
+
+def get_output(conn, stack_id, output_key):
+    """Return the stack output dict whose output_key matches.
+
+    :return: the matching output dict, or None when the key is absent or
+             the stack cannot be found
+    """
+    try:
+        stack = conn.orchestration.get_stack(stack_id)
+        for output in stack.outputs:
+            if output['output_key'] == output_key:
+                return output
+    except ResourceNotFound as e:
+        logger.error("Error [get_output(orchestration)]: %s" % e)
+        return None
diff --git a/sdnvpn/lib/quagga.py b/sdnvpn/lib/quagga.py
index 5234189..6efd6a9 100644
--- a/sdnvpn/lib/quagga.py
+++ b/sdnvpn/lib/quagga.py
@@ -1,3 +1,12 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2017 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
"""Utilities for setting up quagga peering"""
import logging
@@ -13,12 +22,12 @@ logger = logging.getLogger('sdnvpn-quagga')
COMMON_CONFIG = config.CommonConfig()
-def odl_add_neighbor(neighbor_ip, controller_ip, controller):
- # Explicitly pass controller_ip because controller.ip
+def odl_add_neighbor(neighbor_ip, odl_ip, odl_node):
+ # Explicitly pass odl_ip because odl_node.ip
# Might not be accessible from the Quagga instance
command = 'configure-bgp -op add-neighbor --as-num 200'
- command += ' --ip %s --use-source-ip %s' % (neighbor_ip, controller_ip)
- success = run_odl_cmd(controller, command)
+ command += ' --ip %s --use-source-ip %s' % (neighbor_ip, odl_ip)
+ success = run_odl_cmd(odl_node, command)
# The run_cmd api is really whimsical
logger.info("Maybe stdout of %s: %s", command, success)
return success
@@ -33,18 +42,20 @@ def bootstrap_quagga(fip_addr, controller_ip):
return rc == 0
-def gen_quagga_setup_script(controller_ip,
+def gen_quagga_setup_script(odl_ip,
fake_floating_ip,
- ext_net_mask):
+ ext_net_mask,
+ ip_prefix, rd, irt, ert):
with open(COMMON_CONFIG.quagga_setup_script_path) as f:
template = f.read()
- script = template % (controller_ip,
- fake_floating_ip,
- ext_net_mask)
+ script = template.format(odl_ip,
+ fake_floating_ip,
+ ext_net_mask,
+ ip_prefix, rd, irt, ert)
return script
-def check_for_peering(controller):
+def check_for_peering(odl_node):
cmd = 'show-bgp --cmd \\"ip bgp neighbors\\"'
tries = 20
neighbors = None
@@ -53,7 +64,7 @@ def check_for_peering(controller):
while tries > 0:
if neighbors and 'Established' in neighbors:
break
- neighbors = run_odl_cmd(controller, cmd)
+ neighbors = run_odl_cmd(odl_node, cmd)
logger.info("Output of %s: %s", cmd, neighbors)
if neighbors:
opens = opens_regex.search(neighbors)
diff --git a/sdnvpn/lib/results.py b/sdnvpn/lib/results.py
index 790a916..924b921 100644
--- a/sdnvpn/lib/results.py
+++ b/sdnvpn/lib/results.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
#
# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
@@ -17,7 +17,8 @@ logger = logging.getLogger('sdnvpn-results')
class Results(object):
- def __init__(self, line_length):
+ def __init__(self, line_length, conn=None):
+ self.conn = conn
self.line_length = line_length
self.test_result = "PASS"
self.summary = ""
@@ -29,7 +30,8 @@ class Results(object):
vm_source,
vm_target,
expected="PASS", timeout=30):
- ip_target = vm_target.networks.itervalues().next()[0]
+ ip_target = self.conn.compute.get_server(vm_target).\
+ addresses.values()[0][0]['addr']
self.get_ping_status_target_ip(vm_source, vm_target.name,
ip_target, expected, timeout)
@@ -38,8 +40,10 @@ class Results(object):
target_name,
ip_target,
expected="PASS", timeout=30):
- console_log = vm_source.get_console_output()
- ip_source = vm_source.networks.itervalues().next()[0]
+ console_log = self.conn.compute.\
+ get_server_console_output(vm_source)['output']
+ ip_source = self.conn.compute.get_server(vm_source).\
+ addresses.values()[0][0]['addr']
if "request failed" in console_log:
# Normally, cirros displays this message when userdata fails
logger.debug("It seems userdata is not supported in "
@@ -59,7 +63,8 @@ class Results(object):
tab, target_name, ip_target,
tab, expected_result))
while True:
- console_log = vm_source.get_console_output()
+ console_log = self.conn.compute.\
+ get_server_console_output(vm_source)['output']
# the console_log is a long string, we want to take
# the last 4 lines (for example)
lines = console_log.split('\n')
@@ -128,9 +133,12 @@ class Results(object):
def check_ssh_output(self, vm_source, vm_target,
expected, timeout=30):
- console_log = vm_source.get_console_output()
- ip_source = vm_source.networks.itervalues().next()[0]
- ip_target = vm_target.networks.itervalues().next()[0]
+ console_log = self.conn.compute.\
+ get_server_console_output(vm_source)['output']
+ ip_source = self.conn.compute.get_server(vm_source).\
+ addresses.values()[0][0]['addr']
+ ip_target = self.conn.compute.get_server(vm_target).\
+ addresses.values()[0][0]['addr']
if "request failed" in console_log:
# Normally, cirros displays this message when userdata fails
@@ -148,7 +156,8 @@ class Results(object):
tab, vm_target.name, ip_target,
tab, expected))
while True:
- console_log = vm_source.get_console_output()
+ console_log = self.conn.compute.\
+ get_server_console_output(vm_source)['output']
# the console_log is a long string, we want to take
# the last 4 lines (for example)
lines = console_log.split('\n')
diff --git a/sdnvpn/lib/utils.py b/sdnvpn/lib/utils.py
index 0ab8b84..4c35edc 100644
--- a/sdnvpn/lib/utils.py
+++ b/sdnvpn/lib/utils.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
#
# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
@@ -7,26 +7,32 @@
#
# http://www.apache.org/licenses/LICENSE-2.0
#
+import json
import logging
import os
-import sys
import time
import requests
import re
import subprocess
+import yaml
from concurrent.futures import ThreadPoolExecutor
+from openstack.exceptions import ResourceNotFound, NotFoundException
+from requests.auth import HTTPBasicAuth
-import functest.utils.openstack_utils as os_utils
+from functest.utils import env
from opnfv.deployment.factory import Factory as DeploymentFactory
from sdnvpn.lib import config as sdnvpn_config
+import sdnvpn.lib.openstack_utils as os_utils
logger = logging.getLogger('sdnvpn_test_utils')
common_config = sdnvpn_config.CommonConfig()
-ODL_USER = 'admin'
-ODL_PASS = 'admin'
+ODL_USER = env.get('SDN_CONTROLLER_USER')
+ODL_PASSWORD = env.get('SDN_CONTROLLER_PASSWORD')
+ODL_IP = env.get('SDN_CONTROLLER_IP')
+ODL_PORT = env.get('SDN_CONTROLLER_RESTCONFPORT')
executor = ThreadPoolExecutor(5)
@@ -35,6 +41,7 @@ class ExtraRoute(object):
"""
Class to represent extra route for a router
"""
+
def __init__(self, destination, nexthop):
self.destination = destination
self.nexthop = nexthop
@@ -44,11 +51,19 @@ class AllowedAddressPair(object):
"""
Class to represent allowed address pair for a neutron port
"""
+
def __init__(self, ipaddress, macaddress):
self.ipaddress = ipaddress
self.macaddress = macaddress
+def create_default_flavor():
+ return os_utils.get_or_create_flavor(common_config.default_flavor,
+ common_config.default_flavor_ram,
+ common_config.default_flavor_disk,
+ common_config.default_flavor_vcpus)
+
+
def create_custom_flavor():
return os_utils.get_or_create_flavor(common_config.custom_flavor_name,
common_config.custom_flavor_ram,
@@ -56,36 +71,38 @@ def create_custom_flavor():
common_config.custom_flavor_vcpus)
-def create_net(neutron_client, name):
+def create_net(conn, name):
logger.debug("Creating network %s", name)
- net_id = os_utils.create_neutron_net(neutron_client, name)
+ net_id = os_utils.create_neutron_net(conn, name)
if not net_id:
logger.error(
"There has been a problem when creating the neutron network")
- sys.exit(-1)
+ raise Exception("There has been a problem when creating"
+ " the neutron network {}".format(name))
return net_id
-def create_subnet(neutron_client, name, cidr, net_id):
+def create_subnet(conn, name, cidr, net_id):
logger.debug("Creating subnet %s in network %s with cidr %s",
name, net_id, cidr)
- subnet_id = os_utils.create_neutron_subnet(neutron_client,
+ subnet_id = os_utils.create_neutron_subnet(conn,
name,
cidr,
net_id)
if not subnet_id:
logger.error(
"There has been a problem when creating the neutron subnet")
- sys.exit(-1)
+ raise Exception("There has been a problem when creating"
+ " the neutron subnet {}".format(name))
return subnet_id
-def create_network(neutron_client, net, subnet1, cidr1,
+def create_network(conn, net, subnet1, cidr1,
router, subnet2=None, cidr2=None):
"""Network assoc won't work for networks/subnets created by this function.
It is an ODL limitation due to it handling routers as vpns.
See https://bugs.opendaylight.org/show_bug.cgi?id=6962"""
- network_dic = os_utils.create_network_full(neutron_client,
+ network_dic = os_utils.create_network_full(conn,
net,
subnet1,
router,
@@ -93,7 +110,8 @@ def create_network(neutron_client, net, subnet1, cidr1,
if not network_dic:
logger.error(
"There has been a problem when creating the neutron network")
- sys.exit(-1)
+ raise Exception("There has been a problem when creating"
+ " the neutron network {}".format(net))
net_id = network_dic["net_id"]
subnet_id = network_dic["subnet_id"]
router_id = network_dic["router_id"]
@@ -101,25 +119,25 @@ def create_network(neutron_client, net, subnet1, cidr1,
if subnet2 is not None:
logger.debug("Creating and attaching a second subnet...")
subnet_id = os_utils.create_neutron_subnet(
- neutron_client, subnet2, cidr2, net_id)
+ conn, subnet2, cidr2, net_id)
if not subnet_id:
logger.error(
"There has been a problem when creating the second subnet")
- sys.exit(-1)
+ raise Exception("There has been a problem when creating"
+ " the second subnet {}".format(subnet2))
logger.debug("Subnet '%s' created successfully" % subnet_id)
return net_id, subnet_id, router_id
-def get_port(neutron_client, instance_id):
- ports = os_utils.get_port_list(neutron_client)
- if ports is not None:
- for port in ports:
- if port['device_id'] == instance_id:
- return port
+def get_port(conn, instance_id):
+ ports = os_utils.get_port_list(conn)
+ for port in ports:
+ if port.device_id == instance_id:
+ return port
return None
-def update_port_allowed_address_pairs(neutron_client, port_id, address_pairs):
+def update_port_allowed_address_pairs(conn, port_id, address_pairs):
if len(address_pairs) <= 0:
return
allowed_address_pairs = []
@@ -127,30 +145,27 @@ def update_port_allowed_address_pairs(neutron_client, port_id, address_pairs):
address_pair_dict = {'ip_address': address_pair.ipaddress,
'mac_address': address_pair.macaddress}
allowed_address_pairs.append(address_pair_dict)
- json_body = {'port': {
- "allowed_address_pairs": allowed_address_pairs
- }}
try:
- port = neutron_client.update_port(port=port_id,
- body=json_body)
- return port['port']['id']
+ port = conn.network.\
+ update_port(port_id, allowed_address_pairs=allowed_address_pairs)
+ return port.id
except Exception as e:
- logger.error("Error [update_neutron_port(neutron_client, '%s', '%s')]:"
+ logger.error("Error [update_neutron_port(network, '%s', '%s')]:"
" %s" % (port_id, address_pairs, e))
return None
-def create_instance(nova_client,
+def create_instance(conn,
name,
image_id,
network_id,
sg_id,
secgroup_name=None,
fixed_ip=None,
- compute_node='',
+ compute_node=None,
userdata=None,
- files=None,
+ files=[],
**kwargs
):
if 'flavor' not in kwargs:
@@ -176,12 +191,14 @@ def create_instance(nova_client,
if instance is None:
logger.error("Error while booting instance.")
- sys.exit(-1)
+ raise Exception("Error while booting instance {}".format(name))
else:
+ # Retrieve IP of INSTANCE
+ network_name = conn.network.get_network(network_id).name
+ instance_ip = conn.compute.get_server(instance).\
+ addresses.get(network_name)[0]['addr']
logger.debug("Instance '%s' booted successfully. IP='%s'." %
- (name, instance.networks.itervalues().next()[0]))
- # Retrieve IP of INSTANCE
- # instance_ip = instance.networks.get(network_id)[0]
+ (name, instance_ip))
if secgroup_name:
logger.debug("Adding '%s' to security group '%s'..."
@@ -189,7 +206,7 @@ def create_instance(nova_client,
else:
logger.debug("Adding '%s' to security group '%s'..."
% (name, sg_id))
- os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)
+ os_utils.add_secgroup_to_instance(conn, instance.id, sg_id)
return instance
@@ -283,18 +300,16 @@ def get_installerHandler():
return None
else:
if installer_type in ["apex"]:
- developHandler = DeploymentFactory.get_handler(
- installer_type,
- installer_ip,
- 'root',
- pkey_file="/root/.ssh/id_rsa")
-
- if installer_type in ["fuel"]:
- developHandler = DeploymentFactory.get_handler(
- installer_type,
- installer_ip,
- 'root',
- 'r00tme')
+ installer_user = "root"
+ elif installer_type in ["fuel"]:
+ installer_user = "ubuntu"
+
+ developHandler = DeploymentFactory.get_handler(
+ installer_type,
+ installer_ip,
+ installer_user,
+ pkey_file="/root/.ssh/id_rsa")
+
return developHandler
@@ -307,18 +322,21 @@ def get_installer_ip():
return str(os.environ['INSTALLER_IP'])
-def get_instance_ip(instance):
- instance_ip = instance.networks.itervalues().next()[0]
+def get_instance_ip(conn, instance):
+ instance_ip = conn.compute.get_server(instance).\
+ addresses.values()[0][0]['addr']
return instance_ip
def wait_for_instance(instance, pattern=".* login:", tries=40):
logger.info("Waiting for instance %s to boot up" % instance.id)
+ conn = os_utils.get_os_connection()
sleep_time = 2
expected_regex = re.compile(pattern)
console_log = ""
while tries > 0 and not expected_regex.search(console_log):
- console_log = instance.get_console_output()
+ console_log = conn.compute.\
+ get_server_console_output(instance)['output']
time.sleep(sleep_time)
tries -= 1
@@ -357,6 +375,21 @@ def async_Wait_for_instances(instances, tries=40):
logger.error("one or more instances is not yet booted up")
+def wait_for_instance_delete(conn, instance_id, tries=30):
+ sleep_time = 2
+ instances = [instance_id]
+ logger.debug("Waiting for instance %s to be deleted"
+ % (instance_id))
+ while tries > 0 and instance_id in instances:
+ instances = [instance.id for instance in
+ os_utils.get_instances(conn)]
+ time.sleep(sleep_time)
+ tries -= 1
+ if instance_id in instances:
+ logger.error("Deletion of instance %s failed" %
+ (instance_id))
+
+
def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
tries = 30
sleep_time = 1
@@ -412,29 +445,31 @@ def wait_before_subtest(*args, **kwargs):
time.sleep(30)
-def assert_and_get_compute_nodes(nova_client, required_node_number=2):
+def assert_and_get_compute_nodes(conn, required_node_number=2):
"""Get the compute nodes in the deployment
Exit if the deployment doesn't have enough compute nodes"""
- compute_nodes = os_utils.get_hypervisors(nova_client)
+ compute_nodes = os_utils.get_hypervisors(conn)
num_compute_nodes = len(compute_nodes)
if num_compute_nodes < 2:
logger.error("There are %s compute nodes in the deployment. "
"Minimum number of nodes to complete the test is 2."
% num_compute_nodes)
- sys.exit(-1)
+ raise Exception("There are {} compute nodes in the deployment. "
+ "Minimum number of nodes to complete the test"
+ " is 2.".format(num_compute_nodes))
logger.debug("Compute nodes: %s" % compute_nodes)
return compute_nodes
-def open_icmp(neutron_client, security_group_id):
- if os_utils.check_security_group_rules(neutron_client,
+def open_icmp(conn, security_group_id):
+ if os_utils.check_security_group_rules(conn,
security_group_id,
'ingress',
'icmp'):
- if not os_utils.create_secgroup_rule(neutron_client,
+ if not os_utils.create_secgroup_rule(conn,
security_group_id,
'ingress',
'icmp'):
@@ -444,14 +479,14 @@ def open_icmp(neutron_client, security_group_id):
% security_group_id)
-def open_http_port(neutron_client, security_group_id):
- if os_utils.check_security_group_rules(neutron_client,
+def open_http_port(conn, security_group_id):
+ if os_utils.check_security_group_rules(conn,
security_group_id,
'ingress',
'tcp',
80, 80):
- if not os_utils.create_secgroup_rule(neutron_client,
+ if not os_utils.create_secgroup_rule(conn,
security_group_id,
'ingress',
'tcp',
@@ -463,14 +498,14 @@ def open_http_port(neutron_client, security_group_id):
% security_group_id)
-def open_bgp_port(neutron_client, security_group_id):
- if os_utils.check_security_group_rules(neutron_client,
+def open_bgp_port(conn, security_group_id):
+ if os_utils.check_security_group_rules(conn,
security_group_id,
'ingress',
'tcp',
179, 179):
- if not os_utils.create_secgroup_rule(neutron_client,
+ if not os_utils.create_secgroup_rule(conn,
security_group_id,
'ingress',
'tcp',
@@ -502,17 +537,19 @@ def exec_cmd(cmd, verbose):
return output, success
-def check_odl_fib(ip, controller_ip):
+def check_odl_fib(ip):
"""Check that there is an entry in the ODL Fib for `ip`"""
- url = "http://" + controller_ip + \
- ":8181/restconf/config/odl-fib:fibEntries/"
+ url = ("http://{user}:{password}@{ip}:{port}/restconf/config/"
+ "odl-fib:fibEntries/".format(user=ODL_USER,
+ password=ODL_PASSWORD, ip=ODL_IP,
+ port=ODL_PORT))
logger.debug("Querring '%s' for FIB entries", url)
- res = requests.get(url, auth=(ODL_USER, ODL_PASS))
+ res = requests.get(url, auth=(ODL_USER, ODL_PASSWORD))
if res.status_code != 200:
logger.error("OpenDaylight response status code: %s", res.status_code)
return False
logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
- % controller_ip)
+ % ODL_IP)
logger.debug("OpenDaylight FIB: \n%s" % res.text)
return ip in res.text
@@ -530,7 +567,7 @@ def run_odl_cmd(odl_node, cmd):
return odl_node.run_cmd(karaf_cmd)
-def wait_for_cloud_init(instance):
+def wait_for_cloud_init(conn, instance):
success = True
# ubuntu images take a long time to start
tries = 20
@@ -538,7 +575,8 @@ def wait_for_cloud_init(instance):
logger.info("Waiting for cloud init of instance: {}"
"".format(instance.name))
while tries > 0:
- instance_log = instance.get_console_output()
+ instance_log = conn.compute.\
+ get_server_console_output(instance)['output']
if "Failed to run module" in instance_log:
success = False
logger.error("Cloud init failed to run. Reason: %s",
@@ -561,36 +599,52 @@ def wait_for_cloud_init(instance):
def attach_instance_to_ext_br(instance, compute_node):
- libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
+ libvirt_instance_name = instance.instance_name
installer_type = str(os.environ['INSTALLER_TYPE'].lower())
- if installer_type == "fuel":
+ # In Apex, br-ex (or br-floating for Fuel) is an ovs bridge and virsh
+ # attach-interface won't just work. We work around it by creating a linux
+ # bridge, attaching that to br-ex (or br-floating for Fuel) with a
+ # veth pair and virsh-attaching the instance to the linux-bridge
+ if installer_type in ["fuel"]:
+ bridge = "br-floating"
+ elif installer_type in ["apex"]:
bridge = "br-ex"
- elif installer_type == "apex":
- # In Apex, br-ex is an ovs bridge and virsh attach-interface
- # won't just work. We work around it by creating a linux
- # bridge, attaching that to br-ex with a veth pair
- # and virsh-attaching the instance to the linux-bridge
- bridge = "br-quagga"
- cmd = """
- set -e
- if ! sudo brctl show |grep -q ^{bridge};then
- sudo brctl addbr {bridge}
- sudo ip link set {bridge} up
- sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
- sudo ip link set dev ovs-quagga-tap up
- sudo ip link set dev quagga-tap up
- sudo ovs-vsctl add-port br-ex ovs-quagga-tap
- sudo brctl addif {bridge} quagga-tap
- fi
- """
- compute_node.run_cmd(cmd.format(bridge=bridge))
+ else:
+ logger.warn("installer type %s is neither fuel nor apex."
+ % installer_type)
+ return
+
+ cmd = """
+ set -e
+ if ! sudo brctl show |grep -q ^br-quagga;then
+ sudo brctl addbr br-quagga
+ sudo ip link set br-quagga up
+ sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
+ sudo ip link set dev ovs-quagga-tap up
+ sudo ip link set dev quagga-tap up
+ sudo ovs-vsctl add-port {bridge} ovs-quagga-tap
+ sudo brctl addif br-quagga quagga-tap
+ fi
+ """
+ compute_node.run_cmd(cmd.format(bridge=bridge))
compute_node.run_cmd("sudo virsh attach-interface %s"
- " bridge %s" % (libvirt_instance_name, bridge))
+ " bridge br-quagga" % (libvirt_instance_name))
def detach_instance_from_ext_br(instance, compute_node):
- libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
+ libvirt_instance_name = instance.instance_name
+ installer_type = str(os.environ['INSTALLER_TYPE'].lower())
+ # This function undoes all the actions performed by
+ # attach_instance_to_ext_br on Fuel and Apex installers.
+ if installer_type in ["fuel"]:
+ bridge = "br-floating"
+ elif installer_type in ["apex"]:
+ bridge = "br-ex"
+ else:
+ logger.warn("installer type %s is neither fuel nor apex."
+ % installer_type)
+ return
mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
"grep running | awk '{print $2}'); "
"do echo -n ; sudo virsh dumpxml $vm| "
@@ -599,36 +653,26 @@ def detach_instance_from_ext_br(instance, compute_node):
" --type bridge --mac %s"
% (libvirt_instance_name, mac))
- installer_type = str(os.environ['INSTALLER_TYPE'].lower())
- if installer_type == "fuel":
- bridge = "br-ex"
- elif installer_type == "apex":
- # In Apex, br-ex is an ovs bridge and virsh attach-interface
- # won't just work. We work around it by creating a linux
- # bridge, attaching that to br-ex with a veth pair
- # and virsh-attaching the instance to the linux-bridge
- bridge = "br-quagga"
- cmd = """
- sudo brctl delif {bridge} quagga-tap &&
- sudo ovs-vsctl del-port br-ex ovs-quagga-tap &&
- sudo ip link set dev quagga-tap down &&
- sudo ip link set dev ovs-quagga-tap down &&
- sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
- sudo ip link set {bridge} down &&
- sudo brctl delbr {bridge}
- """
- compute_node.run_cmd(cmd.format(bridge=bridge))
-
-
-def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
- subnet_ids, router_ids, network_ids):
+ cmd = """
+ sudo brctl delif br-quagga quagga-tap &&
+ sudo ovs-vsctl del-port {bridge} ovs-quagga-tap &&
+ sudo ip link set dev quagga-tap down &&
+ sudo ip link set dev ovs-quagga-tap down &&
+ sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
+ sudo ip link set br-quagga down &&
+ sudo brctl delbr br-quagga
+ """
+ compute_node.run_cmd(cmd.format(bridge=bridge))
+
+def cleanup_neutron(conn, neutron_client, floatingip_ids, bgpvpn_ids,
+ interfaces, subnet_ids, router_ids, network_ids):
if len(floatingip_ids) != 0:
for floatingip_id in floatingip_ids:
- if not os_utils.delete_floating_ip(neutron_client, floatingip_id):
- logging.error('Fail to delete all floating ips. '
- 'Floating ip with id {} was not deleted.'.
- format(floatingip_id))
+ if not os_utils.delete_floating_ip(conn, floatingip_id):
+ logger.error('Fail to delete all floating ips. '
+ 'Floating ip with id {} was not deleted.'.
+ format(floatingip_id))
return False
if len(bgpvpn_ids) != 0:
@@ -637,69 +681,67 @@ def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
if len(interfaces) != 0:
for router_id, subnet_id in interfaces:
- if not os_utils.remove_interface_router(neutron_client,
+ if not os_utils.remove_interface_router(conn,
router_id, subnet_id):
- logging.error('Fail to delete all interface routers. '
- 'Interface router with id {} was not deleted.'.
- format(router_id))
+ logger.error('Fail to delete all interface routers. '
+ 'Interface router with id {} was not deleted.'.
+ format(router_id))
if len(router_ids) != 0:
for router_id in router_ids:
- if not os_utils.remove_gateway_router(neutron_client, router_id):
- logging.error('Fail to delete all gateway routers. '
- 'Gateway router with id {} was not deleted.'.
- format(router_id))
+ if not os_utils.remove_gateway_router(conn, router_id):
+ logger.error('Fail to delete all gateway routers. '
+ 'Gateway router with id {} was not deleted.'.
+ format(router_id))
if len(subnet_ids) != 0:
for subnet_id in subnet_ids:
- if not os_utils.delete_neutron_subnet(neutron_client, subnet_id):
- logging.error('Fail to delete all subnets. '
- 'Subnet with id {} was not deleted.'.
- format(subnet_id))
+ if not os_utils.delete_neutron_subnet(conn, subnet_id):
+ logger.error('Fail to delete all subnets. '
+ 'Subnet with id {} was not deleted.'.
+ format(subnet_id))
return False
if len(router_ids) != 0:
for router_id in router_ids:
- if not os_utils.delete_neutron_router(neutron_client, router_id):
- logging.error('Fail to delete all routers. '
- 'Router with id {} was not deleted.'.
- format(router_id))
+ if not os_utils.delete_neutron_router(conn, router_id):
+ logger.error('Fail to delete all routers. '
+ 'Router with id {} was not deleted.'.
+ format(router_id))
return False
if len(network_ids) != 0:
for network_id in network_ids:
- if not os_utils.delete_neutron_net(neutron_client, network_id):
- logging.error('Fail to delete all networks. '
- 'Network with id {} was not deleted.'.
- format(network_id))
+ if not os_utils.delete_neutron_net(conn, network_id):
+ logger.error('Fail to delete all networks. '
+ 'Network with id {} was not deleted.'.
+ format(network_id))
return False
return True
-def cleanup_nova(nova_client, instance_ids, flavor_ids=None):
+def cleanup_nova(conn, instance_ids, flavor_ids=None):
if flavor_ids is not None and len(flavor_ids) != 0:
for flavor_id in flavor_ids:
- if not nova_client.flavors.delete(flavor_id):
- logging.error('Fail to delete flavor. '
- 'Flavor with id {} was not deleted.'.
- format(flavor_id))
+ conn.compute.delete_flavor(flavor_id)
if len(instance_ids) != 0:
for instance_id in instance_ids:
- if not os_utils.delete_instance(nova_client, instance_id):
- logging.error('Fail to delete all instances. '
- 'Instance with id {} was not deleted.'.
- format(instance_id))
- return False
+ if not os_utils.delete_instance(conn, instance_id):
+ logger.error('Fail to delete all instances. '
+ 'Instance with id {} was not deleted.'.
+ format(instance_id))
+ else:
+ wait_for_instance_delete(conn, instance_id)
return True
-def cleanup_glance(glance_client, image_ids):
+def cleanup_glance(conn, image_ids):
if len(image_ids) != 0:
for image_id in image_ids:
- if not os_utils.delete_glance_image(glance_client, image_id):
- logging.error('Fail to delete all images. '
- 'Image with id {} was not deleted.'.
- format(image_id))
+ if not os_utils.delete_glance_image(conn, image_id):
+ logger.error('Fail to delete all images. '
+ 'Image with id {} was not deleted.'.
+ format(image_id))
return False
return True
@@ -759,6 +801,15 @@ def is_fail_mode_secure():
if not openstack_node.is_active():
continue
+ installer_type = str(os.environ['INSTALLER_TYPE'].lower())
+ if installer_type in ['fuel']:
+ if (
+ 'controller' in openstack_node.roles or
+ 'opendaylight' in openstack_node.roles or
+ 'installer' in openstack_node.roles
+ ):
+ continue
+
ovs_int_list = (openstack_node.run_cmd(get_ovs_int_cmd).
strip().split('\n'))
if 'br-int' in ovs_int_list:
@@ -770,59 +821,55 @@ def is_fail_mode_secure():
is_secure[openstack_node.name] = True
else:
# failure
- logging.error('The fail_mode for br-int was not secure '
- 'in {} node'.format(openstack_node.name))
+ logger.error('The fail_mode for br-int was not secure '
+ 'in {} node'.format(openstack_node.name))
is_secure[openstack_node.name] = False
return is_secure
-def update_nw_subnet_port_quota(neutron_client, tenant_id, nw_quota,
- subnet_quota, port_quota):
- json_body = {"quota": {
- "network": nw_quota,
- "subnet": subnet_quota,
- "port": port_quota
- }}
-
+def update_nw_subnet_port_quota(conn, tenant_id, nw_quota,
+ subnet_quota, port_quota, router_quota):
try:
- neutron_client.update_quota(tenant_id=tenant_id,
- body=json_body)
+ conn.network.update_quota(tenant_id, networks=nw_quota,
+ subnets=subnet_quota, ports=port_quota,
+ routers=router_quota)
return True
except Exception as e:
- logger.error("Error [update_nw_subnet_port_quota(neutron_client,"
- " '%s', '%s', '%s', '%s')]: %s" %
- (tenant_id, nw_quota, subnet_quota, port_quota, e))
+ logger.error("Error [update_nw_subnet_port_quota(network,"
+ " '%s', '%s', '%s', '%s, %s')]: %s" %
+ (tenant_id, nw_quota, subnet_quota,
+ port_quota, router_quota, e))
return False
-def update_instance_quota_class(nova_client, instances_quota):
+def update_instance_quota_class(cloud, instances_quota):
try:
- nova_client.quota_classes.update("default", instances=instances_quota)
+ cloud.set_compute_quotas('admin', instances=instances_quota)
return True
except Exception as e:
- logger.error("Error [update_instance_quota_class(nova_client,"
+ logger.error("Error [update_instance_quota_class(compute,"
" '%s' )]: %s" % (instances_quota, e))
return False
-def get_neutron_quota(neutron_client, tenant_id):
+def get_neutron_quota(conn, tenant_id):
try:
- return neutron_client.show_quota(tenant_id=tenant_id)['quota']
- except Exception as e:
- logger.error("Error in getting neutron quota for tenant "
+ return conn.network.get_quota(tenant_id)
+ except ResourceNotFound as e:
+ logger.error("Error in getting network quota for tenant "
" '%s' )]: %s" % (tenant_id, e))
raise
-def get_nova_instances_quota(nova_client):
+def get_nova_instances_quota(cloud):
try:
- return nova_client.quota_classes.get("default").instances
+ return cloud.get_compute_quotas('admin').instances
except Exception as e:
logger.error("Error in getting nova instances quota: %s" % e)
raise
-def update_router_extra_route(neutron_client, router_id, extra_routes):
+def update_router_extra_route(conn, router_id, extra_routes):
if len(extra_routes) <= 0:
return
routes_list = []
@@ -830,26 +877,19 @@ def update_router_extra_route(neutron_client, router_id, extra_routes):
route_dict = {'destination': extra_route.destination,
'nexthop': extra_route.nexthop}
routes_list.append(route_dict)
- json_body = {'router': {
- "routes": routes_list
- }}
try:
- neutron_client.update_router(router_id, body=json_body)
+ conn.network.update_router(router_id, routes=routes_list)
return True
except Exception as e:
logger.error("Error in updating router with extra route: %s" % e)
raise
-def update_router_no_extra_route(neutron_client, router_ids):
- json_body = {'router': {
- "routes": [
- ]}}
-
+def update_router_no_extra_route(conn, router_ids):
for router_id in router_ids:
try:
- neutron_client.update_router(router_id, body=json_body)
+ conn.network.update_router(router_id, routes=[])
return True
except Exception as e:
logger.error("Error in clearing extra route: %s" % e)
@@ -887,3 +927,244 @@ def get_ovs_flows(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
cmd_out_lines += (compute_node.run_cmd(ovs_flows_cmd).strip().
split("\n"))
return cmd_out_lines
+
+
+def get_node_ip_and_netmask(node, iface):
+ cmd = "ip a | grep {iface} | grep inet | awk '{{print $2}}'"\
+ .format(iface=iface)
+ mgmt_net_cidr = node.run_cmd(cmd).strip().split('\n')
+ mgmt_ip = mgmt_net_cidr[0].split('/')[0]
+ mgmt_netmask = mgmt_net_cidr[0].split('/')[1]
+
+ return mgmt_ip, mgmt_netmask
+
+
+def get_odl_bgp_entity_owner(odl_nodes):
+ """ Finds the ODL owner of the BGP entity in the cluster.
+
+ When ODL runs in clustering mode we need to execute the BGP speaker
+ related commands to that ODL which is the owner of the BGP entity.
+
+ :param odl_nodes: list of Opendaylight nodes
+ :return odl_node: Opendaylight node in which ODL BGP entity owner runs
+ """
+ if len(odl_nodes) == 1:
+ return odl_nodes[0]
+ else:
+ url = ('http://{user}:{password}@{ip}:{port}/restconf/'
+ 'operational/entity-owners:entity-owners/entity-type/bgp'
+ .format(user=ODL_USER, password=ODL_PASSWORD, ip=ODL_IP,
+ port=ODL_PORT))
+
+ installer_type = str(os.environ['INSTALLER_TYPE'].lower())
+ if installer_type in ['apex']:
+ node_user = 'heat-admin'
+ elif installer_type in ['fuel']:
+ node_user = 'ubuntu'
+
+ remote_odl_akka_conf = ('/opt/opendaylight/configuration/'
+ 'initial/akka.conf')
+ remote_odl_home_akka_conf = '/home/{0}/akka.conf'.format(node_user)
+ local_tmp_akka_conf = '/tmp/akka.conf'
+ try:
+ json_output = requests.get(url).json()
+ except Exception:
+ logger.error('Failed to find the ODL BGP '
+ 'entity owner through REST')
+ return None
+ odl_bgp_owner = json_output['entity-type'][0]['entity'][0]['owner']
+
+ for odl_node in odl_nodes:
+ if installer_type in ['apex']:
+ get_odl_id_cmd = 'sudo docker ps -qf name=opendaylight_api'
+ odl_id = odl_node.run_cmd(get_odl_id_cmd)
+ odl_node.run_cmd('sudo docker cp '
+ '{container_id}:{odl_akka_conf} '
+ '/home/{user}/'
+ .format(container_id=odl_id,
+ odl_akka_conf=remote_odl_akka_conf,
+ user=node_user))
+ elif installer_type in ['fuel']:
+ odl_node.run_cmd('sudo cp {0} /home/{1}/'
+ .format(remote_odl_akka_conf, node_user))
+ odl_node.run_cmd('sudo chmod 777 {0}'
+ .format(remote_odl_home_akka_conf))
+ odl_node.get_file(remote_odl_home_akka_conf, local_tmp_akka_conf)
+
+ for line in open(local_tmp_akka_conf):
+ if re.search(odl_bgp_owner, line):
+ return odl_node
+ return None
+
+
+def add_quagga_external_gre_end_point(odl_nodes, remote_tep_ip):
+ json_body = {'input':
+ {'destination-ip': remote_tep_ip,
+ 'tunnel-type': "odl-interface:tunnel-type-mpls-over-gre"}
+ }
+ url = ('http://{ip}:{port}/restconf/operations/'
+ 'itm-rpc:add-external-tunnel-endpoint'.format(ip=ODL_IP,
+ port=ODL_PORT))
+ headers = {'Content-type': 'application/yang.data+json',
+ 'Accept': 'application/yang.data+json'}
+ try:
+ requests.post(url, data=json.dumps(json_body),
+ headers=headers,
+ auth=HTTPBasicAuth(ODL_USER, ODL_PASSWORD))
+ except Exception as e:
+ logger.error("Failed to create external tunnel endpoint on"
+ " ODL for external tep ip %s with error %s"
+ % (remote_tep_ip, e))
+ return None
+
+
+def is_fib_entry_present_on_odl(odl_nodes, ip_prefix, vrf_id):
+ url = ('http://{user}:{password}@{ip}:{port}/restconf/config/'
+ 'odl-fib:fibEntries/vrfTables/{vrf}/'
+ .format(user=ODL_USER, password=ODL_PASSWORD, ip=ODL_IP,
+ port=ODL_PORT, vrf=vrf_id))
+ logger.error("url is %s" % url)
+ try:
+ vrf_table = requests.get(url).json()
+ is_ipprefix_exists = False
+ for vrf_entry in vrf_table['vrfTables'][0]['vrfEntry']:
+ if vrf_entry['destPrefix'] == ip_prefix:
+ is_ipprefix_exists = True
+ break
+ return is_ipprefix_exists
+ except Exception as e:
+ logger.error('Failed to find ip prefix %s with error %s'
+ % (ip_prefix, e))
+ return False
+
+
+def wait_stack_for_status(conn, stack_id, stack_status, limit=12):
+ """ Waits to reach specified stack status. To be used with
+ CREATE_COMPLETE and UPDATE_COMPLETE.
+ Will try a specific number of attempts at 10sec intervals
+ (default 2min)
+
+ :param stack_id: the stack id returned by create_stack api call
+ :param stack_status: the stack status waiting for
+ :param limit: the maximum number of attempts
+ """
+ logger.debug("Stack '%s' create started" % stack_id)
+
+ stack_create_complete = False
+ attempts = 0
+ while attempts < limit:
+ try:
+ stack_st = conn.orchestration.get_stack(stack_id).status
+ except NotFoundException:
+ logger.error("Stack create failed")
+ raise SystemError("Stack create failed")
+ return False
+ if stack_st == stack_status:
+ stack_create_complete = True
+ break
+ attempts += 1
+ time.sleep(10)
+
+ logger.debug("Stack status check: %s times" % attempts)
+ if stack_create_complete is False:
+ logger.error("Stack create failed")
+ raise SystemError("Stack create failed")
+ return False
+
+ return True
+
+
+def delete_stack_and_wait(conn, stack_id, limit=12):
+ """ Starts and waits for completion of delete stack
+
+ Will try a specific number of attempts at 10sec intervals
+ (default 2min)
+
+ :param stack_id: the id of the stack to be deleted
+ :param limit: the maximum number of attempts
+ """
+ delete_started = False
+ if stack_id is not None:
+ delete_started = os_utils.delete_stack(conn, stack_id)
+
+ if delete_started is True:
+        logger.debug("Stack delete successfully started")
+ else:
+ logger.error("Stack delete start failed")
+
+ stack_delete_complete = False
+ attempts = 0
+ while attempts < limit:
+ try:
+ stack_st = conn.orchestration.get_stack(stack_id).status
+ if stack_st == 'DELETE_COMPLETE':
+ stack_delete_complete = True
+ break
+ attempts += 1
+ time.sleep(10)
+ except NotFoundException:
+ stack_delete_complete = True
+ break
+
+ logger.debug("Stack status check: %s times" % attempts)
+ if not stack_delete_complete:
+ logger.error("Stack delete failed")
+ raise SystemError("Stack delete failed")
+ return False
+
+ return True
+
+
+def get_heat_environment(testcase, common_config):
+ """ Reads the heat parameters of a testcase into a yaml object
+
+    Each testcase where Heat Orchestration Template (HOT) is introduced
+ has an associated parameters section.
+ Reads testcase.heat_parameters section and read COMMON_CONFIG.flavor
+ and place it under parameters tree.
+
+    :param testcase: the testcase for which the HOT file is fetched
+ :param common_config: the common config section
+ :return environment: a yaml object to be used as environment
+ """
+ fl = common_config.default_flavor
+ param_dict = testcase.heat_parameters
+ param_dict['flavor'] = fl
+ env_dict = {'parameters': param_dict}
+ return env_dict
+
+
+def get_vms_from_stack_outputs(conn, stack_id, vm_stack_output_keys):
+ """ Converts a vm name from a heat stack output to a nova vm object
+
+ :param stack_id: the id of the stack to fetch the vms from
+ :param vm_stack_output_keys: a list of stack outputs with the vm names
+ :return vms: a list of vm objects corresponding to the outputs
+ """
+ vms = []
+ for vmk in vm_stack_output_keys:
+ vm_output = os_utils.get_output(conn, stack_id, vmk)
+ if vm_output is not None:
+ vm_name = vm_output['output_value']
+ logger.debug("vm '%s' read from heat output" % vm_name)
+ vm = os_utils.get_instance_by_name(conn, vm_name)
+ if vm is not None:
+ vms.append(vm)
+ return vms
+
+
+def merge_yaml(y1, y2):
+ """ Merge two yaml HOT into one
+
+ The parameters, resources and outputs sections are merged.
+
+ :param y1: the first yaml
+ :param y2: the second yaml
+ :return y: merged yaml
+ """
+ d1 = yaml.load(y1)
+ d2 = yaml.load(y2)
+ for key in ('parameters', 'resources', 'outputs'):
+ if key in d2:
+ d1[key].update(d2[key])
+ return yaml.dump(d1, default_flow_style=False)
diff --git a/sdnvpn/sh_utils/fetch-log-script.sh b/sdnvpn/sh_utils/fetch-log-script.sh
index c3c037d..9b0dc74 100755
--- a/sdnvpn/sh_utils/fetch-log-script.sh
+++ b/sdnvpn/sh_utils/fetch-log-script.sh
@@ -107,7 +107,11 @@ node(){
fi
done
# not all messages only tail the last 10k lines
- tail -n 10000 /var/log/messages > messages
+ if [ -f /var/log/messages ]; then
+ tail -n 10000 /var/log/messages > messages
+ elif [ -f /var/log/syslog ]; then
+ tail -n 10000 /var/log/syslog > messages
+ fi
}
_curl_data_store(){
@@ -137,7 +141,11 @@ datastore()
dump=$tmp_folder/dump-$HOSTNAME.txt
operational=$tmp_folder/Operational-Inventory-$HOSTNAME.txt
karaf_output=$tmp_folder/Karaf_out-$HOSTNAME.txt
- odl_ip_port=$(grep ^url= /etc/neutron/plugins/ml2/ml2_conf.ini |cut -d '/' -f3)
+ if [ -f /etc/neutron/plugins/ml2/ml2_conf.ini ]; then
+ odl_ip_port=$(grep ^url= /etc/neutron/plugins/ml2/ml2_conf.ini |cut -d '/' -f3)
+ else
+ odl_ip_port=$(netstat -tln | grep '8080\|8081\|8181\|8282' | awk 'NR==1 {print $4}')
+ fi
config_urls=( restconf/config/neutron:neutron/networks/ restconf/config/neutron:neutron/subnets/ restconf/config/neutron:neutron/ports/ restconf/config/neutron:neutron/routers/ restconf/config/itm:transport-zones/ restconf/config/itm-state:tunnels_state/ restconf/config/itm-state:external-tunnel-list/ restconf/config/itm-state:dpn-endpoints/ restconf/config/itm-config:vtep-config-schemas/ restconf/config/itm-config:tunnel-monitor-enabled/ restconf/config/itm-config:tunnel-monitor-interval/ restconf/config/interface-service-bindings:service-bindings/ restconf/config/l3vpn:vpn-instances/ restconf/config/ietf-interfaces:interfaces/ restconf/config/l3vpn:vpn-interfaces/ restconf/config/odl-fib:fibEntries restconf/config/neutronvpn:networkMaps restconf/config/neutronvpn:subnetmaps restconf/config/neutronvpn:vpnMaps restconf/config/neutronvpn:neutron-port-data restconf/config/id-manager:id-pools/ restconf/config/elan:elan-instances/ restconf/config/elan:elan-interfaces/ restconf/config/elan:elan-state/ restconf/config/elan:elan-forwarding-tables/ restconf/config/elan:elan-interface-forwarding-entries/ restconf/config/elan:elan-dpn-interfaces/ restconf/config/elan:elan-tag-name-map/ restconf/config/odl-nat:external-networks/ restconf/config/odl-nat:ext-routers/ restconf/config/odl-nat:intext-ip-port-map/ restconf/config/odl-nat:snatint-ip-port-map/ restconf/config/odl-l3vpn:vpn-instance-to-vpn-id/ restconf/config/neutronvpn:neutron-router-dpns/ restconf/operational/itm-config:tunnel-monitor-interval/ restconf/config/itm-config:tunnel-monitor-interval/ restconf/operational/itm-config:tunnel-monitor-params/ restconf/config/itm-config:tunnel-monitor-params/ restconf/config/vpnservice-dhcp:designated-switches-for-external-tunnels/ restconf/config/neutron:neutron/security-groups/ restconf/config/neutron:neutron/security-rules/ restconf/config/network-topology:network-topology/topology/hwvtep:1 restconf/config/network-topology:network-topology/topology/ovsdb:1 )
diff --git a/sdnvpn/test/functest/config.yaml b/sdnvpn/test/functest/config.yaml
index a5f4782..3d2fd8b 100644
--- a/sdnvpn/test/functest/config.yaml
+++ b/sdnvpn/test/functest/config.yaml
@@ -1,37 +1,99 @@
+---
defaults:
- flavor: m1.tiny # adapt to your environment
+ flavor: m1.tiny # adapt to your environment
testcases:
- sdnvpn.test.functest.tempest:
- enabled: true
- description: Neutron BGPVPN tests in tempest
-
sdnvpn.test.functest.testcase_1:
- enabled: true
- description: VPN provides connectivity between subnets
+ enabled: true
+ order: 1
+ description: VPN provides connectivity between subnets
+ instance_1_name: sdnvpn-1-1
+ instance_2_name: sdnvpn-1-2
+ instance_3_name: sdnvpn-1-3
+ instance_4_name: sdnvpn-1-4
+ instance_5_name: sdnvpn-1-5
+ image_name: sdnvpn-image
+ net_1_name: sdnvpn-1-1-net
+ subnet_1_name: sdnvpn-1-1-subnet
+ subnet_1_cidr: 10.10.10.0/24
+ router_1_name: sdnvpn-1-1-router
+ net_2_name: sdnvpn-1-2-net
+ subnet_2_name: sdnvpn-1-2-subnet
+ subnet_2_cidr: 10.10.11.0/24
+ router_2_name: sdnvpn-1-2-router
+ secgroup_name: sdnvpn-sg
+ secgroup_descr: Security group for SDNVPN test cases
+ targets1: '88:88'
+ targets2: '55:55'
+ route_distinguishers: '11:11'
+
+ sdnvpn.test.functest.testcase_1bis:
+ enabled: true
+ order: 14
+ description: Test bed for HOT introduction - same tests as case 1
+ image_name: sdnvpn-image
+ stack_name: stack-1bis
+ hot_file_name: artifacts/testcase_1bis.yaml
+ heat_parameters:
instance_1_name: sdnvpn-1-1
instance_2_name: sdnvpn-1-2
instance_3_name: sdnvpn-1-3
instance_4_name: sdnvpn-1-4
instance_5_name: sdnvpn-1-5
- image_name: sdnvpn-image
net_1_name: sdnvpn-1-1-net
subnet_1_name: sdnvpn-1-1-subnet
subnet_1_cidr: 10.10.10.0/24
- router_1_name: sdnvpn-1-1-router
net_2_name: sdnvpn-1-2-net
subnet_2_name: sdnvpn-1-2-subnet
subnet_2_cidr: 10.10.11.0/24
- router_2_name: sdnvpn-1-2-router
secgroup_name: sdnvpn-sg
secgroup_descr: Security group for SDNVPN test cases
- targets1: '88:88'
- targets2: '55:55'
- route_distinguishers: '11:11'
+ targets1: '88:88'
+ targets2: '55:55'
+ route_distinguishers: '11:11'
sdnvpn.test.functest.testcase_2:
- enabled: true
- description: Tenant separation
+ enabled: true
+ order: 2
+ description: Tenant separation
+ instance_1_name: sdnvpn-2-1
+ instance_2_name: sdnvpn-2-2
+ instance_3_name: sdnvpn-2-3
+ instance_4_name: sdnvpn-2-4
+ instance_5_name: sdnvpn-2-5
+ instance_1_ip: 10.10.10.11
+ instance_2_ip: 10.10.10.12
+ instance_3_ip: 10.10.11.13
+ instance_4_ip: 10.10.10.12
+ instance_5_ip: 10.10.11.13
+ image_name: sdnvpn-image
+ net_1_name: sdnvpn-2-1-net
+ subnet_1a_name: sdnvpn-2-1a-subnet
+ subnet_1a_cidr: 10.10.10.0/24
+ subnet_1b_name: sdnvpn-2-1b-subnet
+ subnet_1b_cidr: 10.10.11.0/24
+ router_1_name: sdnvpn-2-1-router
+ net_2_name: sdnvpn-2-2-net
+ subnet_2a_name: sdnvpn-2-2a-subnet
+ subnet_2a_cidr: 10.10.11.0/24
+ subnet_2b_name: sdnvpn-2-2b-subnet
+ subnet_2b_cidr: 10.10.10.0/24
+ router_2_name: sdnvpn-2-2-router
+ secgroup_name: sdnvpn-sg
+ secgroup_descr: Security group for SDNVPN test cases
+ targets1: '88:88'
+ targets2: '55:55'
+ route_distinguishers1: '111:111'
+ route_distinguishers2: '222:222'
+
+ sdnvpn.test.functest.testcase_2bis:
+ enabled: true
+ order: 15
+      description: Tenant separation - same as test case 2
+ image_name: sdnvpn-image
+ stack_name: stack-2bis
+ hot_file_name: artifacts/testcase_2bis.yaml
+ heat_parameters:
instance_1_name: sdnvpn-2-1
instance_2_name: sdnvpn-2-2
instance_3_name: sdnvpn-2-3
@@ -42,7 +104,6 @@ testcases:
instance_3_ip: 10.10.11.13
instance_4_ip: 10.10.10.12
instance_5_ip: 10.10.11.13
- image_name: sdnvpn-image
net_1_name: sdnvpn-2-1-net
subnet_1a_name: sdnvpn-2-1a-subnet
subnet_1a_cidr: 10.10.10.0/24
@@ -57,43 +118,77 @@ testcases:
router_2_name: sdnvpn-2-2-router
secgroup_name: sdnvpn-sg
secgroup_descr: Security group for SDNVPN test cases
- targets1: '88:88'
- targets2: '55:55'
- route_distinguishers1: '111:111'
- route_distinguishers2: '222:222'
+ targets1: '88:88'
+ targets2: '55:55'
+ route_distinguishers1: '111:111'
+ route_distinguishers2: '222:222'
sdnvpn.test.functest.testcase_3:
- enabled: true
- description: Data center gateway integration
- secgroup_name: sdnvpn-sg
- secgroup_descr: Security group for SDNVPN test cases
- image_name: sdnvpn-image
- ubuntu_image_name: sdnvpn-ubuntu-image
- net_1_name: sdnvpn-3-1-net
- subnet_1_name: sdnvpn-3-1-subnet
- subnet_1_cidr: 10.10.10.0/24
- router_1_name: sdnvpn-3-1-router
- quagga_net_name: sdnvpn-3-2-quagga-net
- quagga_subnet_name: sdnvpn-3-2-quagga-subnet
- quagga_subnet_cidr: 10.10.11.0/24
- quagga_router_name: sdnvpn-3-2-quagga-router
- quagga_instance_name: sdnvpn-3-2-quagga
- quagga_instance_ip: 10.10.11.5
- instance_1_name: sdnvpn-3-1
- instance_1_ip: 10.10.10.5
- import_targets: '31:31'
- export_targets: '32:32'
-
+ enabled: true
+ order: 3
+ description: Data center gateway integration
+ secgroup_name: sdnvpn-sg
+ secgroup_descr: Security group for SDNVPN test cases
+ image_name: sdnvpn-image
+ ubuntu_image_name: sdnvpn-ubuntu-image
+ net_1_name: sdnvpn-3-1-net
+ subnet_1_name: sdnvpn-3-1-subnet
+ subnet_1_cidr: 10.10.10.0/24
+ router_1_name: sdnvpn-3-1-router
+ quagga_net_name: sdnvpn-3-2-quagga-net
+ quagga_subnet_name: sdnvpn-3-2-quagga-subnet
+ quagga_subnet_cidr: 10.10.11.0/24
+ quagga_router_name: sdnvpn-3-2-quagga-router
+ quagga_instance_name: sdnvpn-3-2-quagga
+ quagga_instance_ip: 10.10.11.5
+ instance_1_name: sdnvpn-3-1
+ instance_1_ip: 10.10.10.5
+ route_targets: '88:88'
+ import_targets: '88:88'
+ export_targets: '88:88'
+ route_distinguishers: '18:18'
+ external_network_name: External Network in Quagga VM
+ external_network_ip_prefix: 30.1.1.1/32
+ external_network_ip: 30.1.1.1
sdnvpn.test.functest.testcase_4:
- enabled: true
- description: VPN provides connectivity between subnets using router association
+ enabled: true
+ order: 4
+ description: "VPN provides connectivity between subnets using router \
+ association"
+ instance_1_name: sdnvpn-4-1
+ instance_2_name: sdnvpn-4-2
+ instance_3_name: sdnvpn-4-3
+ instance_4_name: sdnvpn-4-4
+ instance_5_name: sdnvpn-4-5
+ image_name: sdnvpn-image
+ net_1_name: sdnvpn-4-1-net
+ subnet_1_name: sdnvpn-4-1-subnet
+ subnet_1_cidr: 10.10.10.0/24
+ router_1_name: sdnvpn-4-1-router
+ net_2_name: sdnvpn-4-2-net
+ subnet_2_name: sdnvpn-4-2-subnet
+ subnet_2_cidr: 10.10.11.0/24
+ router_2_name: sdnvpn-4-2-router
+ secgroup_name: sdnvpn-sg
+ secgroup_descr: Security group for SDNVPN test cases
+ targets1: '88:88'
+ targets2: '55:55'
+ route_distinguishers: '12:12'
+
+ sdnvpn.test.functest.testcase_4bis:
+ enabled: true
+ order: 17
+ description: Test bed for HOT introduction - same tests as case 4
+ image_name: sdnvpn-image
+ stack_name: stack-4bis
+ hot_file_name: artifacts/testcase_4bis.yaml
+ heat_parameters:
instance_1_name: sdnvpn-4-1
instance_2_name: sdnvpn-4-2
instance_3_name: sdnvpn-4-3
instance_4_name: sdnvpn-4-4
instance_5_name: sdnvpn-4-5
- image_name: sdnvpn-image
net_1_name: sdnvpn-4-1-net
subnet_1_name: sdnvpn-4-1-subnet
subnet_1_cidr: 10.10.10.0/24
@@ -101,36 +196,62 @@ testcases:
net_2_name: sdnvpn-4-2-net
subnet_2_name: sdnvpn-4-2-subnet
subnet_2_cidr: 10.10.11.0/24
- router_2_name: sdnvpn-4-2-router
secgroup_name: sdnvpn-sg
secgroup_descr: Security group for SDNVPN test cases
- targets1: '88:88'
- targets2: '55:55'
- route_distinguishers: '12:12'
+ targets1: '88:88'
+ targets2: '55:55'
+ route_distinguishers: '12:12'
sdnvpn.test.functest.testcase_7:
- enabled: false
- description: Network associate VPNs with routers attached (ODL Bug 6962)
- image_name: sdnvpn-image
- instance_1_name: sdnvpn-7-1
- instance_2_name: sdnvpn-7-2
- net_1_name: sdnvpn-7-1
- subnet_1_name: sdnvpn-7-1-subnet
- subnet_1_cidr: 10.10.10.0/24
- router_1_name: sdnvpn-7-1-router
- net_2_name: sdnvpn-7-2
- subnet_2_name: sdnvpn-7-2-subnet
- subnet_2_cidr: 10.10.20.0/24
- router_2_name: sdnvpn-7-2-router
- secgroup_name: sdnvpn-sg
- secgroup_descr: Security group for SDNVPN test cases
- targets: '77:77'
- route_distinguishers: '11:11'
+ enabled: false
+ order: 7
+ description: Network associate VPNs with routers attached (ODL Bug 6962)
+ image_name: sdnvpn-image
+ instance_1_name: sdnvpn-7-1
+ instance_2_name: sdnvpn-7-2
+ net_1_name: sdnvpn-7-1
+ subnet_1_name: sdnvpn-7-1-subnet
+ subnet_1_cidr: 10.10.10.0/24
+ router_1_name: sdnvpn-7-1-router
+ net_2_name: sdnvpn-7-2
+ subnet_2_name: sdnvpn-7-2-subnet
+ subnet_2_cidr: 10.10.20.0/24
+ router_2_name: sdnvpn-7-2-router
+ secgroup_name: sdnvpn-sg
+ secgroup_descr: Security group for SDNVPN test cases
+ targets: '77:77'
+ route_distinguishers: '11:11'
sdnvpn.test.functest.testcase_8:
- enabled: true
- description: Test floating IP and router assoc coexistence
- image_name: sdnvpn-image
+ enabled: true
+ order: 8
+ description: Test floating IP and router assoc coexistence
+ image_name: sdnvpn-image
+ instance_1_name: sdnvpn-8-1
+ instance_2_name: sdnvpn-8-2
+ net_1_name: sdnvpn-8-1
+ subnet_1_name: sdnvpn-8-1-subnet
+ subnet_1_cidr: 10.10.10.0/24
+ router_1_name: sdnvpn-8-1-router
+ net_2_name: sdnvpn-8-2
+ subnet_2_name: sdnvpn-8-2-subnet
+ subnet_2_cidr: 10.10.20.0/24
+ router_2_name: sdnvpn-8-2-router
+ secgroup_name: sdnvpn-sg
+ secgroup_descr: Security group for SDNVPN test cases
+ targets: '88:88'
+ route_distinguishers: '18:18'
+
+ sdnvpn.test.functest.testcase_8bis:
+ enabled: true
+ order: 21
+ description: "Test floating IP and router assoc coexistence \
+ same as test case 8"
+ image_name: sdnvpn-image
+ stack_name: stack-8bis
+ hot_file_name: artifacts/testcase_8bis.yaml
+ hot_update_file_name: artifacts/testcase_8bis_upd.yaml
+ heat_parameters:
instance_1_name: sdnvpn-8-1
instance_2_name: sdnvpn-8-2
net_1_name: sdnvpn-8-1
@@ -140,81 +261,88 @@ testcases:
net_2_name: sdnvpn-8-2
subnet_2_name: sdnvpn-8-2-subnet
subnet_2_cidr: 10.10.20.0/24
- router_2_name: sdnvpn-8-2-router
secgroup_name: sdnvpn-sg
secgroup_descr: Security group for SDNVPN test cases
- targets: '88:88'
- route_distinguishers: '18:18'
+ targets: '88:88'
+ route_distinguishers: '18:18'
sdnvpn.test.functest.testcase_9:
- enabled: true
- description: Verify that all OpenStack nodes OVS br-int have fail_mode set to secure.
+ enabled: true
+ order: 9
+ description: "Verify that all OpenStack nodes OVS br-int have fail_mode \
+ set to secure."
sdnvpn.test.functest.testcase_10:
- enabled: true
- description: Test if interupts occure during ping, when removing and adding instances
- instance_1_name: sdnvpn-10-1
- instance_2_name: sdnvpn-10-2
- instance_3_name: sdnvpn-10-3
- instance_4_name: sdnvpn-10-4
- image_name: sdnvpn-image
- net_1_name: sdnvpn-10-1-net
- subnet_1_name: sdnvpn-10-1-subnet
- subnet_1_cidr: 10.10.10.0/24
- router_1_name: sdnvpn-10-1-router
- secgroup_name: sdnvpn-sg
- secgroup_descr: Security group for SDNVPN test cases
+ enabled: true
+ order: 10
+    description: "Test if interrupts occur during ping, when removing and \
+                  adding instances"
+ instance_1_name: sdnvpn-10-1
+ instance_2_name: sdnvpn-10-2
+ instance_3_name: sdnvpn-10-3
+ instance_4_name: sdnvpn-10-4
+ image_name: sdnvpn-image
+ net_1_name: sdnvpn-10-1-net
+ subnet_1_name: sdnvpn-10-1-subnet
+ subnet_1_cidr: 10.10.10.0/24
+ router_1_name: sdnvpn-10-1-router
+ secgroup_name: sdnvpn-sg
+ secgroup_descr: Security group for SDNVPN test cases
sdnvpn.test.functest.testcase_11:
- enabled: true
- description: Check relevant OVS groups are removed upon deletion of OpenStack topology
- instance_1_name: sdnvpn-11-1
- instance_2_name: sdnvpn-11-2
- image_name: sdnvpn-image
- net_1_name: sdnvpn-11-1-net
- subnet_1_name: sdnvpn-11-1-subnet
- subnet_1_cidr: 10.10.10.0/24
- router_1_name: sdnvpn-11-1-router
- secgroup_name: sdnvpn-sg
- secgroup_descr: Security group for SDNVPN test cases
+ enabled: true
+ order: 11
+ description: "Check relevant OVS groups are removed upon deletion of \
+ OpenStack topology"
+ instance_1_name: sdnvpn-11-1
+ instance_2_name: sdnvpn-11-2
+ image_name: sdnvpn-image
+ net_1_name: sdnvpn-11-1-net
+ subnet_1_name: sdnvpn-11-1-subnet
+ subnet_1_cidr: 10.10.10.0/24
+ router_1_name: sdnvpn-11-1-router
+ secgroup_name: sdnvpn-sg
+ secgroup_descr: Security group for SDNVPN test cases
sdnvpn.test.functest.testcase_12:
- enabled: true
- description: Check Flows and Groups are reprogrammed after OVS reconnect
- instance_1_name: sdnvpn-12-1
- instance_2_name: sdnvpn-12-2
- image_name: sdnvpn-image
- net_1_name: sdnvpn-12-1-net
- subnet_1_name: sdnvpn-12-1-subnet
- subnet_1_cidr: 10.10.10.0/24
- secgroup_name: sdnvpn-sg
- secgroup_descr: Security group for SDNVPN test cases
+ enabled: true
+ order: 12
+ description: Check Flows and Groups are reprogrammed after OVS reconnect
+ instance_1_name: sdnvpn-12-1
+ instance_2_name: sdnvpn-12-2
+ image_name: sdnvpn-image
+ net_1_name: sdnvpn-12-1-net
+ subnet_1_name: sdnvpn-12-1-subnet
+ subnet_1_cidr: 10.10.10.0/24
+ secgroup_name: sdnvpn-sg
+ secgroup_descr: Security group for SDNVPN test cases
sdnvpn.test.functest.testcase_13:
- enabled: true
- description: Testing extra route ECMP for intra-data center scenario
- instance_1_name: sdnvpn-13-1
- instance_2_name: sdnvpn-13-2
- instance_3_name: sdnvpn-13-3
- image_name: sdnvpn-image
- net_1_name: sdnvpn-13-1-net
- subnet_1_name: sdnvpn-13-1-subnet
- subnet_1_cidr: 10.10.10.0/24
- router_1_name: sdnvpn-13-1-router
- net_2_name: sdnvpn-13-2-net
- subnet_2_name: sdnvpn-13-2-subnet
- subnet_2_cidr: 10.10.11.0/24
- router_2_name: sdnvpn-13-2-router
- interface_name: lo
- interface_number: 1
- extra_route_cidr: 179.24.1.12/32
- extra_route_ip: 179.24.1.12
- extra_route_subnet_mask: 255.255.255.255
- extra_route_name: sdnvpn_extra_route_13
- secgroup_name: sdnvpn-sg
- secgroup_descr: Security group for SDNVPN test cases
- targets1: '88:88'
- targets2: '88:88'
- route_distinguishers:
- - '12:12'
- - '13:13'
+ enabled: true
+ order: 13
+ description: Testing extra route ECMP for intra-data center scenario
+ instance_1_name: sdnvpn-13-1
+ instance_2_name: sdnvpn-13-2
+ instance_3_name: sdnvpn-13-3
+ image_name: sdnvpn-image
+ net_1_name: sdnvpn-13-1-net
+ subnet_1_name: sdnvpn-13-1-subnet
+ subnet_1_cidr: 10.10.10.0/24
+ router_1_name: sdnvpn-13-1-router
+ net_2_name: sdnvpn-13-2-net
+ subnet_2_name: sdnvpn-13-2-subnet
+ subnet_2_cidr: 10.10.11.0/24
+ router_2_name: sdnvpn-13-2-router
+ interface_name: lo
+ interface_number: 1
+ extra_route_cidr: 179.24.1.12/32
+ extra_route_ip: 179.24.1.12
+ extra_route_subnet_mask: 255.255.255.255
+ extra_route_name: sdnvpn_extra_route_13
+ secgroup_name: sdnvpn-sg
+ secgroup_descr: Security group for SDNVPN test cases
+ targets1: '88:88'
+ targets2: '88:88'
+ route_distinguishers:
+ - '12:12'
+ - '13:13'
diff --git a/sdnvpn/test/functest/run_sdnvpn_tests.py b/sdnvpn/test/functest/run_sdnvpn_tests.py
index 1a1d8f3..b1b242e 100644
--- a/sdnvpn/test/functest/run_sdnvpn_tests.py
+++ b/sdnvpn/test/functest/run_sdnvpn_tests.py
@@ -1,4 +1,4 @@
-#!/bin/python
+#!/usr/bin/env python
#
# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
@@ -15,112 +15,144 @@ import sys
import traceback
import yaml
-from functest.core import feature as base
-from functest.utils import openstack_utils as os_utils
+from collections import OrderedDict
+from xtesting.core import feature
from sdnvpn.lib import config as sdnvpn_config
+from sdnvpn.lib import openstack_utils as os_utils
from sdnvpn.lib.gather_logs import gather_logs
from sdnvpn.lib import utils as test_utils
-COMMON_CONFIG = sdnvpn_config.CommonConfig()
+logger = logging.getLogger(__name__)
+COMMON_CONFIG = sdnvpn_config.CommonConfig()
-class SdnvpnFunctest(base.Feature):
- __logger = logging.getLogger(__name__)
+class SdnvpnFunctest(feature.Feature):
def execute(self):
- nova_client = os_utils.get_nova_client()
- neutron_client = os_utils.get_neutron_client()
+ cloud = os_utils.get_os_cloud()
+ conn = os_utils.get_os_connection()
tenant_id = os_utils.get_tenant_id(os_utils.get_keystone_client(),
os.environ['OS_PROJECT_NAME'])
- neutron_quota = test_utils.get_neutron_quota(neutron_client, tenant_id)
- (neutron_nw_quota, neutron_subnet_quota, neutron_port_quota) = (
- neutron_quota['network'], neutron_quota['subnet'],
- neutron_quota['port'])
- instances_quota = test_utils.get_nova_instances_quota(nova_client)
+ neutron_quota = test_utils.get_neutron_quota(conn, tenant_id)
+ (neutron_nw_quota, neutron_subnet_quota, neutron_port_quota,
+ neutron_router_quota) = (
+ neutron_quota.networks, neutron_quota.subnets,
+ neutron_quota.ports, neutron_quota.routers)
+ instances_quota = test_utils.get_nova_instances_quota(cloud)
- self.__logger.info("Setting net/subnet/port quota to unlimited")
+ logger.info("Setting net/subnet/port/router "
+ "quota to unlimited")
test_utils.update_nw_subnet_port_quota(
- neutron_client,
+ conn,
tenant_id,
COMMON_CONFIG.neutron_nw_quota,
COMMON_CONFIG.neutron_subnet_quota,
- COMMON_CONFIG.neutron_port_quota)
+ COMMON_CONFIG.neutron_port_quota,
+ COMMON_CONFIG.neutron_router_quota)
+ test_utils.create_default_flavor()
# Workaround for
# https://jira.opnfv.org/projects/SDNVPN/issues/SDNVPN-115
- self.__logger.info("Setting instances quota class to unlimited")
+ logger.info("Setting instances quota class to unlimited")
test_utils.update_instance_quota_class(
- nova_client,
- COMMON_CONFIG.nova_instances_quota_class)
+ cloud, COMMON_CONFIG.nova_instances_quota_class)
+
+ # Clean up the stale floating ip's so that required
+ # ip addresses are available for sdnvpn testcases
+ logger.info("Cleaning up the Floating IP Addresses")
+ floating_ips = os_utils.get_floating_ips(conn)
+ for floating_ip in floating_ips:
+ os_utils.delete_floating_ip(conn, floating_ip.id)
+
+ # Workaround for
+ # https://jira.opnfv.org/browse/SNAPS-318
+ # Clean up the stale routers
+ logger.info("Cleaning up the stale routers")
+ ports = os_utils.get_port_list(conn)
+ for port in ports:
+ if port.device_owner == 'network:router_interface':
+ os_utils.delete_neutron_port(conn, port.id)
+ routers = os_utils.get_router_list(conn)
+ for router in routers:
+ os_utils.remove_gateway_router(conn, router.id)
+ os_utils.delete_neutron_router(conn, router.id)
with open(COMMON_CONFIG.config_file) as f:
config_yaml = yaml.safe_load(f)
testcases = config_yaml.get("testcases")
+ testcases_ordered = OrderedDict(sorted(testcases.items(),
+ key=lambda x: x[1]['order']))
overall_status = "PASS"
- for tc in testcases:
- if testcases[tc]['enabled']:
+ for tc, test_sdnvpn in testcases_ordered.items():
+ if test_sdnvpn['enabled']:
test_name = tc
test_descr = testcases[tc]['description']
title = ("Running '%s - %s'" %
(test_name, test_descr))
- self.__logger.info(title)
- self.__logger.info("%s\n" % ("=" * len(title)))
- t = importlib.import_module(test_name, package=None)
+ logger.info(title)
+ logger.info("%s\n" % ("=" * len(title)))
try:
+ logger.info("Importing the testcase %s" % test_name)
+ t = importlib.import_module(test_name, package=None)
+ logger.info("Calling the testcase %s main method"
+ % test_name)
result = t.main()
+ logger.info("Execution is complete for the"
+ " testcase %s" % test_name)
except Exception as ex:
result = -1
- self.__logger.info("Caught Exception in %s: %s Trace: %s"
- % (test_name, ex,
- traceback.format_exc()))
+ logger.info("Caught Exception in %s: %s Trace: %s"
+ % (test_name, ex,
+ traceback.format_exc()))
if result < 0:
status = "FAIL"
overall_status = "FAIL"
- self.__logger.info("Testcase %s failed" % test_name)
+ logger.info("Testcase %s failed" % test_name)
else:
status = result.get("status")
self.details.update(
{test_name: {'status': status,
'details': result.get("details")}})
- self.__logger.info("Results of test case '%s - %s':\n%s\n"
- % (test_name, test_descr, result))
+ logger.info("Results of test case '%s - %s':\n%s\n"
+ % (test_name, test_descr, result))
if status == "FAIL":
overall_status = "FAIL"
- self.__logger.info("Resetting subnet/net/port quota")
- test_utils.update_nw_subnet_port_quota(neutron_client,
+ logger.info("Resetting subnet/net/port quota")
+ test_utils.update_nw_subnet_port_quota(conn,
tenant_id,
neutron_nw_quota,
neutron_subnet_quota,
- neutron_port_quota)
+ neutron_port_quota,
+ neutron_router_quota)
- self.__logger.info("Resetting instances quota class")
- test_utils.update_instance_quota_class(nova_client, instances_quota)
+ logger.info("Resetting instances quota class")
+ test_utils.update_instance_quota_class(cloud, instances_quota)
try:
installer_type = str(os.environ['INSTALLER_TYPE'].lower())
if installer_type in ["fuel", "apex"]:
gather_logs('overall')
else:
- self.__logger.info("Skipping log gathering because installer"
- "type %s is neither fuel nor apex" %
- installer_type)
+ logger.info("Skipping log gathering because installer"
+ "type %s is neither fuel nor apex" %
+ installer_type)
except Exception as ex:
- self.__logger.error(('Something went wrong in the Log gathering.'
- 'Ex: %s, Trace: %s')
- % (ex, traceback.format_exc()))
+ logger.error(('Something went wrong in the Log gathering.'
+ 'Ex: %s, Trace: %s')
+ % (ex, traceback.format_exc()))
if overall_status == "PASS":
self.result = 100
- return base.Feature.EX_OK
+ return feature.Feature.EX_OK
- return base.Feature.EX_RUN_ERROR
+ return feature.Feature.EX_RUN_ERROR
if __name__ == '__main__':
diff --git a/sdnvpn/test/functest/tempest.py b/sdnvpn/test/functest/tempest.py
deleted file mode 100644
index 5fca8cb..0000000
--- a/sdnvpn/test/functest/tempest.py
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 All rights reserved
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-#
-import ConfigParser
-import logging
-import os
-import re
-import shutil
-
-import functest.opnfv_tests.openstack.tempest.conf_utils as tempest_utils
-
-logger = logging.getLogger('sdnvpn-tempest')
-
-
-def main():
- verifier_repo_dir = tempest_utils.get_verifier_repo_dir(None)
- src_tempest_dir = tempest_utils.get_verifier_deployment_dir(None, None)
-
- if not src_tempest_dir:
- logger.error("Rally deployment not found.")
- exit(-1)
-
- tempest_utils.configure_verifier(src_tempest_dir)
-
- src_tempest_conf = os.path.join(src_tempest_dir, 'tempest.conf')
- bgpvpn_tempest_conf = src_tempest_dir + '/bgpvpn_tempest.conf'
-
- if not os.path.isfile(src_tempest_conf):
- logger.error("tempest.conf not found in %s." % src_tempest_conf)
- exit(-1)
- shutil.copy(src_tempest_conf, bgpvpn_tempest_conf)
-
- logger.info("Copying tempest.conf to %s." % bgpvpn_tempest_conf)
- config = ConfigParser.RawConfigParser()
- config.read(bgpvpn_tempest_conf)
- config.set('service_available', 'bgpvpn', 'True')
- logger.debug("Updating %s with bgpvpn=True" % bgpvpn_tempest_conf)
- with open(bgpvpn_tempest_conf, 'wb') as tempest_conf:
- config.write(tempest_conf)
-
- cmd_line = (verifier_repo_dir +
- "/run_tempest.sh -C %s -t -N -- "
- "networking_bgpvpn_tempest" % bgpvpn_tempest_conf)
- logger.info("Executing: %s" % cmd_line)
- cmd = os.popen(cmd_line)
- output = cmd.read()
- logger.debug(output)
-
- # Results parsing
- error_logs = ""
- duration = 0
- failed = 0
- try:
- # Look For errors
- error_logs = ""
- for match in re.findall('(.*?)[. ]*FAILED', output):
- error_logs += match
- # look for duration
- m = re.search('tests in(.*)sec', output)
- duration = m.group(1)
- # Look for num tests run
- m = re.search('Ran:(.*)tests', output)
- num_tests = m.group(1)
- # Look for tests failed
- m = re.search('Failed:(.*)', output)
- failed = m.group(1)
- # Look for name of the tests
- testcases = re.findall("\{0\} (.*)", output)
-
- results = {"duration": duration,
- "num_tests": num_tests, "failed": failed,
- "tests": testcases}
- if int(failed) == 0:
- status = "PASS"
- else:
- status = "FAILED"
-
- return {"status": status, "details": results}
- except:
- logger.error("Problem when parsing the results.")
-
-
-if __name__ == '__main__':
- logging.basicConfig(level=logging.INFO)
- main()
diff --git a/sdnvpn/test/functest/testcase_1.py b/sdnvpn/test/functest/testcase_1.py
index 89011cd..b524abf 100644
--- a/sdnvpn/test/functest/testcase_1.py
+++ b/sdnvpn/test/functest/testcase_1.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
#
# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
@@ -11,9 +11,9 @@
import logging
import sys
-from functest.utils import openstack_utils as os_utils
from random import randint
from sdnvpn.lib import config as sdnvpn_config
+from sdnvpn.lib import openstack_utils as os_utils
from sdnvpn.lib import utils as test_utils
from sdnvpn.lib.results import Results
@@ -25,37 +25,36 @@ TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
def main():
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
- glance_client = os_utils.get_glance_client()
(floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))
try:
image_id = os_utils.create_glance_image(
- glance_client, TESTCASE_CONFIG.image_name,
+ conn, TESTCASE_CONFIG.image_name,
COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
container="bare", public='public')
image_ids.append(image_id)
- network_1_id = test_utils.create_net(neutron_client,
+ network_1_id = test_utils.create_net(conn,
TESTCASE_CONFIG.net_1_name)
- subnet_1_id = test_utils.create_subnet(neutron_client,
+ subnet_1_id = test_utils.create_subnet(conn,
TESTCASE_CONFIG.subnet_1_name,
TESTCASE_CONFIG.subnet_1_cidr,
network_1_id)
- network_2_id = test_utils.create_net(neutron_client,
+ network_2_id = test_utils.create_net(conn,
TESTCASE_CONFIG.net_2_name)
- subnet_2_id = test_utils.create_subnet(neutron_client,
+ subnet_2_id = test_utils.create_subnet(conn,
TESTCASE_CONFIG.subnet_2_name,
TESTCASE_CONFIG.subnet_2_cidr,
network_2_id)
@@ -63,49 +62,49 @@ def main():
subnet_ids.extend([subnet_1_id, subnet_2_id])
sg_id = os_utils.create_security_group_full(
- neutron_client, TESTCASE_CONFIG.secgroup_name,
+ conn, TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
- compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
+ compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
av_zone_1 = "nova:" + compute_nodes[0]
av_zone_2 = "nova:" + compute_nodes[1]
# boot INTANCES
vm_2 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1)
- vm_2_ip = test_utils.get_instance_ip(vm_2)
+ vm_2_ip = test_utils.get_instance_ip(conn, vm_2)
vm_3 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_3_name,
image_id,
network_1_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_2)
- vm_3_ip = test_utils.get_instance_ip(vm_3)
+ vm_3_ip = test_utils.get_instance_ip(conn, vm_3)
vm_5 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_5_name,
image_id,
network_2_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_2)
- vm_5_ip = test_utils.get_instance_ip(vm_5)
+ vm_5_ip = test_utils.get_instance_ip(conn, vm_5)
# We boot vm5 first because we need vm5_ip for vm4 userdata
u4 = test_utils.generate_ping_userdata([vm_5_ip])
vm_4 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_4_name,
image_id,
network_2_id,
@@ -113,7 +112,7 @@ def main():
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1,
userdata=u4)
- vm_4_ip = test_utils.get_instance_ip(vm_4)
+ vm_4_ip = test_utils.get_instance_ip(conn, vm_4)
# We boot VM1 at the end because we need to get the IPs first
# to generate the userdata
@@ -122,7 +121,7 @@ def main():
vm_4_ip,
vm_5_ip])
vm_1 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
@@ -192,11 +191,46 @@ def main():
results.add_to_summary(0, "-")
results.record_action(msg)
results.add_to_summary(0, "-")
- kwargs = {"import_targets": TESTCASE_CONFIG.targets1,
- "export_targets": TESTCASE_CONFIG.targets1,
- "name": vpn_name}
- bgpvpn = test_utils.update_bgpvpn(neutron_client,
- bgpvpn_id, **kwargs)
+
+ # use bgpvpn-create instead of update till NETVIRT-1067 bug is fixed
+ # kwargs = {"import_targets": TESTCASE_CONFIG.targets1,
+ # "export_targets": TESTCASE_CONFIG.targets1,
+ # "name": vpn_name}
+ # bgpvpn = test_utils.update_bgpvpn(neutron_client,
+ # bgpvpn_id, **kwargs)
+
+ test_utils.delete_bgpvpn(neutron_client, bgpvpn_id)
+ bgpvpn_ids.remove(bgpvpn_id)
+ kwargs = {
+ "import_targets": TESTCASE_CONFIG.targets1,
+ "export_targets": TESTCASE_CONFIG.targets1,
+ "route_distinguishers": TESTCASE_CONFIG.route_distinguishers,
+ "name": vpn_name
+ }
+
+ test_utils.wait_before_subtest()
+
+ bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)
+ bgpvpn_id = bgpvpn['bgpvpn']['id']
+ logger.debug("VPN re-created details: %s" % bgpvpn)
+ bgpvpn_ids.append(bgpvpn_id)
+
+ msg = ("Associate network '%s' to the VPN." %
+ TESTCASE_CONFIG.net_1_name)
+ results.record_action(msg)
+ results.add_to_summary(0, "-")
+
+ test_utils.create_network_association(
+ neutron_client, bgpvpn_id, network_1_id)
+
+ test_utils.create_network_association(
+ neutron_client, bgpvpn_id, network_2_id)
+
+ test_utils.wait_for_bgp_net_assocs(neutron_client,
+ bgpvpn_id,
+ network_1_id,
+ network_2_id)
+ # The above code has to be removed after re-enabling bgpvpn-update
logger.info("Waiting for the VMs to connect to each other using the"
" updated network configuration")
@@ -209,9 +243,9 @@ def main():
logger.error("exception occurred while executing testcase_1: %s", e)
raise
finally:
- test_utils.cleanup_nova(nova_client, instance_ids)
- test_utils.cleanup_glance(glance_client, image_ids)
- test_utils.cleanup_neutron(neutron_client, floatingip_ids,
+ test_utils.cleanup_nova(conn, instance_ids)
+ test_utils.cleanup_glance(conn, image_ids)
+ test_utils.cleanup_neutron(conn, neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,
router_ids, network_ids)
@@ -219,5 +253,4 @@ def main():
if __name__ == '__main__':
- logging.basicConfig(level=logging.INFO)
sys.exit(main())
diff --git a/sdnvpn/test/functest/testcase_10.py b/sdnvpn/test/functest/testcase_10.py
index 02956c4..3ba93a9 100644
--- a/sdnvpn/test/functest/testcase_10.py
+++ b/sdnvpn/test/functest/testcase_10.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
#
# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
@@ -13,13 +13,12 @@ import re
import sys
import time
-from functest.utils import openstack_utils as os_utils
from multiprocessing import Process, Manager, Lock
from sdnvpn.lib import config as sdnvpn_config
+from sdnvpn.lib import openstack_utils as os_utils
from sdnvpn.lib import utils as test_utils
from sdnvpn.lib.results import Results
-
logger = logging.getLogger('__name__')
std_out_lock = Lock()
@@ -29,14 +28,15 @@ TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
'sdnvpn.test.functest.testcase_10')
-def monitor(in_data, out_data, vm):
+def monitor(conn, in_data, out_data, vm):
# At the beginning of ping we might have some
# failures, so we ignore the first 10 pings
- lines_offset = 10
+ lines_offset = 20
while in_data["stop_thread"] is False:
try:
time.sleep(1)
- vm_console_out_lines = vm.get_console_output().split('\n')
+ vm_console_out_lines = conn.compute.\
+ get_server_console_output(vm)['output'].split('\n')
if lines_offset < len(vm_console_out_lines):
for console_line in vm_console_out_lines[lines_offset:-1]:
is_ping_error = re.match(r'ping.*KO', console_line)
@@ -46,38 +46,37 @@ def monitor(in_data, out_data, vm):
format(vm.name))
# Atomic write to std out
with std_out_lock:
- logging.error("Failure during ping from "
- "instance {}: {}".
- format(vm.name, console_line))
+ logger.error("Failure during ping from "
+ "instance {}: {}".
+ format(vm.name, console_line))
elif re.match(r'ping.*OK', console_line):
# Atomic write to std out
with std_out_lock:
- logging.info("Ping from instance {}: {}".
- format(vm.name, console_line))
+ logger.info("Ping from instance {}: {}".
+ format(vm.name, console_line))
lines_offset = len(vm_console_out_lines)
- except:
+ except Exception:
# Atomic write to std out
with std_out_lock:
- logging.error("Failure in monitor_thread of instance {}".
- format(vm.name))
+ logger.error("Failure in monitor_thread of instance {}".
+ format(vm.name))
# Return to main process
return
def main():
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
- glance_client = os_utils.get_glance_client()
(floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))
- image_id = os_utils.create_glance_image(glance_client,
+ image_id = os_utils.create_glance_image(conn,
TESTCASE_CONFIG.image_name,
COMMON_CONFIG.image_path,
disk=COMMON_CONFIG.image_format,
@@ -85,9 +84,9 @@ def main():
public='public')
image_ids.append(image_id)
- network_1_id = test_utils.create_net(neutron_client,
+ network_1_id = test_utils.create_net(conn,
TESTCASE_CONFIG.net_1_name)
- subnet_1_id = test_utils.create_subnet(neutron_client,
+ subnet_1_id = test_utils.create_subnet(conn,
TESTCASE_CONFIG.subnet_1_name,
TESTCASE_CONFIG.subnet_1_cidr,
network_1_id)
@@ -95,28 +94,28 @@ def main():
network_ids.append(network_1_id)
subnet_ids.append(subnet_1_id)
- sg_id = os_utils.create_security_group_full(neutron_client,
+ sg_id = os_utils.create_security_group_full(conn,
TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
- compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
+ compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
av_zone_1 = "nova:" + compute_nodes[0]
av_zone_2 = "nova:" + compute_nodes[1]
# boot INSTANCES
vm_2 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1)
- vm2_ip = test_utils.get_instance_ip(vm_2)
+ vm2_ip = test_utils.get_instance_ip(conn, vm_2)
- u1 = test_utils.generate_ping_userdata([vm2_ip], 1)
+ u1 = test_utils.generate_ping_userdata([vm2_ip])
vm_1 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
@@ -124,11 +123,11 @@ def main():
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1,
userdata=u1)
- vm1_ip = test_utils.get_instance_ip(vm_1)
+ vm1_ip = test_utils.get_instance_ip(conn, vm_1)
- u3 = test_utils.generate_ping_userdata([vm1_ip, vm2_ip], 1)
+ u3 = test_utils.generate_ping_userdata([vm1_ip, vm2_ip])
vm_3 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_3_name,
image_id,
network_1_id,
@@ -136,7 +135,7 @@ def main():
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_2,
userdata=u3)
- vm3_ip = test_utils.get_instance_ip(vm_3)
+ vm3_ip = test_utils.get_instance_ip(conn, vm_3)
# We do not put vm_2 id in instance_ids table because we will
# delete the current instance during the testing process
instance_ids.extend([vm_1.id, vm_3.id])
@@ -154,30 +153,30 @@ def main():
monitor_output1 = m.dict()
monitor_input1["stop_thread"] = False
monitor_output1["error_msg"] = ""
- monitor_thread1 = Process(target=monitor, args=(monitor_input1,
+ monitor_thread1 = Process(target=monitor, args=(conn, monitor_input1,
monitor_output1, vm_1,))
monitor_input2 = m.dict()
monitor_output2 = m.dict()
monitor_input2["stop_thread"] = False
monitor_output2["error_msg"] = ""
- monitor_thread2 = Process(target=monitor, args=(monitor_input2,
+ monitor_thread2 = Process(target=monitor, args=(conn, monitor_input2,
monitor_output2, vm_2,))
monitor_input3 = m.dict()
monitor_output3 = m.dict()
monitor_input3["stop_thread"] = False
monitor_output3["error_msg"] = ""
- monitor_thread3 = Process(target=monitor, args=(monitor_input3,
+ monitor_thread3 = Process(target=monitor, args=(conn, monitor_input3,
monitor_output3, vm_3,))
# Lists of all monitor threads and their inputs and outputs.
threads = [monitor_thread1, monitor_thread2, monitor_thread3]
thread_inputs = [monitor_input1, monitor_input2, monitor_input3]
thread_outputs = [monitor_output1, monitor_output2, monitor_output3]
try:
- logging.info("Starting all monitor threads")
+ logger.info("Starting all monitor threads")
# Start all monitor threads
for thread in threads:
thread.start()
- logging.info("Wait before subtest")
+ logger.info("Wait before subtest")
test_utils.wait_before_subtest()
monitor_err_msg = ""
for thread_output in thread_outputs:
@@ -192,14 +191,21 @@ def main():
results.add_failure(monitor_err_msg)
# Stop monitor thread 2 and delete instance vm_2
thread_inputs[1]["stop_thread"] = True
- if not os_utils.delete_instance(nova_client, vm_2.id):
- logging.error("Fail to delete vm_2 instance during "
- "testing process")
+ if not os_utils.delete_instance(conn, vm_2.id):
+ logger.error("Fail to delete vm_2 instance during "
+ "testing process")
raise Exception("Fail to delete instance vm_2.")
+ for thread_input in thread_inputs:
+ thread_input["stop_thread"] = True
+ for thread in threads:
+ thread.join()
+ threads = []
+ thread_inputs = []
+ thread_outputs = []
# Create a new vm (vm_4) on compute 1 node
- u4 = test_utils.generate_ping_userdata([vm1_ip, vm3_ip], 1)
+ u4 = test_utils.generate_ping_userdata([vm1_ip, vm3_ip])
vm_4 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_4_name,
image_id,
network_1_id,
@@ -219,14 +225,14 @@ def main():
monitor_output4 = m.dict()
monitor_input4["stop_thread"] = False
monitor_output4["error_msg"] = ""
- monitor_thread4 = Process(target=monitor, args=(monitor_input4,
+ monitor_thread4 = Process(target=monitor, args=(conn, monitor_input4,
monitor_output4,
vm_4,))
threads.append(monitor_thread4)
thread_inputs.append(monitor_input4)
thread_outputs.append(monitor_output4)
- logging.info("Starting monitor thread of vm_4")
- threads[3].start()
+ logger.info("Starting monitor thread of vm_4")
+ threads[0].start()
test_utils.wait_before_subtest()
monitor_err_msg = ""
for thread_output in thread_outputs:
@@ -246,22 +252,21 @@ def main():
raise
finally:
# Give a stop signal to all threads
- logging.info("Sending stop signal to monitor thread")
+ logger.info("Sending stop signal to monitor thread")
for thread_input in thread_inputs:
thread_input["stop_thread"] = True
# Wait for all threads to stop and return to the main process
for thread in threads:
thread.join()
- test_utils.cleanup_nova(nova_client, instance_ids)
- test_utils.cleanup_glance(glance_client, image_ids)
- test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
- interfaces, subnet_ids, router_ids,
- network_ids)
+ test_utils.cleanup_nova(conn, instance_ids)
+ test_utils.cleanup_glance(conn, image_ids)
+ test_utils.cleanup_neutron(conn, neutron_client, floatingip_ids,
+ bgpvpn_ids, interfaces, subnet_ids,
+ router_ids, network_ids)
return results.compile_summary()
if __name__ == '__main__':
- logging.basicConfig(level=logging.INFO)
sys.exit(main())
diff --git a/sdnvpn/test/functest/testcase_11.py b/sdnvpn/test/functest/testcase_11.py
index 40de205..fd2c74a 100644
--- a/sdnvpn/test/functest/testcase_11.py
+++ b/sdnvpn/test/functest/testcase_11.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
#
# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
@@ -11,8 +11,8 @@
import logging
import sys
-from functest.utils import openstack_utils as os_utils
from sdnvpn.lib import config as sdnvpn_config
+from sdnvpn.lib import openstack_utils as os_utils
from sdnvpn.lib import utils as test_utils
from sdnvpn.lib.results import Results
@@ -24,15 +24,14 @@ TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
def main():
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
- glance_client = os_utils.get_glance_client()
openstack_nodes = test_utils.get_nodes()
(floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
@@ -40,14 +39,14 @@ def main():
try:
image_id = os_utils.create_glance_image(
- glance_client, TESTCASE_CONFIG.image_name,
+ conn, TESTCASE_CONFIG.image_name,
COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
container="bare", public='public')
image_ids.append(image_id)
- network_1_id = test_utils.create_net(neutron_client,
+ network_1_id = test_utils.create_net(conn,
TESTCASE_CONFIG.net_1_name)
- subnet_1_id = test_utils.create_subnet(neutron_client,
+ subnet_1_id = test_utils.create_subnet(conn,
TESTCASE_CONFIG.subnet_1_name,
TESTCASE_CONFIG.subnet_1_cidr,
network_1_id)
@@ -56,12 +55,11 @@ def main():
subnet_ids.append(subnet_1_id)
sg_id = os_utils.create_security_group_full(
- neutron_client, TESTCASE_CONFIG.secgroup_name,
+ conn, TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
# Check required number of compute nodes
- compute_hostname = (
- nova_client.hypervisors.list()[0].hypervisor_hostname)
+ compute_hostname = conn.compute.hypervisors().next().name
compute_nodes = [node for node in openstack_nodes
if node.is_compute()]
@@ -74,7 +72,7 @@ def main():
# boot INSTANCES
vm_2 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
@@ -83,7 +81,7 @@ def main():
compute_node=av_zone_1)
vm_1 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
@@ -99,7 +97,7 @@ def main():
logger.error("One or more instances is down")
# TODO: Handle this appropriately
- logging.info("Wait before subtest")
+ logger.info("Wait before subtest")
test_utils.wait_before_subtest()
# Get added OVS groups
added_ovs_groups = (len(initial_ovs_groups) -
@@ -128,16 +126,16 @@ def main():
raise
finally:
# Cleanup topology
- test_utils.cleanup_nova(nova_client, instance_ids)
- test_utils.cleanup_glance(glance_client, image_ids)
- test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
- interfaces, subnet_ids, router_ids,
- network_ids)
+ test_utils.cleanup_nova(conn, instance_ids)
+ test_utils.cleanup_glance(conn, image_ids)
+ test_utils.cleanup_neutron(conn, neutron_client, floatingip_ids,
+ bgpvpn_ids, interfaces, subnet_ids,
+ router_ids, network_ids)
# Connect again OVS to Controller
for compute_node in compute_nodes:
compute_node.run_cmd("sudo ovs-vsctl set-controller {} {}".
format(ovs_br, ovs_controller_conn))
- logging.info("Wait before subtest")
+ logger.info("Wait before subtest")
test_utils.wait_before_subtest()
# Get OVS groups added after the reconnection
added_ovs_groups = (len(initial_ovs_groups) -
@@ -162,5 +160,4 @@ def main():
if __name__ == '__main__':
- logging.basicConfig(level=logging.INFO)
sys.exit(main())
diff --git a/sdnvpn/test/functest/testcase_12.py b/sdnvpn/test/functest/testcase_12.py
index e6a7ac5..6bb8140 100644
--- a/sdnvpn/test/functest/testcase_12.py
+++ b/sdnvpn/test/functest/testcase_12.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
#
# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
@@ -11,8 +11,8 @@
import logging
import sys
-from functest.utils import openstack_utils as os_utils
from sdnvpn.lib import config as sdnvpn_config
+from sdnvpn.lib import openstack_utils as os_utils
from sdnvpn.lib import utils as test_utils
from sdnvpn.lib.results import Results
@@ -24,15 +24,14 @@ TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
def main():
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
- glance_client = os_utils.get_glance_client()
openstack_nodes = test_utils.get_nodes()
(floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
@@ -40,14 +39,14 @@ def main():
try:
image_id = os_utils.create_glance_image(
- glance_client, TESTCASE_CONFIG.image_name,
+ conn, TESTCASE_CONFIG.image_name,
COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
container="bare", public='public')
image_ids.append(image_id)
- network_1_id = test_utils.create_net(neutron_client,
+ network_1_id = test_utils.create_net(conn,
TESTCASE_CONFIG.net_1_name)
- subnet_1_id = test_utils.create_subnet(neutron_client,
+ subnet_1_id = test_utils.create_subnet(conn,
TESTCASE_CONFIG.subnet_1_name,
TESTCASE_CONFIG.subnet_1_cidr,
network_1_id)
@@ -56,12 +55,11 @@ def main():
subnet_ids.append(subnet_1_id)
sg_id = os_utils.create_security_group_full(
- neutron_client, TESTCASE_CONFIG.secgroup_name,
+ conn, TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
# Check required number of compute nodes
- compute_hostname = (
- nova_client.hypervisors.list()[0].hypervisor_hostname)
+ compute_hostname = conn.compute.hypervisors().next().name
compute_nodes = [node for node in openstack_nodes
if node.is_compute()]
@@ -76,7 +74,7 @@ def main():
# boot INSTANCES
vm_2 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
@@ -85,7 +83,7 @@ def main():
compute_node=av_zone_1)
vm_1 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
@@ -100,7 +98,7 @@ def main():
if not instances_up:
logger.error("One or more instances is down")
- logging.info("Wait before subtest")
+ logger.info("Wait before subtest")
test_utils.wait_before_subtest()
# Get added OVS flows and groups
added_ovs_flows = len(test_utils.get_ovs_flows(compute_nodes,
@@ -140,7 +138,7 @@ def main():
compute_node.run_cmd("sudo ovs-vsctl set-controller {} {}".
format(ovs_br, ovs_controller_conn))
- logging.info("Wait before subtest resync type 1")
+ logger.info("Wait before subtest resync type 1")
test_utils.wait_before_subtest()
# Get OVS flows added after the reconnection
resynced_ovs_flows = len(test_utils.get_ovs_flows(
@@ -164,7 +162,7 @@ def main():
compute_node.run_cmd("sudo iptables -D OUTPUT -p tcp --dport 6653"
" -j DROP")
- logging.info("Wait before subtest resync type 2")
+ logger.info("Wait before subtest resync type 2")
test_utils.wait_before_subtest()
# Get OVS flows added after the reconnection
resynced_ovs_flows = len(test_utils.get_ovs_flows(
@@ -185,11 +183,11 @@ def main():
raise
finally:
# Cleanup topology
- test_utils.cleanup_nova(nova_client, instance_ids)
- test_utils.cleanup_glance(glance_client, image_ids)
- test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
- interfaces, subnet_ids, router_ids,
- network_ids)
+ test_utils.cleanup_nova(conn, instance_ids)
+ test_utils.cleanup_glance(conn, image_ids)
+ test_utils.cleanup_neutron(conn, neutron_client, floatingip_ids,
+ bgpvpn_ids, interfaces, subnet_ids,
+ router_ids, network_ids)
return results.compile_summary()
@@ -202,7 +200,9 @@ def record_test_result(expected_flow_count, actual_flow_count,
" actual flow count %s" % (str(expected_flow_count),
str(actual_flow_count)))
results.add_to_summary(0, "-")
- if expected_flow_count == actual_flow_count:
+ # Using <= for flow validation because ODL adds some more
+ # ARP/ICMP flows after VMs spawn up
+ if expected_flow_count <= actual_flow_count:
results.add_success(msg)
else:
results.add_failure(msg)
@@ -220,5 +220,4 @@ def record_test_result(expected_flow_count, actual_flow_count,
if __name__ == '__main__':
- logging.basicConfig(level=logging.INFO)
sys.exit(main())
diff --git a/sdnvpn/test/functest/testcase_13.py b/sdnvpn/test/functest/testcase_13.py
index ec0459d..e15c8f1 100644
--- a/sdnvpn/test/functest/testcase_13.py
+++ b/sdnvpn/test/functest/testcase_13.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
#
# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
@@ -9,11 +9,12 @@
#
import logging
+import os
import sys
-from functest.utils import openstack_utils as os_utils
from random import randint
from sdnvpn.lib import config as sdnvpn_config
+from sdnvpn.lib import openstack_utils as os_utils
from sdnvpn.lib import utils as test_utils
from sdnvpn.lib.results import Results
@@ -25,22 +26,32 @@ TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
def main():
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
- nova_client = os_utils.get_nova_client()
+ if not os.path.isfile(COMMON_CONFIG.ubuntu_image_path):
+ logger.info("Downloading image")
+ image_dest_path = '/'.join(
+ COMMON_CONFIG.ubuntu_image_path.split('/')[:-1])
+ os_utils.download_url(
+ "http://artifacts.opnfv.org/sdnvpn/"
+ "ubuntu-16.04-server-cloudimg-amd64-disk1.img",
+ image_dest_path)
+ else:
+ logger.info("Using old image")
+
neutron_client = os_utils.get_neutron_client()
- glance_client = os_utils.get_glance_client()
(floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
subnet_ids, interfaces, bgpvpn_ids, flavor_ids) = ([] for i in range(9))
try:
image_id = os_utils.create_glance_image(
- glance_client,
+ conn,
COMMON_CONFIG.ubuntu_image_name,
COMMON_CONFIG.ubuntu_image_path,
disk="qcow2",
@@ -52,7 +63,7 @@ def main():
flavor_ids.append(flavor_id)
network_1_id, subnet_1_id, router_1_id = test_utils.create_network(
- neutron_client,
+ conn,
TESTCASE_CONFIG.net_1_name,
TESTCASE_CONFIG.subnet_1_name,
TESTCASE_CONFIG.subnet_1_cidr,
@@ -64,10 +75,10 @@ def main():
router_ids.extend([router_1_id])
sg_id = os_utils.create_security_group_full(
- neutron_client, TESTCASE_CONFIG.secgroup_name,
+ conn, TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
- compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
+ compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
av_zone_1 = "nova:" + compute_nodes[0]
av_zone_2 = "nova:" + compute_nodes[1]
@@ -79,7 +90,7 @@ def main():
TESTCASE_CONFIG.extra_route_subnet_mask)
# boot INTANCES
vm_1 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
@@ -88,18 +99,18 @@ def main():
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1,
userdata=u1)
- vm_1_ip = test_utils.get_instance_ip(vm_1)
+ vm_1_ip = test_utils.get_instance_ip(conn, vm_1)
- vm1_port = test_utils.get_port(neutron_client, vm_1.id)
+ vm1_port = test_utils.get_port(conn, vm_1.id)
test_utils.update_port_allowed_address_pairs(
- neutron_client,
- vm1_port['id'],
+ conn,
+ vm1_port.id,
[test_utils.AllowedAddressPair(
TESTCASE_CONFIG.extra_route_cidr,
- vm1_port['mac_address'])])
+ vm1_port.mac_address)])
vm_2 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
@@ -108,18 +119,47 @@ def main():
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1,
userdata=u1)
- vm_2_ip = test_utils.get_instance_ip(vm_2)
+ vm_2_ip = test_utils.get_instance_ip(conn, vm_2)
- vm2_port = test_utils.get_port(neutron_client, vm_2.id)
+ vm2_port = test_utils.get_port(conn, vm_2.id)
test_utils.update_port_allowed_address_pairs(
- neutron_client,
- vm2_port['id'],
+ conn,
+ vm2_port.id,
[test_utils.AllowedAddressPair(
TESTCASE_CONFIG.extra_route_cidr,
- vm2_port['mac_address'])])
+ vm2_port.mac_address)])
test_utils.async_Wait_for_instances([vm_1, vm_2])
+ image_2_id = os_utils.create_glance_image(
+ conn, TESTCASE_CONFIG.image_name,
+ COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
+ container="bare", public='public')
+ image_ids.append(image_2_id)
+ # Moved vm_3 creation before associating its network/router with
+ # bgpvpn. If VM is created after its network is associated to bgpvpn
+ # via router, then BGPVPN in ODL uses router's vrf id for newly created
+ # VMs which causes testcase to fail.
+ u3 = test_utils.generate_ping_userdata(
+ [TESTCASE_CONFIG.extra_route_ip])
+ vm_3 = test_utils.create_instance(
+ conn,
+ TESTCASE_CONFIG.instance_3_name,
+ image_2_id,
+ network_1_id,
+ sg_id,
+ flavor=COMMON_CONFIG.custom_flavor_name,
+ secgroup_name=TESTCASE_CONFIG.secgroup_name,
+ compute_node=av_zone_2,
+ userdata=u3)
+
+ instance_ids.extend([vm_1.id, vm_2.id, vm_3.id])
+
+ instance_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_3)
+
+ if (not instance_dhcp_up):
+ logger.error("vm_3 instance is down")
+
msg = ("Create VPN with multiple RDs")
results.record_action(msg)
vpn_name = "sdnvpn-" + str(randint(100000, 999999))
@@ -143,42 +183,16 @@ def main():
neutron_client, bgpvpn_id, router_1_id)
test_utils.update_router_extra_route(
- neutron_client, router_1_id,
+ conn, router_1_id,
[test_utils.ExtraRoute(TESTCASE_CONFIG.extra_route_cidr,
vm_1_ip),
test_utils.ExtraRoute(TESTCASE_CONFIG.extra_route_cidr,
vm_2_ip)])
- image_2_id = os_utils.create_glance_image(
- glance_client, TESTCASE_CONFIG.image_name,
- COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
- container="bare", public='public')
- image_ids.append(image_2_id)
-
logger.info("Waiting for the VMs to connect to each other using the"
" updated network configuration")
test_utils.wait_before_subtest()
- u3 = test_utils.generate_ping_userdata(
- [TESTCASE_CONFIG.extra_route_ip])
- vm_3 = test_utils.create_instance(
- nova_client,
- TESTCASE_CONFIG.instance_3_name,
- image_2_id,
- network_1_id,
- sg_id,
- flavor=COMMON_CONFIG.custom_flavor_name,
- secgroup_name=TESTCASE_CONFIG.secgroup_name,
- compute_node=av_zone_2,
- userdata=u3)
-
- instance_ids.extend([vm_1.id, vm_2.id, vm_3.id])
-
- instance_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_3)
-
- if (not instance_dhcp_up):
- logger.error("vm_3 instance is down")
-
results.get_ping_status_target_ip(vm_3,
TESTCASE_CONFIG.extra_route_name,
TESTCASE_CONFIG.extra_route_ip,
@@ -192,10 +206,10 @@ def main():
logger.error("exception occurred while executing testcase_13: %s", e)
raise
finally:
- test_utils.update_router_no_extra_route(neutron_client, router_ids)
- test_utils.cleanup_nova(nova_client, instance_ids)
- test_utils.cleanup_glance(glance_client, image_ids)
- test_utils.cleanup_neutron(neutron_client, floatingip_ids,
+ test_utils.update_router_no_extra_route(conn, router_ids)
+ test_utils.cleanup_nova(conn, instance_ids, flavor_ids)
+ test_utils.cleanup_glance(conn, image_ids)
+ test_utils.cleanup_neutron(conn, neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,
router_ids, network_ids)
@@ -203,5 +217,4 @@ def main():
if __name__ == '__main__':
- logging.basicConfig(level=logging.INFO)
sys.exit(main())
diff --git a/sdnvpn/test/functest/testcase_1bis.py b/sdnvpn/test/functest/testcase_1bis.py
new file mode 100644
index 0000000..30a0abf
--- /dev/null
+++ b/sdnvpn/test/functest/testcase_1bis.py
@@ -0,0 +1,209 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2018 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+import logging
+import sys
+import pkg_resources
+
+from random import randint
+from sdnvpn.lib import config as sdnvpn_config
+from sdnvpn.lib import openstack_utils as os_utils
+from sdnvpn.lib import utils as test_utils
+from sdnvpn.lib.results import Results
+
+logger = logging.getLogger(__name__)
+
+COMMON_CONFIG = sdnvpn_config.CommonConfig()
+TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
+ 'sdnvpn.test.functest.testcase_1bis')
+
+
+def main():
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
+
+ results.add_to_summary(0, "=")
+ results.add_to_summary(2, "STATUS", "SUBTEST")
+ results.add_to_summary(0, "=")
+
+ conn = os_utils.get_os_connection()
+ # neutron client is needed as long as bgpvpn heat module
+ # is not yet installed by default in apex (APEX-618)
+ neutron_client = os_utils.get_neutron_client()
+
+ image_ids = []
+ bgpvpn_ids = []
+
+ try:
+ # image created outside HOT (OS::Glance::Image deprecated since ocata)
+ image_id = os_utils.create_glance_image(
+ conn, TESTCASE_CONFIG.image_name,
+ COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
+ container="bare", public='public')
+ image_ids = [image_id]
+
+ compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
+ az_1 = "nova:" + compute_nodes[0]
+ az_2 = "nova:" + compute_nodes[1]
+
+ file_path = pkg_resources.resource_filename(
+ 'sdnvpn', TESTCASE_CONFIG.hot_file_name)
+ templ = open(file_path, 'r').read()
+ logger.debug("Template is read: '%s'" % templ)
+ env = test_utils.get_heat_environment(TESTCASE_CONFIG, COMMON_CONFIG)
+ logger.debug("Environment is read: '%s'" % env)
+
+ env['name'] = TESTCASE_CONFIG.stack_name
+ env['template'] = templ
+ env['parameters']['image_n'] = TESTCASE_CONFIG.image_name
+ env['parameters']['av_zone_1'] = az_1
+ env['parameters']['av_zone_2'] = az_2
+
+ stack_id = os_utils.create_stack(conn, **env)
+ if stack_id is None:
+ logger.error("Stack create start failed")
+ raise SystemError("Stack create start failed")
+
+ test_utils.wait_stack_for_status(conn, stack_id, 'CREATE_COMPLETE')
+
+ net_1_output = os_utils.get_output(conn, stack_id, 'net_1_o')
+ network_1_id = net_1_output['output_value']
+ net_2_output = os_utils.get_output(conn, stack_id, 'net_2_o')
+ network_2_id = net_2_output['output_value']
+
+ vm_stack_output_keys = ['vm1_o', 'vm2_o', 'vm3_o', 'vm4_o', 'vm5_o']
+ vms = test_utils.get_vms_from_stack_outputs(conn,
+ stack_id,
+ vm_stack_output_keys)
+
+ logger.debug("Entering base test case with stack '%s'" % stack_id)
+
+ msg = ("Create VPN with eRT<>iRT")
+ results.record_action(msg)
+ vpn_name = "sdnvpn-" + str(randint(100000, 999999))
+ kwargs = {
+ "import_targets": TESTCASE_CONFIG.targets1,
+ "export_targets": TESTCASE_CONFIG.targets2,
+ "route_distinguishers": TESTCASE_CONFIG.route_distinguishers,
+ "name": vpn_name
+ }
+ bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)
+ bgpvpn_id = bgpvpn['bgpvpn']['id']
+ logger.debug("VPN created details: %s" % bgpvpn)
+ bgpvpn_ids.append(bgpvpn_id)
+
+ msg = ("Associate network '%s' to the VPN." %
+ TESTCASE_CONFIG.heat_parameters['net_1_name'])
+ results.record_action(msg)
+ results.add_to_summary(0, "-")
+
+ test_utils.create_network_association(
+ neutron_client, bgpvpn_id, network_1_id)
+
+ # Remember: vms[X] is former vm_X+1
+
+ results.get_ping_status(vms[0], vms[1], expected="PASS", timeout=200)
+ results.get_ping_status(vms[0], vms[2], expected="PASS", timeout=30)
+ results.get_ping_status(vms[0], vms[3], expected="FAIL", timeout=30)
+
+ msg = ("Associate network '%s' to the VPN." %
+ TESTCASE_CONFIG.heat_parameters['net_2_name'])
+ results.add_to_summary(0, "-")
+ results.record_action(msg)
+ results.add_to_summary(0, "-")
+
+ test_utils.create_network_association(
+ neutron_client, bgpvpn_id, network_2_id)
+
+ test_utils.wait_for_bgp_net_assocs(neutron_client,
+ bgpvpn_id,
+ network_1_id,
+ network_2_id)
+
+ logger.info("Waiting for the VMs to connect to each other using the"
+ " updated network configuration")
+ test_utils.wait_before_subtest()
+
+ results.get_ping_status(vms[3], vms[4], expected="PASS", timeout=30)
+ # TODO enable again when isolation in VPN with iRT != eRT works
+ # results.get_ping_status(vms[0], vms[3], expected="FAIL", timeout=30)
+ # results.get_ping_status(vms[0], vms[4], expected="FAIL", timeout=30)
+
+ msg = ("Update VPN with eRT=iRT ...")
+ results.add_to_summary(0, "-")
+ results.record_action(msg)
+ results.add_to_summary(0, "-")
+
+ # use bgpvpn-create instead of update till NETVIRT-1067 bug is fixed
+ # kwargs = {"import_targets": TESTCASE_CONFIG.targets1,
+ # "export_targets": TESTCASE_CONFIG.targets1,
+ # "name": vpn_name}
+ # bgpvpn = test_utils.update_bgpvpn(neutron_client,
+ # bgpvpn_id, **kwargs)
+
+ test_utils.delete_bgpvpn(neutron_client, bgpvpn_id)
+ bgpvpn_ids.remove(bgpvpn_id)
+ kwargs = {
+ "import_targets": TESTCASE_CONFIG.targets1,
+ "export_targets": TESTCASE_CONFIG.targets1,
+ "route_distinguishers": TESTCASE_CONFIG.route_distinguishers,
+ "name": vpn_name
+ }
+
+ test_utils.wait_before_subtest()
+
+ bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)
+ bgpvpn_id = bgpvpn['bgpvpn']['id']
+ logger.debug("VPN re-created details: %s" % bgpvpn)
+ bgpvpn_ids.append(bgpvpn_id)
+
+ msg = ("Associate network '%s' to the VPN." %
+ TESTCASE_CONFIG.heat_parameters['net_1_name'])
+ results.record_action(msg)
+ results.add_to_summary(0, "-")
+
+ test_utils.create_network_association(
+ neutron_client, bgpvpn_id, network_1_id)
+
+ test_utils.create_network_association(
+ neutron_client, bgpvpn_id, network_2_id)
+
+ test_utils.wait_for_bgp_net_assocs(neutron_client,
+ bgpvpn_id,
+ network_1_id,
+ network_2_id)
+ # The above code has to be removed after re-enabling bgpvpn-update
+
+ logger.info("Waiting for the VMs to connect to each other using the"
+ " updated network configuration")
+ test_utils.wait_before_subtest()
+
+ results.get_ping_status(vms[0], vms[3], expected="PASS", timeout=30)
+ results.get_ping_status(vms[0], vms[4], expected="PASS", timeout=30)
+
+ except Exception as e:
+ logger.error("exception occurred while executing testcase_1bis: %s", e)
+ raise
+ finally:
+ test_utils.cleanup_glance(conn, image_ids)
+ test_utils.cleanup_neutron(conn, neutron_client, [], bgpvpn_ids,
+ [], [], [], [])
+
+ try:
+ test_utils.delete_stack_and_wait(conn, stack_id)
+ except Exception as e:
+ logger.error(
+ "exception occurred while executing testcase_1bis: %s", e)
+
+ return results.compile_summary()
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/sdnvpn/test/functest/testcase_2.py b/sdnvpn/test/functest/testcase_2.py
index d136d8f..b4f05b2 100644
--- a/sdnvpn/test/functest/testcase_2.py
+++ b/sdnvpn/test/functest/testcase_2.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
#
# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
@@ -8,12 +8,13 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
+import base64
import logging
import sys
-from functest.utils import openstack_utils as os_utils
from random import randint
from sdnvpn.lib import config as sdnvpn_config
+from sdnvpn.lib import openstack_utils as os_utils
from sdnvpn.lib import utils as test_utils
from sdnvpn.lib.results import Results
@@ -25,15 +26,14 @@ TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
def main():
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
- glance_client = os_utils.get_glance_client()
(floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))
@@ -44,40 +44,41 @@ def main():
keyfile = open(COMMON_CONFIG.keyfile_path, 'r')
key = keyfile.read()
keyfile.close()
- files = {"/home/cirros/id_rsa": key}
+ files = [{'path': '/home/cirros/id_rsa',
+ 'contents': base64.b64encode(key)}]
image_id = os_utils.create_glance_image(
- glance_client, TESTCASE_CONFIG.image_name,
+ conn, TESTCASE_CONFIG.image_name,
COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
container="bare", public='public')
image_ids.append(image_id)
network_1_id = test_utils.create_net(
- neutron_client,
+ conn,
TESTCASE_CONFIG.net_1_name)
subnet_1a_id = test_utils.create_subnet(
- neutron_client,
+ conn,
TESTCASE_CONFIG.subnet_1a_name,
TESTCASE_CONFIG.subnet_1a_cidr,
network_1_id)
# TODO: uncomment the commented lines once ODL has
# support for mulitple subnets under same neutron network
# subnet_1b_id = test_utils.create_subnet(
- # neutron_client,
+ # conn,
# TESTCASE_CONFIG.subnet_1b_name,
# TESTCASE_CONFIG.subnet_1b_cidr,
# network_1_id)
network_2_id = test_utils.create_net(
- neutron_client,
+ conn,
TESTCASE_CONFIG.net_2_name)
# subnet_2a_id = test_utils.create_subnet(
- # neutron_client,
+ # conn,
# TESTCASE_CONFIG.subnet_2a_name,
# TESTCASE_CONFIG.subnet_2a_cidr,
# network_2_id)
subnet_2b_id = test_utils.create_subnet(
- neutron_client,
+ conn,
TESTCASE_CONFIG.subnet_2b_name,
TESTCASE_CONFIG.subnet_2b_cidr,
network_2_id)
@@ -88,10 +89,10 @@ def main():
subnet_2b_id])
sg_id = os_utils.create_security_group_full(
- neutron_client, TESTCASE_CONFIG.secgroup_name,
+ conn, TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
- compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
+ compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
av_zone_1 = "nova:" + compute_nodes[0]
# av_zone_2 = "nova:" + compute_nodes[1]
@@ -99,7 +100,7 @@ def main():
# boot INTANCES
userdata_common = test_utils.generate_userdata_common()
vm_2 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
@@ -111,7 +112,7 @@ def main():
# vm_3 = test_utils.create_instance(
-# nova_client,
+# conn,
# TESTCASE_CONFIG.instance_3_name,
# image_id,
# network_1_id,
@@ -122,7 +123,7 @@ def main():
# userdata=userdata_common)
#
# vm_5 = test_utils.create_instance(
-# nova_client,
+# conn,
# TESTCASE_CONFIG.instance_5_name,
# image_id,
# network_2_id,
@@ -139,7 +140,7 @@ def main():
# TESTCASE_CONFIG.instance_5_ip
])
vm_4 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_4_name,
image_id,
network_2_id,
@@ -159,7 +160,7 @@ def main():
# TESTCASE_CONFIG.instance_5_ip
])
vm_1 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
@@ -267,9 +268,9 @@ def main():
logger.error("exception occurred while executing testcase_2: %s", e)
raise
finally:
- test_utils.cleanup_nova(nova_client, instance_ids)
- test_utils.cleanup_glance(glance_client, image_ids)
- test_utils.cleanup_neutron(neutron_client, floatingip_ids,
+ test_utils.cleanup_nova(conn, instance_ids)
+ test_utils.cleanup_glance(conn, image_ids)
+ test_utils.cleanup_neutron(conn, neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,
router_ids, network_ids)
@@ -277,5 +278,4 @@ def main():
if __name__ == '__main__':
- logging.basicConfig(level=logging.INFO)
sys.exit(main())
diff --git a/sdnvpn/test/functest/testcase_2bis.py b/sdnvpn/test/functest/testcase_2bis.py
new file mode 100644
index 0000000..3736c0c
--- /dev/null
+++ b/sdnvpn/test/functest/testcase_2bis.py
@@ -0,0 +1,188 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2018 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+import base64
+import logging
+import sys
+import pkg_resources
+
+from random import randint
+from sdnvpn.lib import config as sdnvpn_config
+from sdnvpn.lib import openstack_utils as os_utils
+from sdnvpn.lib import utils as test_utils
+from sdnvpn.lib.results import Results
+
+logger = logging.getLogger(__name__)
+
+COMMON_CONFIG = sdnvpn_config.CommonConfig()
+TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
+ 'sdnvpn.test.functest.testcase_2bis')
+
+
+def main():
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
+
+ results.add_to_summary(0, '=')
+ results.add_to_summary(2, 'STATUS', 'SUBTEST')
+ results.add_to_summary(0, '=')
+
+ conn = os_utils.get_os_connection()
+ # neutron client is needed as long as bgpvpn heat module
+ # is not yet installed by default in apex (APEX-618)
+ neutron_client = os_utils.get_neutron_client()
+
+ image_ids = []
+ bgpvpn_ids = []
+
+ try:
+ logger.debug("Using private key %s injected to the VMs."
+ % COMMON_CONFIG.keyfile_path)
+ keyfile = open(COMMON_CONFIG.keyfile_path, 'r')
+ key_buf = keyfile.read()
+ keyfile.close()
+ key = base64.b64encode(key_buf)
+
+ # image created outside HOT (OS::Glance::Image deprecated since ocata)
+ image_id = os_utils.create_glance_image(
+ conn, TESTCASE_CONFIG.image_name,
+ COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
+ container='bare', public='public')
+ image_ids = [image_id]
+
+ compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
+
+ az_1 = 'nova:' + compute_nodes[0]
+ # av_zone_2 = "nova:" + compute_nodes[1]
+
+ file_path = pkg_resources.resource_filename(
+ 'sdnvpn', TESTCASE_CONFIG.hot_file_name)
+ templ = open(file_path, 'r').read()
+ logger.debug("Template is read: '%s'" % templ)
+ env = test_utils.get_heat_environment(TESTCASE_CONFIG, COMMON_CONFIG)
+ logger.debug("Environment is read: '%s'" % env)
+
+ env['name'] = TESTCASE_CONFIG.stack_name
+ env['template'] = templ
+ env['parameters']['image_n'] = TESTCASE_CONFIG.image_name
+ env['parameters']['av_zone_1'] = az_1
+ env['parameters']['id_rsa_key'] = key
+
+ stack_id = os_utils.create_stack(conn, **env)
+ if stack_id is None:
+ logger.error('Stack create start failed')
+ raise SystemError('Stack create start failed')
+
+ test_utils.wait_stack_for_status(conn, stack_id, 'CREATE_COMPLETE')
+
+ net_1_output = os_utils.get_output(conn, stack_id, 'net_1_o')
+ network_1_id = net_1_output['output_value']
+ net_2_output = os_utils.get_output(conn, stack_id, 'net_2_o')
+ network_2_id = net_2_output['output_value']
+
+ vm_stack_output_keys = ['vm1_o', 'vm2_o', 'vm3_o', 'vm4_o', 'vm5_o']
+ vms = test_utils.get_vms_from_stack_outputs(conn,
+ stack_id,
+ vm_stack_output_keys)
+
+ logger.debug("Entering base test case with stack '%s'" % stack_id)
+
+ msg = ('Create VPN1 with eRT=iRT')
+ results.record_action(msg)
+ vpn1_name = 'sdnvpn-1-' + str(randint(100000, 999999))
+ kwargs = {
+ 'import_targets': TESTCASE_CONFIG.targets2,
+ 'export_targets': TESTCASE_CONFIG.targets2,
+ 'route_targets': TESTCASE_CONFIG.targets2,
+ 'route_distinguishers': TESTCASE_CONFIG.route_distinguishers1,
+ 'name': vpn1_name
+ }
+ bgpvpn1 = test_utils.create_bgpvpn(neutron_client, **kwargs)
+ bgpvpn1_id = bgpvpn1['bgpvpn']['id']
+ logger.debug("VPN1 created details: %s" % bgpvpn1)
+ bgpvpn_ids.append(bgpvpn1_id)
+
+ msg = ("Associate network '%s' to the VPN." %
+ TESTCASE_CONFIG.heat_parameters['net_1_name'])
+ results.record_action(msg)
+ results.add_to_summary(0, '-')
+
+ test_utils.create_network_association(
+ neutron_client, bgpvpn1_id, network_1_id)
+
+ logger.info('Waiting for the VMs to connect to each other using the'
+ ' updated network configuration for VPN1')
+ test_utils.wait_before_subtest()
+
+ # Remember: vms[X] has instance_X+1_name
+
+ # 10.10.10.12 should return sdnvpn-2 to sdnvpn-1
+ results.check_ssh_output(
+ vms[0], vms[1],
+ expected=TESTCASE_CONFIG.heat_parameters['instance_2_name'],
+ timeout=200)
+
+ results.add_to_summary(0, '-')
+ msg = ('Create VPN2 with eRT=iRT')
+ results.record_action(msg)
+ vpn2_name = 'sdnvpn-2-' + str(randint(100000, 999999))
+ kwargs = {
+ 'import_targets': TESTCASE_CONFIG.targets1,
+ 'export_targets': TESTCASE_CONFIG.targets1,
+ 'route_targets': TESTCASE_CONFIG.targets1,
+ 'route_distinguishers': TESTCASE_CONFIG.route_distinguishers2,
+ 'name': vpn2_name
+ }
+ bgpvpn2 = test_utils.create_bgpvpn(neutron_client, **kwargs)
+ bgpvpn2_id = bgpvpn2['bgpvpn']['id']
+ logger.debug("VPN created details: %s" % bgpvpn2)
+ bgpvpn_ids.append(bgpvpn2_id)
+
+ msg = ("Associate network '%s' to the VPN2." %
+ TESTCASE_CONFIG.heat_parameters['net_2_name'])
+ results.record_action(msg)
+ results.add_to_summary(0, '-')
+
+ test_utils.create_network_association(
+ neutron_client, bgpvpn2_id, network_2_id)
+
+ test_utils.wait_for_bgp_net_assoc(neutron_client,
+ bgpvpn1_id, network_1_id)
+ test_utils.wait_for_bgp_net_assoc(neutron_client,
+ bgpvpn2_id, network_2_id)
+
+ logger.info('Waiting for the VMs to connect to each other using the'
+ ' updated network configuration for VPN2')
+ test_utils.wait_before_subtest()
+
+ # 10.10.10.11 should return 'not reachable' to sdnvpn-4
+ results.check_ssh_output(vms[3], vms[0],
+ expected='not reachable',
+ timeout=30)
+
+ except Exception as e:
+ logger.error("exception occurred while executing testcase_2bis: %s", e)
+ raise
+ finally:
+ test_utils.cleanup_glance(conn, image_ids)
+ test_utils.cleanup_neutron(conn, neutron_client, [], bgpvpn_ids,
+ [], [], [], [])
+
+ try:
+ test_utils.delete_stack_and_wait(conn, stack_id)
+ except Exception as e:
+ logger.error(
+ "exception occurred while executing testcase_2bis: %s", e)
+
+ return results.compile_summary()
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/sdnvpn/test/functest/testcase_3.py b/sdnvpn/test/functest/testcase_3.py
index 88fb421..48024cb 100644
--- a/sdnvpn/test/functest/testcase_3.py
+++ b/sdnvpn/test/functest/testcase_3.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+#
# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
@@ -15,10 +17,10 @@
import logging
import os
import sys
+import time
-from functest.utils import functest_utils as ft_utils
-from functest.utils import openstack_utils as os_utils
from sdnvpn.lib import quagga
+from sdnvpn.lib import openstack_utils as os_utils
from sdnvpn.lib import utils as test_utils
from sdnvpn.lib import config as sdnvpn_config
from sdnvpn.lib.results import Results
@@ -32,84 +34,120 @@ TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
def main():
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
openstack_nodes = test_utils.get_nodes()
+ installer_type = str(os.environ['INSTALLER_TYPE'].lower())
# node.is_odl() doesn't work in Apex
# https://jira.opnfv.org/browse/RELENG-192
- controllers = [node for node in openstack_nodes
- if "running" in
- node.run_cmd("sudo systemctl status opendaylight")]
+ fuel_cmd = "sudo systemctl status opendaylight"
+ apex_cmd = "sudo docker exec opendaylight_api " \
+ "/opt/opendaylight/bin/status"
+ health_cmd = "sudo docker ps -f name=opendaylight_api -f " \
+ "health=healthy -q"
+ if installer_type in ["fuel"]:
+ odl_nodes = [node for node in openstack_nodes
+ if "running" in node.run_cmd(fuel_cmd)]
+ elif installer_type in ["apex"]:
+ odl_nodes = [node for node in openstack_nodes
+ if node.run_cmd(health_cmd)
+ if "Running" in node.run_cmd(apex_cmd)]
+ else:
+ logger.error("Incompatible installer type")
+
computes = [node for node in openstack_nodes if node.is_compute()]
msg = ("Verify that OpenDaylight can start/communicate with zrpcd/Quagga")
results.record_action(msg)
results.add_to_summary(0, "-")
- if not controllers:
- msg = ("Controller (ODL) list is empty. Skipping rest of tests.")
+ if not odl_nodes:
+ msg = ("ODL node list is empty. Skipping rest of tests.")
logger.info(msg)
results.add_failure(msg)
return results.compile_summary()
else:
- msg = ("Controller (ODL) list is ready")
+ msg = ("ODL node list is ready")
logger.info(msg)
results.add_success(msg)
- controller = controllers[0] # We don't handle HA well
- get_ext_ip_cmd = "sudo ip a | grep br-ex | grep inet | awk '{print $2}'"
- ext_net_cidr = controller.run_cmd(get_ext_ip_cmd).strip().split('\n')
- ext_net_mask = ext_net_cidr[0].split('/')[1]
- controller_ext_ip = ext_net_cidr[0].split('/')[0]
-
- logger.info("Starting bgp speaker of controller at IP %s "
- % controller_ext_ip)
logger.info("Checking if zrpcd is "
- "running on the controller node")
-
- output_zrpcd = controller.run_cmd("ps --no-headers -C "
- "zrpcd -o state")
- states = output_zrpcd.split()
- running = any([s != 'Z' for s in states])
+ "running on the opendaylight nodes")
+
+ for odl_node in odl_nodes:
+ output_zrpcd = odl_node.run_cmd("ps --no-headers -C "
+ "zrpcd -o state")
+ states = output_zrpcd.split()
+ running = any([s != 'Z' for s in states])
+ msg = ("zrpcd is running in {name}".format(name=odl_node.name))
+
+ if not running:
+ logger.info("zrpcd is not running on the opendaylight node {name}"
+ .format(name=odl_node.name))
+ results.add_failure(msg)
+ else:
+ logger.info("zrpcd is running on the opendaylight node {name}"
+ .format(name=odl_node.name))
+ results.add_success(msg)
- msg = ("zrpcd is running")
+ results.add_to_summary(0, "-")
- if not running:
- logger.info("zrpcd is not running on the controller node")
+ # Find the BGP entity owner in ODL because of this bug:
+ # https://jira.opendaylight.org/browse/NETVIRT-1308
+ msg = ("Found BGP entity owner")
+ odl_node = test_utils.get_odl_bgp_entity_owner(odl_nodes)
+ if odl_node is None:
+ logger.error("Failed to find the BGP entity owner")
results.add_failure(msg)
else:
- logger.info("zrpcd is running on the controller node")
+ logger.info('BGP entity owner is {name}'
+ .format(name=odl_node.name))
results.add_success(msg)
-
results.add_to_summary(0, "-")
+ installer_type = str(os.environ['INSTALLER_TYPE'].lower())
+ if installer_type in ['apex']:
+ odl_interface = 'br-ex'
+ elif installer_type in ['fuel']:
+ odl_interface = 'br-ext'
+ else:
+ logger.error("Incompatible installer type")
+ odl_ip, odl_netmask = test_utils.get_node_ip_and_netmask(
+ odl_node, odl_interface)
+
+ logger.info("Starting bgp speaker of opendaylight node at IP %s "
+ % odl_ip)
+
# Ensure that ZRPCD ip & port are well configured within ODL
add_client_conn_to_bgp = "bgp-connect -p 7644 -h 127.0.0.1 add"
- test_utils.run_odl_cmd(controller, add_client_conn_to_bgp)
+ test_utils.run_odl_cmd(odl_node, add_client_conn_to_bgp)
# Start bgp daemon
start_quagga = "odl:configure-bgp -op start-bgp-server " \
- "--as-num 100 --router-id {0}".format(controller_ext_ip)
- test_utils.run_odl_cmd(controller, start_quagga)
+ "--as-num 100 --router-id {0}".format(odl_ip)
+ test_utils.run_odl_cmd(odl_node, start_quagga)
+
+ # we need to wait a bit until the bgpd is up
+ time.sleep(5)
- logger.info("Checking if bgpd is running"
- " on the controller node")
+ logger.info("Checking if bgpd is running on the opendaylight node")
# Check if there is a non-zombie bgpd process
- output_bgpd = controller.run_cmd("ps --no-headers -C "
- "bgpd -o state")
+ output_bgpd = odl_node.run_cmd("ps --no-headers -C "
+ "bgpd -o state")
states = output_bgpd.split()
running = any([s != 'Z' for s in states])
msg = ("bgpd is running")
if not running:
- logger.info("bgpd is not running on the controller node")
+ logger.info("bgpd is not running on the opendaylight node")
results.add_failure(msg)
else:
- logger.info("bgpd is running on the controller node")
+ logger.info("bgpd is running on the opendaylight node")
results.add_success(msg)
results.add_to_summary(0, "-")
@@ -118,51 +156,63 @@ def main():
# but the test is disabled because of buggy upstream
# https://github.com/6WIND/zrpcd/issues/15
# stop_quagga = 'odl:configure-bgp -op stop-bgp-server'
- # test_utils.run_odl_cmd(controller, stop_quagga)
+ # test_utils.run_odl_cmd(odl_node, stop_quagga)
# logger.info("Checking if bgpd is still running"
- # " on the controller node")
+ # " on the opendaylight node")
- # output_bgpd = controller.run_cmd("ps --no-headers -C " \
- # "bgpd -o state")
+ # output_bgpd = odl_node.run_cmd("ps --no-headers -C " \
+ # "bgpd -o state")
# states = output_bgpd.split()
# running = any([s != 'Z' for s in states])
# msg = ("bgpd is stopped")
# if not running:
- # logger.info("bgpd is not running on the controller node")
+ # logger.info("bgpd is not running on the opendaylight node")
# results.add_success(msg)
# else:
- # logger.info("bgpd is still running on the controller node")
+ # logger.info("bgpd is still running on the opendaylight node")
# results.add_failure(msg)
# Taken from the sfc tests
if not os.path.isfile(COMMON_CONFIG.ubuntu_image_path):
logger.info("Downloading image")
- ft_utils.download_url(
+ image_dest_path = '/'.join(
+ COMMON_CONFIG.ubuntu_image_path.split('/')[:-1])
+ os_utils.download_url(
"http://artifacts.opnfv.org/sdnvpn/"
"ubuntu-16.04-server-cloudimg-amd64-disk1.img",
- "/home/opnfv/functest/data/")
+ image_dest_path)
else:
logger.info("Using old image")
- glance_client = os_utils.get_glance_client()
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
(floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
subnet_ids, interfaces, bgpvpn_ids, flavor_ids) = ([] for i in range(9))
+ quagga_vm = None
+ fake_fip = None
try:
+ _, flavor_id = test_utils.create_custom_flavor()
+ flavor_ids.append(flavor_id)
+
sg_id = os_utils.create_security_group_full(
- neutron_client, TESTCASE_CONFIG.secgroup_name,
+ conn, TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
- test_utils.open_icmp(neutron_client, sg_id)
- test_utils.open_http_port(neutron_client, sg_id)
+ test_utils.open_icmp(conn, sg_id)
+ test_utils.open_http_port(conn, sg_id)
+
+ test_utils.open_bgp_port(conn, sg_id)
- test_utils.open_bgp_port(neutron_client, sg_id)
- net_id, subnet_1_id, router_1_id = test_utils.create_network(
- neutron_client,
+ image_id = os_utils.create_glance_image(
+ conn, TESTCASE_CONFIG.image_name,
+ COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
+ container="bare", public='public')
+ image_ids.append(image_id)
+
+ net_1_id, subnet_1_id, router_1_id = test_utils.create_network(
+ conn,
TESTCASE_CONFIG.net_1_name,
TESTCASE_CONFIG.subnet_1_name,
TESTCASE_CONFIG.subnet_1_cidr,
@@ -170,7 +220,7 @@ def main():
quagga_net_id, subnet_quagga_id, \
router_quagga_id = test_utils.create_network(
- neutron_client,
+ conn,
TESTCASE_CONFIG.quagga_net_name,
TESTCASE_CONFIG.quagga_subnet_name,
TESTCASE_CONFIG.quagga_subnet_cidr,
@@ -178,7 +228,7 @@ def main():
interfaces.append(tuple((router_1_id, subnet_1_id)))
interfaces.append(tuple((router_quagga_id, subnet_quagga_id)))
- network_ids.extend([net_id, quagga_net_id])
+ network_ids.extend([net_1_id, quagga_net_id])
router_ids.extend([router_1_id, router_quagga_id])
subnet_ids.extend([subnet_1_id, subnet_quagga_id])
@@ -191,7 +241,7 @@ def main():
logger.error("Incompatible installer type")
ubuntu_image_id = os_utils.create_glance_image(
- glance_client,
+ conn,
COMMON_CONFIG.ubuntu_image_name,
COMMON_CONFIG.ubuntu_image_path,
disk,
@@ -209,15 +259,14 @@ def main():
# this to work.
# We also create the FIP first because it is used in the
# cloud-init script.
- fip = os_utils.create_floating_ip(neutron_client)
# fake_fip is needed to bypass NAT
# see below for the reason why.
- fake_fip = os_utils.create_floating_ip(neutron_client)
-
- floatingip_ids.extend([fip['fip_id'], fake_fip['fip_id']])
+ fake_fip = os_utils.create_floating_ip(conn)
# pin quagga to some compute
- compute_node = nova_client.hypervisors.list()[0]
- quagga_compute_node = "nova:" + compute_node.hypervisor_hostname
+ floatingip_ids.append(fake_fip['fip_id'])
+ compute_node = conn.compute.hypervisors().next()
+ compute_node = conn.compute.get_hypervisor(compute_node)
+ quagga_compute_node = "nova:" + compute_node.name
# Map the hypervisor used above to a compute handle
# returned by releng's manager
for comp in computes:
@@ -225,15 +274,16 @@ def main():
compute = comp
break
quagga_bootstrap_script = quagga.gen_quagga_setup_script(
- controller_ext_ip,
+ odl_ip,
fake_fip['fip_addr'],
- ext_net_mask)
-
- _, flavor_id = test_utils.create_custom_flavor()
- flavor_ids.append(flavor_id)
+ odl_netmask,
+ TESTCASE_CONFIG.external_network_ip_prefix,
+ TESTCASE_CONFIG.route_distinguishers,
+ TESTCASE_CONFIG.import_targets,
+ TESTCASE_CONFIG.export_targets)
quagga_vm = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.quagga_instance_name,
ubuntu_image_id,
quagga_net_id,
@@ -243,59 +293,145 @@ def main():
userdata=quagga_bootstrap_script,
compute_node=quagga_compute_node)
- instance_ids.append(quagga_vm)
+ instance_ids.append(quagga_vm.id)
- fip_added = os_utils.add_floating_ip(nova_client,
- quagga_vm.id,
- fip['fip_addr'])
+ quagga_vm_port = test_utils.get_port(conn,
+ quagga_vm.id)
+ fip_added = os_utils.attach_floating_ip(conn,
+ quagga_vm_port.id)
msg = ("Assign a Floating IP to %s " %
TESTCASE_CONFIG.quagga_instance_name)
if fip_added:
results.add_success(msg)
+ floatingip_ids.append(fip_added.id)
else:
results.add_failure(msg)
+
test_utils.attach_instance_to_ext_br(quagga_vm, compute)
- try:
- testcase = "Bootstrap quagga inside an OpenStack instance"
- cloud_init_success = test_utils.wait_for_cloud_init(quagga_vm)
- if cloud_init_success:
- results.add_success(testcase)
- else:
- results.add_failure(testcase)
- results.add_to_summary(0, "=")
-
- results.add_to_summary(0, '-')
- results.add_to_summary(1, "Peer Quagga with OpenDaylight")
- results.add_to_summary(0, '-')
-
- neighbor = quagga.odl_add_neighbor(fake_fip['fip_addr'],
- controller_ext_ip,
- controller)
- peer = quagga.check_for_peering(controller)
-
- finally:
- test_utils.detach_instance_from_ext_br(quagga_vm, compute)
+ testcase = "Bootstrap quagga inside an OpenStack instance"
+ cloud_init_success = test_utils.wait_for_cloud_init(conn, quagga_vm)
+ if cloud_init_success:
+ results.add_success(testcase)
+ else:
+ results.add_failure(testcase)
+ results.add_to_summary(0, "=")
+
+ results.add_to_summary(0, '-')
+ results.add_to_summary(1, "Peer Quagga with OpenDaylight")
+ results.add_to_summary(0, '-')
+
+ neighbor = quagga.odl_add_neighbor(fake_fip['fip_addr'],
+ odl_ip,
+ odl_node)
+ peer = quagga.check_for_peering(odl_node)
if neighbor and peer:
results.add_success("Peering with quagga")
else:
results.add_failure("Peering with quagga")
+ test_utils.add_quagga_external_gre_end_point(odl_nodes,
+ fake_fip['fip_addr'])
+ test_utils.wait_before_subtest()
+
+ msg = ("Create VPN to define a VRF")
+ results.record_action(msg)
+ vpn_name = vpn_name = "sdnvpn-3"
+ kwargs = {
+ "import_targets": TESTCASE_CONFIG.import_targets,
+ "export_targets": TESTCASE_CONFIG.export_targets,
+ "route_targets": TESTCASE_CONFIG.route_targets,
+ "route_distinguishers": TESTCASE_CONFIG.route_distinguishers,
+ "name": vpn_name
+ }
+ bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)
+ bgpvpn_id = bgpvpn['bgpvpn']['id']
+ logger.debug("VPN1 created details: %s" % bgpvpn)
+ bgpvpn_ids.append(bgpvpn_id)
+
+ msg = ("Associate network '%s' to the VPN." %
+ TESTCASE_CONFIG.net_1_name)
+ results.record_action(msg)
+ results.add_to_summary(0, "-")
+
+ # create a vm and connect it with network1,
+ # which is going to be bgpvpn associated
+ userdata_common = test_utils.generate_ping_userdata(
+ [TESTCASE_CONFIG.external_network_ip])
+
+ compute_node = conn.compute.hypervisors().next()
+ av_zone_1 = "nova:" + compute_node.name
+ vm_bgpvpn = test_utils.create_instance(
+ conn,
+ TESTCASE_CONFIG.instance_1_name,
+ image_id,
+ net_1_id,
+ sg_id,
+ fixed_ip=TESTCASE_CONFIG.instance_1_ip,
+ secgroup_name=TESTCASE_CONFIG.secgroup_name,
+ compute_node=av_zone_1,
+ userdata=userdata_common)
+ instance_ids.append(vm_bgpvpn.id)
+
+ # wait for VM to get IP
+ instance_up = test_utils.wait_for_instances_get_dhcp(vm_bgpvpn)
+ if not instance_up:
+ logger.error("One or more instances are down")
+
+ test_utils.create_network_association(
+ neutron_client, bgpvpn_id, net_1_id)
+
+ test_utils.wait_before_subtest()
+
+ msg = ("External IP prefix %s is exchanged with ODL"
+ % TESTCASE_CONFIG.external_network_ip_prefix)
+ fib_added = test_utils.is_fib_entry_present_on_odl(
+ odl_nodes,
+ TESTCASE_CONFIG.external_network_ip_prefix,
+ TESTCASE_CONFIG.route_distinguishers)
+ if fib_added:
+ results.add_success(msg)
+ else:
+ results.add_failure(msg)
+
+ # TODO: uncomment the following once OVS is installed with > 2.8.3 and
+ # underlay connectivity is established between vxlan overlay and
+ # external network.
+ # results.get_ping_status_target_ip(
+ # vm_bgpvpn,
+ # TESTCASE_CONFIG.external_network_name,
+ # TESTCASE_CONFIG.external_network_ip,
+ # expected="PASS",
+ # timeout=300)
+
+ results.add_to_summary(0, "=")
+ logger.info("\n%s" % results.summary)
+
except Exception as e:
logger.error("exception occurred while executing testcase_3: %s", e)
raise
finally:
- test_utils.cleanup_nova(nova_client, instance_ids, flavor_ids)
- test_utils.cleanup_glance(glance_client, image_ids)
- test_utils.cleanup_neutron(neutron_client, floatingip_ids,
+ if quagga_vm is not None:
+ test_utils.detach_instance_from_ext_br(quagga_vm, compute)
+ test_utils.cleanup_nova(conn, instance_ids, flavor_ids)
+ test_utils.cleanup_glance(conn, image_ids)
+ test_utils.cleanup_neutron(conn, neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,
router_ids, network_ids)
+ if fake_fip is not None:
+ bgp_nbr_disconnect_cmd = ("bgp-nbr -i %s -a 200 del"
+ % fake_fip['fip_addr'])
+ test_utils.run_odl_cmd(odl_node, bgp_nbr_disconnect_cmd)
+ bgp_server_stop_cmd = ("bgp-rtr -r %s -a 100 del"
+ % odl_ip)
+ odl_zrpc_disconnect_cmd = "bgp-connect -p 7644 -h 127.0.0.1 del"
+ test_utils.run_odl_cmd(odl_node, bgp_server_stop_cmd)
+ test_utils.run_odl_cmd(odl_node, odl_zrpc_disconnect_cmd)
return results.compile_summary()
if __name__ == '__main__':
- logging.basicConfig(level=logging.INFO)
sys.exit(main())
diff --git a/sdnvpn/test/functest/testcase_4.py b/sdnvpn/test/functest/testcase_4.py
index cc429c3..650a88a 100644
--- a/sdnvpn/test/functest/testcase_4.py
+++ b/sdnvpn/test/functest/testcase_4.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
#
# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
@@ -11,9 +11,9 @@
import logging
import sys
-from functest.utils import openstack_utils as os_utils
from random import randint
from sdnvpn.lib import config as sdnvpn_config
+from sdnvpn.lib import openstack_utils as os_utils
from sdnvpn.lib import utils as test_utils
from sdnvpn.lib.results import Results
@@ -26,39 +26,38 @@ TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
def main():
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
- glance_client = os_utils.get_glance_client()
(floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))
try:
image_id = os_utils.create_glance_image(
- glance_client, TESTCASE_CONFIG.image_name,
+ conn, TESTCASE_CONFIG.image_name,
COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
container="bare", public='public')
image_ids.append(image_id)
network_1_id, subnet_1_id, router_1_id = test_utils.create_network(
- neutron_client,
+ conn,
TESTCASE_CONFIG.net_1_name,
TESTCASE_CONFIG.subnet_1_name,
TESTCASE_CONFIG.subnet_1_cidr,
TESTCASE_CONFIG.router_1_name)
network_2_id = test_utils.create_net(
- neutron_client,
+ conn,
TESTCASE_CONFIG.net_2_name)
subnet_2_id = test_utils.create_subnet(
- neutron_client,
+ conn,
TESTCASE_CONFIG.subnet_2_name,
TESTCASE_CONFIG.subnet_2_cidr,
network_2_id)
@@ -68,50 +67,50 @@ def main():
subnet_ids.extend([subnet_1_id, subnet_2_id])
sg_id = os_utils.create_security_group_full(
- neutron_client,
+ conn,
TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
- compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
+ compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
av_zone_1 = "nova:" + compute_nodes[0]
av_zone_2 = "nova:" + compute_nodes[1]
# boot INTANCES
vm_2 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1)
- vm_2_ip = test_utils.get_instance_ip(vm_2)
+ vm_2_ip = test_utils.get_instance_ip(conn, vm_2)
vm_3 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_3_name,
image_id,
network_1_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_2)
- vm_3_ip = test_utils.get_instance_ip(vm_3)
+ vm_3_ip = test_utils.get_instance_ip(conn, vm_3)
vm_5 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_5_name,
image_id,
network_2_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_2)
- vm_5_ip = test_utils.get_instance_ip(vm_5)
+ vm_5_ip = test_utils.get_instance_ip(conn, vm_5)
# We boot vm5 first because we need vm5_ip for vm4 userdata
u4 = test_utils.generate_ping_userdata([vm_5_ip])
vm_4 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_4_name,
image_id,
network_2_id,
@@ -119,7 +118,7 @@ def main():
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1,
userdata=u4)
- vm_4_ip = test_utils.get_instance_ip(vm_4)
+ vm_4_ip = test_utils.get_instance_ip(conn, vm_4)
# We boot VM1 at the end because we need to get the IPs
# first to generate the userdata
@@ -128,7 +127,7 @@ def main():
vm_4_ip,
vm_5_ip])
vm_1 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
@@ -199,18 +198,57 @@ def main():
results.add_to_summary(0, "-")
results.record_action(msg)
results.add_to_summary(0, "-")
- kwargs = {"import_targets": TESTCASE_CONFIG.targets1,
- "export_targets": TESTCASE_CONFIG.targets1,
- "name": vpn_name}
- bgpvpn = test_utils.update_bgpvpn(neutron_client,
- bgpvpn_id, **kwargs)
+
+ # use bgpvpn-create instead of update till NETVIRT-1067 bug is fixed
+ # kwargs = {"import_targets": TESTCASE_CONFIG.targets1,
+ # "export_targets": TESTCASE_CONFIG.targets1,
+ # "name": vpn_name}
+ # bgpvpn = test_utils.update_bgpvpn(neutron_client,
+ # bgpvpn_id, **kwargs)
+
+ test_utils.delete_bgpvpn(neutron_client, bgpvpn_id)
+ bgpvpn_ids.remove(bgpvpn_id)
+ kwargs = {
+ "import_targets": TESTCASE_CONFIG.targets1,
+ "export_targets": TESTCASE_CONFIG.targets1,
+ "route_distinguishers": TESTCASE_CONFIG.route_distinguishers,
+ "name": vpn_name
+ }
+
+ test_utils.wait_before_subtest()
+
+ bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)
+ bgpvpn_id = bgpvpn['bgpvpn']['id']
+ logger.debug("VPN re-created details: %s" % bgpvpn)
+ bgpvpn_ids.append(bgpvpn_id)
+
+ msg = ("Associate again network '%s' and router '%s 'to the VPN."
+ % (TESTCASE_CONFIG.net_2_name,
+ TESTCASE_CONFIG.router_1_name))
+ results.add_to_summary(0, "-")
+ results.record_action(msg)
+ results.add_to_summary(0, "-")
+
+ test_utils.create_router_association(
+ neutron_client, bgpvpn_id, router_1_id)
+
+ test_utils.create_network_association(
+ neutron_client, bgpvpn_id, network_2_id)
+
+ test_utils.wait_for_bgp_router_assoc(
+ neutron_client, bgpvpn_id, router_1_id)
+ test_utils.wait_for_bgp_net_assoc(
+ neutron_client, bgpvpn_id, network_2_id)
+ # The above code has to be removed after re-enabling bgpvpn-update
logger.info("Waiting for the VMs to connect to each other using the"
" updated network configuration")
test_utils.wait_before_subtest()
- results.get_ping_status(vm_1, vm_4, expected="PASS", timeout=30)
- results.get_ping_status(vm_1, vm_5, expected="PASS", timeout=30)
+ # TODO: uncomment the following once ODL netvirt fixes the following
+ # bug: https://jira.opendaylight.org/browse/NETVIRT-932
+ # results.get_ping_status(vm_1, vm_4, expected="PASS", timeout=30)
+ # results.get_ping_status(vm_1, vm_5, expected="PASS", timeout=30)
results.add_to_summary(0, "=")
logger.info("\n%s" % results.summary)
@@ -219,9 +257,9 @@ def main():
logger.error("exception occurred while executing testcase_4: %s", e)
raise
finally:
- test_utils.cleanup_nova(nova_client, instance_ids)
- test_utils.cleanup_glance(glance_client, image_ids)
- test_utils.cleanup_neutron(neutron_client, floatingip_ids,
+ test_utils.cleanup_nova(conn, instance_ids)
+ test_utils.cleanup_glance(conn, image_ids)
+ test_utils.cleanup_neutron(conn, neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,
router_ids, network_ids)
@@ -229,5 +267,4 @@ def main():
if __name__ == '__main__':
- logging.basicConfig(level=logging.INFO)
sys.exit(main())
diff --git a/sdnvpn/test/functest/testcase_4bis.py b/sdnvpn/test/functest/testcase_4bis.py
new file mode 100644
index 0000000..6245f7c
--- /dev/null
+++ b/sdnvpn/test/functest/testcase_4bis.py
@@ -0,0 +1,215 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2018 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+import logging
+import sys
+import pkg_resources
+
+from random import randint
+from sdnvpn.lib import config as sdnvpn_config
+from sdnvpn.lib import openstack_utils as os_utils
+from sdnvpn.lib import utils as test_utils
+from sdnvpn.lib.results import Results
+
+logger = logging.getLogger(__name__)
+
+COMMON_CONFIG = sdnvpn_config.CommonConfig()
+TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
+ 'sdnvpn.test.functest.testcase_4bis')
+
+
+def main():
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
+
+ results.add_to_summary(0, '=')
+ results.add_to_summary(2, 'STATUS', 'SUBTEST')
+ results.add_to_summary(0, '=')
+
+ conn = os_utils.get_os_connection()
+ # neutron client is needed as long as bgpvpn heat module
+ # is not yet installed by default in apex (APEX-618)
+ neutron_client = os_utils.get_neutron_client()
+
+ image_ids = []
+ bgpvpn_ids = []
+
+ try:
+ image_id = os_utils.create_glance_image(
+ conn, TESTCASE_CONFIG.image_name,
+ COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
+ container='bare', public='public')
+ image_ids = [image_id]
+
+ compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
+ az_1 = 'nova:' + compute_nodes[0]
+ az_2 = 'nova:' + compute_nodes[1]
+
+ file_path = pkg_resources.resource_filename(
+ 'sdnvpn', TESTCASE_CONFIG.hot_file_name)
+ templ = open(file_path, 'r').read()
+ logger.debug("Template is read: '%s'" % templ)
+ env = test_utils.get_heat_environment(TESTCASE_CONFIG, COMMON_CONFIG)
+ logger.debug("Environment is read: '%s'" % env)
+
+ env['name'] = TESTCASE_CONFIG.stack_name
+ env['template'] = templ
+ env['parameters']['image_n'] = TESTCASE_CONFIG.image_name
+ env['parameters']['av_zone_1'] = az_1
+ env['parameters']['av_zone_2'] = az_2
+
+ stack_id = os_utils.create_stack(conn, **env)
+ if stack_id is None:
+ logger.error('Stack create start failed')
+ raise SystemError('Stack create start failed')
+
+ test_utils.wait_stack_for_status(conn, stack_id, 'CREATE_COMPLETE')
+
+ router_1_output = os_utils.get_output(conn, stack_id, 'router_1_o')
+ router_1_id = router_1_output['output_value']
+ net_2_output = os_utils.get_output(conn, stack_id, 'net_2_o')
+ network_2_id = net_2_output['output_value']
+
+ vm_stack_output_keys = ['vm1_o', 'vm2_o', 'vm3_o', 'vm4_o', 'vm5_o']
+ vms = test_utils.get_vms_from_stack_outputs(conn,
+ stack_id,
+ vm_stack_output_keys)
+
+ logger.debug("Entering base test case with stack '%s'" % stack_id)
+
+ msg = ('Create VPN with eRT<>iRT')
+ results.record_action(msg)
+ vpn_name = 'sdnvpn-' + str(randint(100000, 999999))
+ kwargs = {
+ 'import_targets': TESTCASE_CONFIG.targets1,
+ 'export_targets': TESTCASE_CONFIG.targets2,
+ 'route_distinguishers': TESTCASE_CONFIG.route_distinguishers,
+ 'name': vpn_name
+ }
+ bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)
+ bgpvpn_id = bgpvpn['bgpvpn']['id']
+ logger.debug("VPN created details: %s" % bgpvpn)
+ bgpvpn_ids.append(bgpvpn_id)
+
+ msg = ("Associate router '%s' to the VPN." %
+ TESTCASE_CONFIG.heat_parameters['router_1_name'])
+ results.record_action(msg)
+ results.add_to_summary(0, '-')
+
+ test_utils.create_router_association(
+ neutron_client, bgpvpn_id, router_1_id)
+
+ # Remember: vms[X] is former vm_X+1
+
+ results.get_ping_status(vms[0], vms[1], expected='PASS', timeout=200)
+ results.get_ping_status(vms[0], vms[2], expected='PASS', timeout=30)
+ results.get_ping_status(vms[0], vms[3], expected='FAIL', timeout=30)
+
+ msg = ("Associate network '%s' to the VPN." %
+ TESTCASE_CONFIG.heat_parameters['net_2_name'])
+ results.add_to_summary(0, '-')
+ results.record_action(msg)
+ results.add_to_summary(0, '-')
+
+ test_utils.create_network_association(
+ neutron_client, bgpvpn_id, network_2_id)
+
+ test_utils.wait_for_bgp_router_assoc(
+ neutron_client, bgpvpn_id, router_1_id)
+ test_utils.wait_for_bgp_net_assocs(
+ neutron_client, bgpvpn_id, network_2_id)
+
+ logger.info('Waiting for the VMs to connect to each other using the'
+ ' updated network configuration')
+ test_utils.wait_before_subtest()
+
+ results.get_ping_status(vms[3], vms[4], expected='PASS', timeout=30)
+ # TODO enable again when isolation in VPN with iRT != eRT works
+ # results.get_ping_status(vms[0], vms[3], expected="FAIL", timeout=30)
+ # results.get_ping_status(vms[0], vms[4], expected="FAIL", timeout=30)
+
+ msg = ('Update VPN with eRT=iRT ...')
+ results.add_to_summary(0, "-")
+ results.record_action(msg)
+ results.add_to_summary(0, "-")
+
+ # use bgpvpn-create instead of update till NETVIRT-1067 bug is fixed
+ # kwargs = {"import_targets": TESTCASE_CONFIG.targets1,
+ # "export_targets": TESTCASE_CONFIG.targets1,
+ # "name": vpn_name}
+ # bgpvpn = test_utils.update_bgpvpn(neutron_client,
+ # bgpvpn_id, **kwargs)
+
+ test_utils.delete_bgpvpn(neutron_client, bgpvpn_id)
+ bgpvpn_ids.remove(bgpvpn_id)
+ kwargs = {
+ 'import_targets': TESTCASE_CONFIG.targets1,
+ 'export_targets': TESTCASE_CONFIG.targets1,
+ 'route_distinguishers': TESTCASE_CONFIG.route_distinguishers,
+ 'name': vpn_name
+ }
+
+ test_utils.wait_before_subtest()
+
+ bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)
+ bgpvpn_id = bgpvpn['bgpvpn']['id']
+ logger.debug("VPN re-created details: %s" % bgpvpn)
+ bgpvpn_ids.append(bgpvpn_id)
+
+ msg = ("Associate again network '%s' and router '%s 'to the VPN."
+ % (TESTCASE_CONFIG.heat_parameters['net_2_name'],
+ TESTCASE_CONFIG.heat_parameters['router_1_name']))
+ results.add_to_summary(0, '-')
+ results.record_action(msg)
+ results.add_to_summary(0, '-')
+
+ test_utils.create_router_association(
+ neutron_client, bgpvpn_id, router_1_id)
+
+ test_utils.create_network_association(
+ neutron_client, bgpvpn_id, network_2_id)
+
+ test_utils.wait_for_bgp_router_assoc(
+ neutron_client, bgpvpn_id, router_1_id)
+ test_utils.wait_for_bgp_net_assoc(
+ neutron_client, bgpvpn_id, network_2_id)
+ # The above code has to be removed after re-enabling bgpvpn-update
+
+ logger.info('Waiting for the VMs to connect to each other using the'
+ ' updated network configuration')
+ test_utils.wait_before_subtest()
+
+ # TODO: uncomment the following once ODL netvirt fixes the following
+ # bug: https://jira.opendaylight.org/browse/NETVIRT-932
+ # results.get_ping_status(vms[0], vms[3], expected="PASS", timeout=30)
+ # results.get_ping_status(vms[0], vms[4], expected="PASS", timeout=30)
+
+ results.add_to_summary(0, '=')
+ logger.info("\n%s" % results.summary)
+
+ except Exception as e:
+ logger.error("exception occurred while executing testcase_4bis: %s", e)
+ raise
+ finally:
+ test_utils.cleanup_glance(conn, image_ids)
+ test_utils.cleanup_neutron(conn, neutron_client, [], bgpvpn_ids,
+ [], [], [], [])
+
+ try:
+ test_utils.delete_stack_and_wait(conn, stack_id)
+ except Exception as e:
+ logger.error(
+ "exception occurred while executing testcase_4bis: %s", e)
+
+ return results.compile_summary()
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/sdnvpn/test/functest/testcase_7.py b/sdnvpn/test/functest/testcase_7.py
index 0e3a8f5..e588b14 100644
--- a/sdnvpn/test/functest/testcase_7.py
+++ b/sdnvpn/test/functest/testcase_7.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
#
# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
@@ -21,8 +21,8 @@ network associated:
import logging
import sys
-from functest.utils import openstack_utils as os_utils
from sdnvpn.lib import config as sdnvpn_config
+from sdnvpn.lib import openstack_utils as os_utils
from sdnvpn.lib import utils as test_utils
from sdnvpn.lib.results import Results
@@ -35,35 +35,34 @@ TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
def main():
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
- glance_client = os_utils.get_glance_client()
(floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))
try:
image_id = os_utils.create_glance_image(
- glance_client, TESTCASE_CONFIG.image_name,
+ conn, TESTCASE_CONFIG.image_name,
COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
container="bare", public='public')
image_ids.append(image_id)
network_1_id, subnet_1_id, router_1_id = test_utils.create_network(
- neutron_client,
+ conn,
TESTCASE_CONFIG.net_1_name,
TESTCASE_CONFIG.subnet_1_name,
TESTCASE_CONFIG.subnet_1_cidr,
TESTCASE_CONFIG.router_1_name)
network_2_id, subnet_2_id, router_2_id = test_utils.create_network(
- neutron_client,
+ conn,
TESTCASE_CONFIG.net_2_name,
TESTCASE_CONFIG.subnet_2_name,
TESTCASE_CONFIG.subnet_2_cidr,
@@ -76,23 +75,23 @@ def main():
subnet_ids.extend([subnet_1_id, subnet_2_id])
sg_id = os_utils.create_security_group_full(
- neutron_client, TESTCASE_CONFIG.secgroup_name,
+ conn, TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
- test_utils.open_icmp(neutron_client, sg_id)
- test_utils.open_http_port(neutron_client, sg_id)
+ test_utils.open_icmp(conn, sg_id)
+ test_utils.open_http_port(conn, sg_id)
vm_2 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_2_name,
image_id,
network_2_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name)
- vm_2_ip = test_utils.get_instance_ip(vm_2)
+ vm_2_ip = test_utils.get_instance_ip(conn, vm_2)
u1 = test_utils.generate_ping_userdata([vm_2_ip])
vm_1 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
@@ -151,25 +150,24 @@ def main():
results.record_action(msg)
results.add_to_summary(0, '-')
- fip = os_utils.create_floating_ip(neutron_client)
- fip_added = os_utils.add_floating_ip(nova_client, vm_2.id,
- fip['fip_addr'])
+ vm2_port = test_utils.get_port(conn, vm_2.id)
+ fip_added = os_utils.attach_floating_ip(conn, vm2_port.id)
if fip_added:
results.add_success(msg)
else:
results.add_failure(msg)
- results.ping_ip_test(fip['fip_addr'])
+ results.ping_ip_test(fip_added.floating_ip_address)
- floatingip_ids.append(fip['fip_id'])
+ floatingip_ids.append(fip_added.id)
except Exception as e:
logger.error("exception occurred while executing testcase_7: %s", e)
raise
finally:
- test_utils.cleanup_nova(nova_client, instance_ids)
- test_utils.cleanup_glance(glance_client, image_ids)
- test_utils.cleanup_neutron(neutron_client, floatingip_ids,
+ test_utils.cleanup_nova(conn, instance_ids)
+ test_utils.cleanup_glance(conn, image_ids)
+ test_utils.cleanup_neutron(conn, neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,
router_ids, network_ids)
@@ -177,5 +175,4 @@ def main():
if __name__ == '__main__':
- logging.basicConfig(level=logging.INFO)
sys.exit(main())
diff --git a/sdnvpn/test/functest/testcase_8.py b/sdnvpn/test/functest/testcase_8.py
index e372fe1..26d1f35 100644
--- a/sdnvpn/test/functest/testcase_8.py
+++ b/sdnvpn/test/functest/testcase_8.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
#
# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
@@ -7,22 +7,20 @@
#
# http://www.apache.org/licenses/LICENSE-2.0
#
-"""
-Test whether router assoc can coexist with floating IP
-- Create VM1 in net1 with a subnet which is connected to a router
- which is connected with the gateway
-- Create VM2 in net2 with a subnet without a router attached.
-- Create bgpvpn with iRT=eRT
-- Assoc the router of net1 with bgpvpn and assoc net 2 with the bgpvpn
-- Try to ping from one VM to the other
-- Assign a floating IP to the VM in the router assoc network
-- Ping it the floating ip
-"""
+# Test whether router assoc can coexist with floating IP
+# - Create VM1 in net1 with a subnet which is connected to a router
+# which is connected with the gateway
+# - Create VM2 in net2 with a subnet without a router attached.
+# - Create bgpvpn with iRT=eRT
+# - Assoc the router of net1 with bgpvpn and assoc net 2 with the bgpvpn
+# - Try to ping from one VM to the other
+# - Assign a floating IP to the VM in the router assoc network
+# - Ping the floating IP of that VM
import logging
import sys
-from functest.utils import openstack_utils as os_utils
from sdnvpn.lib import config as sdnvpn_config
+from sdnvpn.lib import openstack_utils as os_utils
from sdnvpn.lib import utils as test_utils
from sdnvpn.lib.results import Results
@@ -35,101 +33,107 @@ TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
def main():
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
- glance_client = os_utils.get_glance_client()
(floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))
try:
image_id = os_utils.create_glance_image(
- glance_client, TESTCASE_CONFIG.image_name,
+ conn, TESTCASE_CONFIG.image_name,
COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
container="bare", public='public')
image_ids.append(image_id)
network_1_id, subnet_1_id, router_1_id = test_utils.create_network(
- neutron_client,
+ conn,
TESTCASE_CONFIG.net_1_name,
TESTCASE_CONFIG.subnet_1_name,
TESTCASE_CONFIG.subnet_1_cidr,
TESTCASE_CONFIG.router_1_name)
- network_2_id = test_utils.create_net(
- neutron_client,
- TESTCASE_CONFIG.net_2_name)
- subnet_2_id = test_utils.create_subnet(
- neutron_client,
+ network_2_id, subnet_2_id, router_1_id = test_utils.create_network(
+ conn,
+ TESTCASE_CONFIG.net_2_name,
TESTCASE_CONFIG.subnet_2_name,
TESTCASE_CONFIG.subnet_2_cidr,
- network_2_id)
+ TESTCASE_CONFIG.router_1_name)
interfaces.append(tuple((router_1_id, subnet_1_id)))
+ interfaces.append(tuple((router_1_id, subnet_2_id)))
network_ids.extend([network_1_id, network_2_id])
router_ids.append(router_1_id)
subnet_ids.extend([subnet_1_id, subnet_2_id])
sg_id = os_utils.create_security_group_full(
- neutron_client, TESTCASE_CONFIG.secgroup_name,
+ conn, TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
- test_utils.open_icmp(neutron_client, sg_id)
- test_utils.open_http_port(neutron_client, sg_id)
-
+ test_utils.open_icmp(conn, sg_id)
+ test_utils.open_http_port(conn, sg_id)
+
+ compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
+ av_zone_1 = "nova:" + compute_nodes[0]
+ # spawning the VMs on the same compute because fib flow (21) entries
+ # are not created properly if vm1 and vm2 are attached to two
+ # different computes
vm_2 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_2_name,
image_id,
network_2_id,
sg_id,
- secgroup_name=TESTCASE_CONFIG.secgroup_name)
- vm_2_ip = test_utils.get_instance_ip(vm_2)
+ secgroup_name=TESTCASE_CONFIG.secgroup_name,
+ compute_node=av_zone_1)
+ vm_2_ip = test_utils.get_instance_ip(conn, vm_2)
u1 = test_utils.generate_ping_userdata([vm_2_ip])
vm_1 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
+ compute_node=av_zone_1,
userdata=u1)
instance_ids.extend([vm_1.id, vm_2.id])
-
- results.record_action("Create VPN with eRT==iRT")
- vpn_name = "sdnvpn-8"
- kwargs = {
- "import_targets": TESTCASE_CONFIG.targets,
- "export_targets": TESTCASE_CONFIG.targets,
- "route_distinguishers": TESTCASE_CONFIG.route_distinguishers,
- "name": vpn_name
- }
- bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)
- bgpvpn_id = bgpvpn['bgpvpn']['id']
- logger.debug("VPN created details: %s" % bgpvpn)
- bgpvpn_ids.append(bgpvpn_id)
-
- msg = ("Associate router '%s' and net '%s' to the VPN."
- % (TESTCASE_CONFIG.router_1_name,
- TESTCASE_CONFIG.net_2_name))
- results.record_action(msg)
- results.add_to_summary(0, "-")
-
- test_utils.create_router_association(
- neutron_client, bgpvpn_id, router_1_id)
- test_utils.create_network_association(
- neutron_client, bgpvpn_id, network_2_id)
-
- test_utils.wait_for_bgp_router_assoc(
- neutron_client, bgpvpn_id, router_1_id)
- test_utils.wait_for_bgp_net_assoc(
- neutron_client, bgpvpn_id, network_2_id)
+ # TODO: uncomment the lines 107-134 once ODL fixes
+ # the bug https://jira.opendaylight.org/browse/NETVIRT-932
+ # results.record_action("Create VPN with eRT==iRT")
+ # vpn_name = "sdnvpn-8"
+ # kwargs = {
+ # "import_targets": TESTCASE_CONFIG.targets,
+ # "export_targets": TESTCASE_CONFIG.targets,
+ # "route_distinguishers": TESTCASE_CONFIG.route_distinguishers,
+ # "name": vpn_name
+ # }
+ # bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)
+ # bgpvpn_id = bgpvpn['bgpvpn']['id']
+ # logger.debug("VPN created details: %s" % bgpvpn)
+ # bgpvpn_ids.append(bgpvpn_id)
+
+ # msg = ("Associate router '%s' and net '%s' to the VPN."
+ # % (TESTCASE_CONFIG.router_1_name,
+ # TESTCASE_CONFIG.net_2_name))
+ # results.record_action(msg)
+ # results.add_to_summary(0, "-")
+
+ # test_utils.create_router_association(
+ # neutron_client, bgpvpn_id, router_1_id)
+ # test_utils.create_network_association(
+ # neutron_client, bgpvpn_id, network_2_id)
+
+ # test_utils.wait_for_bgp_router_assoc(
+ # neutron_client, bgpvpn_id, router_1_id)
+ # test_utils.wait_for_bgp_net_assoc(
+ # neutron_client, bgpvpn_id, network_2_id)
# Wait for VMs to get ips.
instances_up = test_utils.wait_for_instances_up(vm_2)
@@ -149,29 +153,30 @@ def main():
msg = "Assign a Floating IP to %s" % vm_1.name
results.record_action(msg)
- fip = os_utils.create_floating_ip(neutron_client)
+ vm1_port = test_utils.get_port(conn, vm_1.id)
+ fip_added = os_utils.attach_floating_ip(conn, vm1_port.id)
- fip_added = os_utils.add_floating_ip(nova_client,
- vm_1.id, fip['fip_addr'])
if fip_added:
results.add_success(msg)
else:
results.add_failure(msg)
+ fip = fip_added.floating_ip_address
+
results.add_to_summary(0, "=")
results.record_action("Ping %s via Floating IP" % vm_1.name)
results.add_to_summary(0, "-")
- results.ping_ip_test(fip['fip_addr'])
+ results.ping_ip_test(fip)
- floatingip_ids.append(fip['fip_id'])
+ floatingip_ids.append(fip_added.id)
except Exception as e:
logger.error("exception occurred while executing testcase_8: %s", e)
raise
finally:
- test_utils.cleanup_nova(nova_client, instance_ids)
- test_utils.cleanup_glance(glance_client, image_ids)
- test_utils.cleanup_neutron(neutron_client, floatingip_ids,
+ test_utils.cleanup_nova(conn, instance_ids)
+ test_utils.cleanup_glance(conn, image_ids)
+ test_utils.cleanup_neutron(conn, neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,
router_ids, network_ids)
@@ -179,5 +184,4 @@ def main():
if __name__ == '__main__':
- logging.basicConfig(level=logging.INFO)
sys.exit(main())
diff --git a/sdnvpn/test/functest/testcase_8bis.py b/sdnvpn/test/functest/testcase_8bis.py
new file mode 100644
index 0000000..d850020
--- /dev/null
+++ b/sdnvpn/test/functest/testcase_8bis.py
@@ -0,0 +1,176 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2017 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Test whether router assoc can coexist with floating IP
+# - Create VM1 in net1 with a subnet which is connected to a router
+# which is connected with the gateway
+# - Create VM2 in net2 with a subnet without a router attached.
+# - Create bgpvpn with iRT=eRT
+# - Assoc the router of net1 with bgpvpn and assoc net 2 with the bgpvpn
+# - Try to ping from one VM to the other
+# - Assign a floating IP to the VM in the router assoc network
+# - Ping the floating IP of that VM
+
+import logging
+import sys
+import pkg_resources
+
+from sdnvpn.lib import config as sdnvpn_config
+from sdnvpn.lib import openstack_utils as os_utils
+from sdnvpn.lib import utils as test_utils
+from sdnvpn.lib.results import Results
+
+
+logger = logging.getLogger(__name__)
+
+COMMON_CONFIG = sdnvpn_config.CommonConfig()
+TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
+ 'sdnvpn.test.functest.testcase_8bis')
+
+
+def main():
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
+
+ results.add_to_summary(0, "=")
+ results.add_to_summary(2, "STATUS", "SUBTEST")
+ results.add_to_summary(0, "=")
+
+ # neutron client is needed as long as bgpvpn heat module
+ # is not yet installed by default in apex (APEX-618)
+ neutron_client = os_utils.get_neutron_client()
+
+ image_ids = []
+ bgpvpn_ids = []
+
+ try:
+ image_id = os_utils.create_glance_image(
+ conn, TESTCASE_CONFIG.image_name,
+ COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
+ container='bare', public='public')
+ image_ids = [image_id]
+
+ compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
+ az_1 = "nova:" + compute_nodes[0]
+ # spawning the VMs on the same compute because fib flow (21) entries
+ # are not created properly if vm1 and vm2 are attached to two
+ # different computes
+
+ file_path = pkg_resources.resource_filename(
+ 'sdnvpn', TESTCASE_CONFIG.hot_file_name)
+ templ = open(file_path, 'r').read()
+ logger.debug("Template is read: '%s'" % templ)
+ env = test_utils.get_heat_environment(TESTCASE_CONFIG, COMMON_CONFIG)
+ logger.debug("Environment is read: '%s'" % env)
+
+ env['name'] = TESTCASE_CONFIG.stack_name
+ env['parameters']['external_nw'] = os_utils.get_external_net(conn)
+ env['template'] = templ
+ env['parameters']['image_n'] = TESTCASE_CONFIG.image_name
+ env['parameters']['av_zone_1'] = az_1
+
+ stack_id = os_utils.create_stack(conn, **env)
+ if stack_id is None:
+ logger.error('Stack create start failed')
+ raise SystemError('Stack create start failed')
+
+ test_utils.wait_stack_for_status(conn, stack_id, 'CREATE_COMPLETE')
+
+ router_1_output = os_utils.get_output(conn, stack_id, 'router_1_o')
+ router_1_id = router_1_output['output_value']
+ net_2_output = os_utils.get_output(conn, stack_id, 'net_2_o')
+ network_2_id = net_2_output['output_value']
+
+ vm_stack_output_keys = ['vm1_o', 'vm2_o']
+ vms = test_utils.get_vms_from_stack_outputs(conn,
+ stack_id,
+ vm_stack_output_keys)
+
+ logger.debug("Entering base test case with stack '%s'" % stack_id)
+
+ # TODO: check if ODL fixed bug
+ # https://jira.opendaylight.org/browse/NETVIRT-932
+ results.record_action('Create VPN with eRT==iRT')
+ vpn_name = 'sdnvpn-8'
+ kwargs = {
+ 'import_targets': TESTCASE_CONFIG.targets,
+ 'export_targets': TESTCASE_CONFIG.targets,
+ 'route_distinguishers': TESTCASE_CONFIG.route_distinguishers,
+ 'name': vpn_name
+ }
+ bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)
+ bgpvpn_id = bgpvpn['bgpvpn']['id']
+ logger.debug("VPN created details: %s" % bgpvpn)
+ bgpvpn_ids.append(bgpvpn_id)
+
+ msg = ("Associate router '%s' and net '%s' to the VPN."
+ % (TESTCASE_CONFIG.heat_parameters['router_1_name'],
+ TESTCASE_CONFIG.heat_parameters['net_2_name']))
+ results.record_action(msg)
+ results.add_to_summary(0, "-")
+
+ test_utils.create_router_association(
+ neutron_client, bgpvpn_id, router_1_id)
+ test_utils.create_network_association(
+ neutron_client, bgpvpn_id, network_2_id)
+
+ test_utils.wait_for_bgp_router_assoc(
+ neutron_client, bgpvpn_id, router_1_id)
+ test_utils.wait_for_bgp_net_assoc(
+ neutron_client, bgpvpn_id, network_2_id)
+
+ results.get_ping_status(vms[0], vms[1], expected="PASS", timeout=200)
+ results.add_to_summary(0, "=")
+
+ msg = "Assign a Floating IP to %s - using stack update" % vms[0].name
+ results.record_action(msg)
+
+ file_path = pkg_resources.resource_filename(
+ 'sdnvpn', TESTCASE_CONFIG.hot_update_file_name)
+ templ_update = open(file_path, 'r').read()
+ logger.debug("Update template is read: '%s'" % templ_update)
+ templ = test_utils.merge_yaml(templ, templ_update)
+
+ env['name'] = TESTCASE_CONFIG.stack_name
+ env['parameters']['external_nw'] = os_utils.get_external_net(conn)
+ env['template'] = templ
+ env['parameters']['image_n'] = TESTCASE_CONFIG.image_name
+ env['parameters']['av_zone_1'] = az_1
+
+ os_utils.update_stack(conn, stack_id, **env)
+
+ test_utils.wait_stack_for_status(conn, stack_id, 'UPDATE_COMPLETE')
+
+ fip_1_output = os_utils.get_output(conn, stack_id, 'fip_1_o')
+ fip = fip_1_output['output_value']
+
+ results.add_to_summary(0, "=")
+ results.record_action("Ping %s via Floating IP" % vms[0].name)
+ results.add_to_summary(0, "-")
+ results.ping_ip_test(fip)
+
+ except Exception as e:
+ logger.error("exception occurred while executing testcase_8bis: %s", e)
+ raise
+ finally:
+ test_utils.cleanup_glance(conn, image_ids)
+ test_utils.cleanup_neutron(conn, neutron_client, [], bgpvpn_ids,
+ [], [], [], [])
+
+ try:
+ test_utils.delete_stack_and_wait(conn, stack_id)
+ except Exception as e:
+ logger.error(
+ "exception occurred while executing testcase_8bis: %s", e)
+
+ return results.compile_summary()
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/sdnvpn/test/functest/testcase_9.py b/sdnvpn/test/functest/testcase_9.py
index 1489a5a..c74ceb5 100644
--- a/sdnvpn/test/functest/testcase_9.py
+++ b/sdnvpn/test/functest/testcase_9.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
#
# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
@@ -15,6 +15,7 @@
# - Verify that the OpenDaylight and gateway Quagga peer
import logging
import sys
+import os
from sdnvpn.lib import config as sdnvpn_config
from sdnvpn.lib import utils as test_utils
@@ -34,12 +35,21 @@ def main():
results.add_to_summary(0, "=")
openstack_nodes = test_utils.get_nodes()
-
+ installer_type = str(os.environ['INSTALLER_TYPE'].lower())
# node.is_odl() doesn't work in Apex
# https://jira.opnfv.org/browse/RELENG-192
- controllers = [node for node in openstack_nodes
- if "running" in
- node.run_cmd("sudo systemctl status opendaylight")]
+ fuel_cmd = "sudo systemctl status opendaylight"
+ apex_cmd = "sudo docker exec opendaylight_api " \
+ "/opt/opendaylight/bin/status"
+ health_cmd = "sudo docker ps -f name=opendaylight_api -f " \
+ "health=healthy -q"
+ if installer_type in ["fuel"]:
+ controllers = [node for node in openstack_nodes
+ if "running" in node.run_cmd(fuel_cmd)]
+ elif installer_type in ["apex"]:
+ controllers = [node for node in openstack_nodes
+ if node.run_cmd(health_cmd)
+ if "Running" in node.run_cmd(apex_cmd)]
msg = ("Verify that all OpenStack nodes OVS br-int have "
"fail_mode set to secure")
@@ -67,5 +77,4 @@ def main():
if __name__ == '__main__':
- logging.basicConfig(level=logging.INFO)
sys.exit(main())
diff --git a/setup.cfg b/setup.cfg
index bb825eb..ca4e03b 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,9 +1,12 @@
[metadata]
name = sdnvpn
-version = 5
home-page = https://wiki.opnfv.org/display/sdnvpn/SDNVPN+project+main+page
[files]
packages = sdnvpn
scripts =
sdnvpn/test/functest/run_sdnvpn_tests.py
+
+[entry_points]
+xtesting.testcase =
+ bgpvpn = sdnvpn.test.functest.run_sdnvpn_tests:SdnvpnFunctest
diff --git a/test-requirements.txt b/test-requirements.txt
new file mode 100644
index 0000000..646bbae
--- /dev/null
+++ b/test-requirements.txt
@@ -0,0 +1,5 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+flake8 # MIT
+yamllint
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..7880718
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,53 @@
+[tox]
+minversion = 1.6
+envlist =
+ docs,
+ docs-linkcheck,
+ pep8,
+ yamllint
+skipsdist = true
+
+[testenv]
+usedevelop = False
+setenv=
+ HOME = {envtmpdir}
+ PYTHONPATH = {toxinidir}
+deps =
+ -chttps://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=master
+ -chttps://git.opnfv.org/functest/plain/upper-constraints.txt?h=master
+ -r{toxinidir}/test-requirements.txt
+ -r{toxinidir}/requirements.txt
+install_command = pip install {opts} {packages}
+
+[testenv:docs]
+basepython = python2.7
+deps = -r{toxinidir}/docs/requirements.txt
+commands =
+ sphinx-build -W -b html -n -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/html
+ echo "Generated docs available in {toxinidir}/docs/_build/html"
+whitelist_externals = echo
+
+[testenv:docs-linkcheck]
+basepython = python2.7
+deps = -r{toxinidir}/docs/requirements.txt
+commands = sphinx-build -W -b linkcheck -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/linkcheck
+
+[testenv:yamllint]
+basepython = python2.7
+files =
+ {toxinidir}/docs
+ {toxinidir}/sdnvpn/test/functest/
+commands =
+ yamllint -s {[testenv:yamllint]files}
+
+[testenv:pep8]
+basepython = python2.7
+commands = flake8 {toxinidir}
+
+[flake8]
+# E123, E125 skipped as they are invalid PEP-8.
+
+show-source = True
+ignore = E123,E125
+builtins = _
+exclude = build,dist,doc,legacy,.eggs,.git,.tox,.venv,testapi_venv,venv