summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.gitignore3
-rw-r--r--INFO.yaml79
-rw-r--r--docs/_static/favicon.icobin0 -> 15086 bytes
-rw-r--r--docs/_static/logo.pngbin0 -> 2829 bytes
-rw-r--r--docs/conf.py1
-rw-r--r--docs/conf.yaml3
-rw-r--r--docs/development/overview/index.rst468
-rw-r--r--docs/index.rst24
-rw-r--r--docs/release/installation/index.rst167
-rw-r--r--docs/release/release-notes/index.rst4
-rw-r--r--docs/release/release-notes/release-notes.rst63
-rw-r--r--docs/release/scenarios/os-odl-bgpvpn/scenario.description.rst33
-rw-r--r--docs/requirements.txt2
-rw-r--r--odl-pipeline/lib/odl_reinstaller/odl_reinstaller.py9
-rwxr-xr-xodl-pipeline/lib/test_environment/test_environment.py1
-rwxr-xr-xodl-pipeline/lib/tripleo_introspector/tripleo_introspector.py1
-rwxr-xr-xodl-pipeline/lib/utils/processutils.py1
-rw-r--r--requirements.txt15
-rw-r--r--sdnvpn/artifacts/quagga_setup.sh24
-rw-r--r--sdnvpn/artifacts/testcase_1bis.yaml234
-rw-r--r--sdnvpn/artifacts/testcase_2bis.yaml289
-rw-r--r--sdnvpn/artifacts/testcase_4bis.yaml247
-rw-r--r--sdnvpn/artifacts/testcase_8bis.yaml173
-rw-r--r--sdnvpn/artifacts/testcase_8bis_upd.yaml17
-rw-r--r--sdnvpn/lib/openstack_utils.py782
-rw-r--r--sdnvpn/lib/quagga.py22
-rw-r--r--sdnvpn/lib/results.py27
-rw-r--r--sdnvpn/lib/utils.py536
-rwxr-xr-xsdnvpn/sh_utils/fetch-log-script.sh12
-rw-r--r--sdnvpn/test/functest/config.yaml137
-rw-r--r--sdnvpn/test/functest/run_sdnvpn_tests.py50
-rw-r--r--sdnvpn/test/functest/run_tempest.py127
-rw-r--r--sdnvpn/test/functest/testcase_1.py43
-rw-r--r--sdnvpn/test/functest/testcase_10.py56
-rw-r--r--sdnvpn/test/functest/testcase_11.py30
-rw-r--r--sdnvpn/test/functest/testcase_12.py30
-rw-r--r--sdnvpn/test/functest/testcase_13.py53
-rw-r--r--sdnvpn/test/functest/testcase_1bis.py209
-rw-r--r--sdnvpn/test/functest/testcase_2.py43
-rw-r--r--sdnvpn/test/functest/testcase_2bis.py188
-rw-r--r--sdnvpn/test/functest/testcase_3.py214
-rw-r--r--sdnvpn/test/functest/testcase_4.py41
-rw-r--r--sdnvpn/test/functest/testcase_4bis.py215
-rw-r--r--sdnvpn/test/functest/testcase_7.py39
-rw-r--r--sdnvpn/test/functest/testcase_8.py40
-rw-r--r--sdnvpn/test/functest/testcase_8bis.py176
-rw-r--r--sdnvpn/test/functest/testcase_9.py18
-rw-r--r--setup.cfg4
-rw-r--r--test-requirements.txt5
-rw-r--r--tox.ini53
50 files changed, 3467 insertions, 1541 deletions
diff --git a/.gitignore b/.gitignore
index 332a121..444a0cb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,7 +3,6 @@
*.pyc
*~
.*.sw?
-/docs_build/
/docs_output/
/releng/
*.tar.gz
@@ -13,3 +12,5 @@ odl-pipeline/build/*
odl-pipeline/trash/*
odl-pipeline/lib/tmp
odl-pipeline/disks/*
+.tox
+docs/_build/*
diff --git a/INFO.yaml b/INFO.yaml
new file mode 100644
index 0000000..3968ad8
--- /dev/null
+++ b/INFO.yaml
@@ -0,0 +1,79 @@
+---
+project: 'SDN Distributed Routing and VPN'
+project_creation_date: 'September 1st, 2015'
+project_category: 'Collaborative Development'
+lifecycle_state: 'Incubation'
+project_lead: &opnfv_sdnvpn_ptl
+ name: 'Tim Irnich'
+ email: 'tim.irnich@ericsson.com'
+ id: 'timirnich'
+ company: 'ericsson.com'
+ timezone: 'Unknown'
+primary_contact: *opnfv_sdnvpn_ptl
+issue_tracking:
+ type: 'jira'
+ url: 'https://jira.opnfv.org/projects/sdnvpn'
+ key: 'sdnvpn'
+mailing_list:
+ type: 'mailman2'
+ url: 'opnfv-tech-discuss@lists.opnfv.org'
+ tag: '[sdnvpn]'
+realtime_discussion:
+ type: irc
+ server: 'freenode.net'
+ channel: '#opnfv-sdnvpn'
+meetings:
+ - type: 'gotomeeting+irc'
+ agenda: # eg: 'https://wiki.opnfv.org/display/'
+ url: # eg: 'https://global.gotomeeting.com/join/819733085'
+ server: 'freenode.net'
+ channel: '#opnfv-meeting'
+ repeats: 'weekly'
+ time: # eg: '16:00 UTC'
+repositories:
+ - 'sdnvpn'
+committers:
+ - <<: *opnfv_sdnvpn_ptl
+ - name: 'Prem Sankar Gopannan'
+ email: 'prem.sankar.g@ericsson.com'
+ company: 'ericsson.com'
+ id: 'premsankar74'
+ - name: 'Nikolas Hermanns'
+ email: 'nikolas.hermanns@ericsson.com'
+ company: 'ericsson.com'
+ id: 'enikher'
+ - name: 'Jose Lausuch'
+ email: 'jalausuch@suse.com'
+ company: 'suse.com'
+ id: 'jose.lausuch'
+ - name: 'Thomas Morin'
+ email: 'thomas.morin@orange.com'
+ company: 'orange.com'
+ id: 'tmmorin'
+ - name: 'Thomas Sounapoglou'
+ email: 'soth@intracom-telecom.com'
+ company: 'intracom-telecom.com'
+ id: 'tomsou'
+ - name: 'Periyasamy Palanisamy'
+ email: 'periyasamy.palanisamy@ericsson.com'
+ company: 'ericsson.com'
+ id: 'pperiyasamy'
+ - name: 'Periyasamy Palanisamy'
+ email: 'periyasamy.palanisamy@ericsson.com'
+ company: 'ericsson.com'
+ id: 'pperiyasamy'
+ - name: 'Nikos Karandreas'
+ email: 'nick@intracom-telecom.com'
+ company: 'intracom-telecom.com'
+ id: 'nick_kar'
+ - name: 'Dimitrios Tsiolakis'
+ email: 'dmts@intracom-telecom.com'
+ company: 'intracom-telecom.com'
+ id: 'dimitris_'
+tsc:
+ # yamllint disable rule:line-length
+ approval: 'http://meetbot.opnfv.org/meetings/opnfv-meeting/2015/opnfv-meeting.2015-09-01-13.59.html'
+ # yamllint enable rule:line-length
+ changes:
+ - type: 'promotion'
+ link: '(Helpdesk#26575)'
diff --git a/docs/_static/favicon.ico b/docs/_static/favicon.ico
new file mode 100644
index 0000000..bbe55ab
--- /dev/null
+++ b/docs/_static/favicon.ico
Binary files differ
diff --git a/docs/_static/logo.png b/docs/_static/logo.png
new file mode 100644
index 0000000..1519503
--- /dev/null
+++ b/docs/_static/logo.png
Binary files differ
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 0000000..eb12e74
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1 @@
+from docs_conf.conf import * # noqa: F401,F403
diff --git a/docs/conf.yaml b/docs/conf.yaml
new file mode 100644
index 0000000..4175c7c
--- /dev/null
+++ b/docs/conf.yaml
@@ -0,0 +1,3 @@
+---
+project_cfg: opnfv
+project: SDNVPN
diff --git a/docs/development/overview/index.rst b/docs/development/overview/index.rst
index 021ace9..1127130 100644
--- a/docs/development/overview/index.rst
+++ b/docs/development/overview/index.rst
@@ -1,20 +1,14 @@
-.. _sdnvpn-overview:
-
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) Tim Irnich, (tim.irnich@ericsson.com) and others
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) OPNFV, Ericsson AB and others.
=======
SDN VPN
=======
-A high-level description of the scenarios is provided in this section.
-For details of the scenarios and their provided capabilities refer to
-the scenario description document:
-http://artifacts.opnfv.org/danube/sdnpvn/scenarios/os-odl_l2-bgpvpn/index.html
-
The BGPVPN feature enables creation of BGP VPNs on the Neutron API according to the OpenStack
-BGPVPN blueprint at https://blueprints.launchpad.net/neutron/+spec/neutron-bgp-vpn.
+BGPVPN blueprint at `Neutron Extension for BGP Based VPN <https://blueprints.launchpad.net/neutron/+spec/neutron-bgp-vpn>`_.
+
In a nutshell, the blueprint defines a BGPVPN object and a number of ways
how to associate it with the existing Neutron object model, as well as a unique
definition of the related semantics. The BGPVPN framework supports a backend
@@ -26,238 +20,222 @@ implementation through the ODL NetVirt project.
SDNVPN Testing Suite
====================
-An overview of the SDNVPN Test is depicted here. More details for each test case are provided:
-https://wiki.opnfv.org/display/sdnvpn/SDNVPN+Testing
-
- BGPVPN Tempest test cases
- - Create BGPVPN passes
- - Create BGPVPN as non-admin fails
- - Delete BGPVPN as non-admin fails
- - Show BGPVPN as non-owner fails
- - List BGPVPNs as non-owner fails
- - Show network associated BGPVPNs as non-owner fails
- - List network associated BGPVPNs as non-owner fails
- - Associate/Deassociate a network to a BGPVPN resource passes
- - Update route targets on a BGPVPN passes
- - Update route targets on a BGPVPN as non-admin fails
- - Reject the creation of BGPVPN with invalid route targets passes
- - Reject the update of BGPVPN with invalid route targets passes
- - Reject the association on an invalid network to a BGPVPN passes
- - Reject the diassociation on an invalid network to a BGPVPN passes
- - Associate/Deassociate a router to a BGPVPN resource passes
- - Attach the subnet of an associated network to an associated router of the same BGVPN passes
-
-
-
- Functest scenario specific tests:
-
- Test Case 1 - VPN provides connectivity between subnets, using network association
- Name: VPN connecting Neutron networks and subnets
- Description: VPNs provide connectivity across Neutron networks and subnets if configured accordingly.
-
- Test setup procedure:
- Set up VM1 and VM2 on Node1 and VM3 on Node2, all having ports in the same Neutron Network N1
- Moreover all ports have 10.10.10/24 addresses (this subnet is denoted SN1 in the following)
- Set up VM4 on Node1 and VM5 on Node2, both having ports in Neutron Network N2
- Moreover all ports have 10.10.11/24 addresses (this subnet is denoted SN2 in the following)
-
- Test execution:
- Create VPN1 with eRT<>iRT (so that connected subnets should not reach each other)
- Associate SN1 to VPN1
- Ping from VM1 to VM2 should work
- Ping from VM1 to VM3 should work
- Ping from VM1 to VM4 should not work
- Associate SN2 to VPN1
- Ping from VM4 to VM5 should work
- Ping from VM1 to VM4 should not work (disabled until isolation fixed upstream)
- Ping from VM1 to VM5 should not work (disabled until isolation fixed upstream)
- Change VPN 1 so that iRT=eRT
- Ping from VM1 to VM4 should work
- Ping from VM1 to VM5 should work
-
- Test Case 2 - tenant separation
- Name: Using VPNs for tenant separation
- Description: Using VPNs to isolate tenants so that overlapping IP address ranges can be used
-
- Test setup procedure:
- Set up VM1 and VM2 on Node1 and VM3 on Node2, all having ports in the same Neutron Network N1.
- VM1 and VM2 have IP addresses in a subnet SN1 with range 10.10.10/24
- VM1: 10.10.10.11, running an HTTP server which returns "I am VM1" for any HTTP request
- (or something else than an HTTP server)
- VM2: 10.10.10.12, running an HTTP server which returns "I am VM2" for any HTTP request
- VM3 has an IP address in a subnet SN2 with range 10.10.11/24
- VM3: 10.10.11.13, running an HTTP server which returns "I am VM3" for any HTTP request
- Set up VM4 on Node1 and VM5 on Node2, both having ports in Neutron Network N2
- VM4 has an address in a subnet SN1b with range 10.10.10/24
- VM4: 10.10.10.12 (the same as VM2), running an HTTP server which returns "I am VM4" for any HTTP request
- VM5 has an address in a subnet SN2b with range 10.10.11/24
- VM5: 10.10.11.13 (the same as VM3), running an HTTP server which returns "I am VM5" for any HTTP request
-
- Test execution:
- Create VPN 1 with iRT=eRT=RT1 and associate N1 to it
- HTTP from VM1 to VM2 and VM3 should work
- It returns "I am VM2" and "I am VM3" respectively
- HTTP from VM1 to VM4 and VM5 should not work
- It never returns "I am VM4" or "I am VM5"
- Create VPN2 with iRT=eRT=RT2 and associate N2 to it
- HTTP from VM4 to VM5 should work
- It returns "I am VM5"
- HTTP from VM4 to VM1 and VM3 should not work
- It never returns "I am VM1" or "I am VM3"
-
-
- Test Case 3 - Data Center Gateway integration
- Name: Data Center Gateway integration
- Description: Investigate the peering functionality of BGP protocol,
- using a Zrpcd/Quagga router and OpenDaylight Controller
-
- Test setup procedure:
- Search in the pool of nodes and find one Compute node and one Controller nodes, that have OpenDaylight controller running
- Start an instance using ubuntu-16.04-server-cloudimg-amd64-disk1.img image and in it run the Quagga setup script
- Start bgp router in the Controller node, using odl:configure-bgp
-
- Test execution:
- Set up a Quagga instance in a nova compute node
- Start a BGP router with OpenDaylight in a controller node
- Add the Quagga running in the instance as a neighbor
- Check that bgpd is running
- Verify that the OpenDaylight and gateway Quagga peer each other
- Start an instance in a second nova compute node and connect it with a new network, (Network 3-3).
- Create a bgpvpn (include parameters route-distinguisher and route-targets) and associate it with the network created
- Define the same route-distinguisher and route-targets on the simulated quagga side
- Check that the routes from the Network 3-3 are advertised towards simulated Quagga VM
-
- Test Case 4 - VPN provides connectivity between subnets using router association
- Functest: variant of Test Case 1.
- Set up a Router R1 with one connected network/subnet N1/S1.
- Set up a second network N2.
- Create VPN1 and associate Router R1 and Network N2 to it.
- Hosts from N2 should be able to reach hosts in N1.
-
- Name: VPN connecting Neutron networks and subnets using router association
- Description: VPNs provide connectivity across Neutron networks and subnets if configured accordingly.
-
- Test setup procedure:
- Set up VM1 and VM2 on Node1 and VM3 on Node2,
- All VMs have ports in the same Neutron Network N1 and 10.10.10/24 addresses
- (this subnet is denoted SN1 in the following).
- N1/SN1 are connected to router R1.
- Set up VM4 on Node1 and VM5 on Node2,
- Both VMs have ports in Neutron Network N2 and having 10.10.11/24 addresses
- (this subnet is denoted SN2 in the following)
-
- Test execution:
- Create VPN1 with eRT<>iRT (so that connected subnets should not reach each other)
- Associate R1 to VPN1
- Ping from VM1 to VM2 should work
- Ping from VM1 to VM3 should work
- Ping from VM1 to VM4 should not work
- Associate SN2 to VPN1
- Ping from VM4 to VM5 should work
- Ping from VM1 to VM4 should not work
- Ping from VM1 to VM5 should not work
- Change VPN1 so that iRT=eRT
- Ping from VM1 to VM4 should work
- Ping from VM1 to VM5 should work
-
- Test Case 7 - Network associate a subnet with a router attached to a VPN and
- verify floating IP functionality (disabled, because of ODL Bug 6962)
-
- A test for https://bugs.opendaylight.org/show_bug.cgi?id=6962
-
- Setup procedure:
- Create VM1 in a subnet with a router attached.
- Create VM2 in a different subnet with another router attached.
- Network associate them to a VPN with iRT=eRT
- Ping from VM1 to VM2 should work
- Assign a floating IP to VM1
- Pinging the floating IP should work
-
- Test Case 8 - Router associate a subnet with a router attached to a VPN and
- verify floating IP functionality
-
- Setup procedure:
- Create VM1 in a subnet with a router which is connected with the gateway
- Create VM2 in a different subnet without a router attached.
- Assoc the two networks in a VPN iRT=eRT
- One with router assoc, other with net assoc
- Try to ping from one VM to the other
- Assign a floating IP to the VM in the router assoc network
- Ping it
-
- Test Case 9 - Check fail mode in OVS br-int interfaces
- This testcase checks if the fail mode is always “secure”.
- To accomplish it, a check is performed on all OVS br-int interfaces, for all OpenStack nodes.
- The testcase is considered as successful if all OVS br-int interfaces have fail_mode=secure
-
-
- Test Case 10 - Check the communication between a group of VMs
- This testcase investigates if communication between a group of VMs is interrupted upon deletion
- and creation of VMs inside this group.
-
- Test case flow:
- Create 3 VMs: VM_1 on compute 1, VM_2 on compute 1, VM_3 on compute 2.
- All VMs ping each other.
- VM_2 is deleted.
- Traffic is still flying between VM_ 1 and VM_3.
- A new VM, VM_ 4 is added to compute 1.
- Traffic is not interrupted and VM_4 can be reached as well.
-
-
- Testcase 11: test Opendaylight resync and group_add_mod feature mechanisms
- This is testcase to test Opendaylight resync and group_add_mod feature functionalities
-
- Sub-testcase 11-1:
- Create and start 2 VMs, connected to a common Network.
- New groups should appear in OVS dump
- OVS disconnects and the VMs and the networks are cleaned.
- The new groups are still in the OVS dump,
- cause OVS is not connected anymore, so it is not notified that the groups are deleted
- OVS re-connects.
- The new groups should be deleted, as Opendaylight has to resync the groups totally and
- should remove the groups since VMS are deleted.
-
- Sub-testcase 11-2:
- Create and start 2 VMs, connected to a common Network.
- New groups should appear in OVS dump
- OVS disconnects.
- The new groups are still in the OVS dump, cause OVS is not connected anymore,
- so it is not notified that the groups are deleted
- OVS re-connects.
- The new groups should be still there, as the topology remains. Opendaylight Carbon's
- group_add_mod mechanism should handle the already existing group.
- OVS re-connects.
- The new groups should be still there, as the topology remains.
- Opendaylight Carbon’ group_add_mod mechanism should handle the already existing group.
-
- Testcase 12: Test Resync mechanism between Opendaylight and OVS
- This is the testcase to validate flows and groups are programmed correctly
- after resync which is triggered by OVS del-controller/set-controller commands
- and adding/remove iptables drop rule on OF port 6653.
-
- Sub-testcase 12-1:
- Create and start 2 VMs, connected to a common Network
- New flows and groups were added to OVS
- Reconnect the OVS by running del-ontroller and set-controller commands
- The flows and groups are still intact and none of the flows/groups
- are removed
- Reconnect the OVS by adding ip tables drop rule and then remove it
- The flows and groups are still intact and none of the flows/groups
- are removed
-
- Testcase 13: Test ECMP (Equal-cost multi-path routing) for the extra route
- This testcase validates spraying behavior in OvS when an extra route is
- configured such that it can be reached from two nova VMs in the
- same network.
-
- Setup procedure:
- Create and start VM1 and VM2 configured with sub interface set to same ip
- address in both VMs, connected to a common network/router.
- Update the VM1 and VM2's Neutron ports with allowed address pairs for sub
- interface ip/mac addresses.
- Create BGPVPN with two route distinguishers.
- Associate router with BGPVPN.
- Update the router with above sub-interface ip address with nexthops set to
- VMs ip addresses.
- Create VM3 and connected to the same network.
- Ping sub-interface IP address from VM3.
+An overview of the SDNVPN Test is depicted here. A more detailed description of each test case can
+be found at `SDNVPN Testing <https://wiki.opnfv.org/display/sdnvpn/SDNVPN+Testing>`_.
+
+Functest scenario specific tests
+""""""""""""""""""""""""""""""""""
+- **Test Case 1**: VPN provides connectivity between subnets, using network association
+
+ Name: VPN connecting Neutron networks and subnets
+ Description: VPNs provide connectivity across Neutron networks and subnets if configured accordingly.
+ Test setup procedure: Set up VM1 and VM2 on Node1 and VM3 on Node2, all having ports in the same Neutron Network N1
+
+ Moreover all ports have 10.10.10/24 addresses (this subnet is denoted SN1 in the following)
+ Set up VM4 on Node1 and VM5 on Node2, both having ports in Neutron Network N2
+ Moreover all ports have 10.10.11/24 addresses (this subnet is denoted SN2 in the following)
+
+ Test execution:
+ * Create VPN1 with eRT<>iRT (so that connected subnets should not reach each other)
+ * Associate SN1 to VPN1
+ * Ping from VM1 to VM2 should work
+ * Ping from VM1 to VM3 should work
+ * Ping from VM1 to VM4 should not work
+ * Associate SN2 to VPN1
+ * Ping from VM4 to VM5 should work
+ * Ping from VM1 to VM4 should not work (disabled until isolation fixed upstream)
+ * Ping from VM1 to VM5 should not work (disabled until isolation fixed upstream)
+ * Change VPN 1 so that iRT=eRT
+ * Ping from VM1 to VM4 should work
+ * Ping from VM1 to VM5 should work
+
+- **Test Case 2**: Tenant separation
+
+ Name: Using VPNs for tenant separation
+ Description: Using VPNs to isolate tenants so that overlapping IP address ranges can be used
+
+ Test setup procedure:
+ * Set up VM1 and VM2 on Node1 and VM3 on Node2, all having ports in the same Neutron Network N1.
+ * VM1 and VM2 have IP addresses in a subnet SN1 with range 10.10.10/24
+ * VM1: 10.10.10.11, running an HTTP server which returns "I am VM1" for any HTTP request (or something else than an HTTP server)
+ * VM2: 10.10.10.12, running an HTTP server which returns "I am VM2" for any HTTP request
+ * VM3 has an IP address in a subnet SN2 with range 10.10.11/24
+ * VM3: 10.10.11.13, running an HTTP server which returns "I am VM3" for any HTTP request
+ * Set up VM4 on Node1 and VM5 on Node2, both having ports in Neutron Network N2
+ * VM4 has an address in a subnet SN1b with range 10.10.10/24
+ * VM4: 10.10.10.12 (the same as VM2), running an HTTP server which returns "I am VM4" for any HTTP request
+ * VM5 has an address in a subnet SN2b with range 10.10.11/24
+ * VM5: 10.10.11.13 (the same as VM3), running an HTTP server which returns "I am VM5" for any HTTP request
+
+ Test execution:
+ * Create VPN 1 with iRT=eRT=RT1 and associate N1 to it
+ * HTTP from VM1 to VM2 and VM3 should work
+ It returns "I am VM2" and "I am VM3" respectively
+ * HTTP from VM1 to VM4 and VM5 should not work
+ It never returns "I am VM4" or "I am VM5"
+ * Create VPN2 with iRT=eRT=RT2 and associate N2 to it
+ * HTTP from VM4 to VM5 should work
+ It returns "I am VM5"
+ * HTTP from VM4 to VM1 and VM3 should not work
+ It never returns "I am VM1" or "I am VM3"
+
+
+- **Test Case 3**: Data Center Gateway integration
+
+ Name: Data Center Gateway integration
+ Description: Investigate the peering functionality of BGP protocol, using a Zrpcd/Quagga router
+ and OpenDaylight Controller
+
+ Test setup procedure:
+ * Search in the pool of nodes and find one Compute node and one Controller nodes, that have OpenDaylight controller running
+ * Start an instance using ubuntu-16.04-server-cloudimg-amd64-disk1.img image and in it run the Quagga setup script
+ * Start bgp router in the Controller node, using odl:configure-bgp
+
+ Test execution:
+ * Set up a Quagga instance in a nova compute node
+ * Start a BGP router with OpenDaylight in a controller node
+ * Add the Quagga running in the instance as a neighbor
+ * Check that bgpd is running
+ * Verify that the OpenDaylight and gateway Quagga peer each other
+ * Start an instance in a second nova compute node and connect it with a new network, (Network 3-3).
+ * Create a bgpvpn (include parameters route-distinguisher and route-targets) and associate it with the network created
+ * Define the same route-distinguisher and route-targets on the simulated quagga side
+ * Check that the routes from the Network 3-3 are advertised towards simulated Quagga VM
+
+- **Test Case 4**: VPN provides connectivity between subnets using router association
+
+ Functest: variant of Test Case 1.
+ * Set up a Router R1 with one connected network/subnet N1/S1.
+ * Set up a second network N2.
+ * Create VPN1 and associate Router R1 and Network N2 to it.
+ * Hosts from N2 should be able to reach hosts in N1.
+
+ Name: VPN connecting Neutron networks and subnets using router association
+ Description: VPNs provide connectivity across Neutron networks and subnets if configured accordingly.
+
+ Test setup procedure:
+ * Set up VM1 and VM2 on Node1 and VM3 on Node2,
+ * All VMs have ports in the same Neutron Network N1 and 10.10.10/24 addresses
+ * (this subnet is denoted SN1 in the following).
+ * N1/SN1 are connected to router R1.
+ * Set up VM4 on Node1 and VM5 on Node2,
+ * Both VMs have ports in Neutron Network N2 and having 10.10.11/24 addresses
+ * (this subnet is denoted SN2 in the following)
+
+ Test execution:
+ * Create VPN1 with eRT<>iRT (so that connected subnets should not reach each other)
+ * Associate R1 to VPN1
+ Ping from VM1 to VM2 should work
+ Ping from VM1 to VM3 should work
+ Ping from VM1 to VM4 should not work
+ * Associate SN2 to VPN1
+ Ping from VM4 to VM5 should work
+ Ping from VM1 to VM4 should not work
+ Ping from VM1 to VM5 should not work
+ * Change VPN1 so that iRT=eRT
+ Ping from VM1 to VM4 should work
+ Ping from VM1 to VM5 should work
+
+- **Test Case 7** - Network associate a subnet with a router attached to a VPN and verify floating IP
+ functionality (disabled, because of ODL Bug 6962)
+
+ A test for https://bugs.opendaylight.org/show_bug.cgi?id=6962
+
+ Setup procedure:
+ * Create VM1 in a subnet with a router attached.
+ * Create VM2 in a different subnet with another router attached.
+ * Network associate them to a VPN with iRT=eRT
+ * Ping from VM1 to VM2 should work
+ * Assign a floating IP to VM1
+ * Pinging the floating IP should work
+
+- **Test Case 8** - Router associate a subnet with a router attached to a VPN and
+ verify floating IP functionality
+
+ Setup procedure:
+ * Create VM1 in a subnet with a router which is connected with the gateway
+ * Create VM2 in a different subnet without a router attached.
+ * Assoc the two networks in a VPN iRT=eRT
+ * One with router assoc, other with net assoc
+ * Try to ping from one VM to the other
+ * Assign a floating IP to the VM in the router assoc network
+ * Ping it
+
+- **Test Case 9** - Check fail mode in OVS br-int interfaces
+
+ This testcase checks if the fail mode is always 'secure'.
+ To accomplish it, a check is performed on all OVS br-int interfaces, for all OpenStack nodes.
+ The testcase is considered as successful if all OVS br-int interfaces have fail_mode=secure
+
+- **Test Case 10** - Check the communication between a group of VMs
+
+ This testcase investigates if communication between a group of VMs is interrupted upon deletion
+ and creation of VMs inside this group.
+
+ Test case flow:
+ * Create 3 VMs: VM_1 on compute 1, VM_2 on compute 1, VM_3 on compute 2.
+ * All VMs ping each other.
+ * VM_2 is deleted.
+ * Traffic is still flying between VM_1 and VM_3.
+ * A new VM, VM_4 is added to compute 1.
+ * Traffic is not interrupted and VM_4 can be reached as well.
+
+
+- **Testcase 11**: test Opendaylight resync and group_add_mod feature mechanisms
+
+ This is testcase to test Opendaylight resync and group_add_mod feature functionalities
+
+ Sub-testcase 11-1:
+ * Create and start 2 VMs, connected to a common Network.
+ New groups should appear in OVS dump
+ * OVS disconnects and the VMs and the networks are cleaned.
+ The new groups are still in the OVS dump,
+ cause OVS is not connected anymore, so it is not notified that the groups are deleted
+ * OVS re-connects.
+ The new groups should be deleted, as Opendaylight has to resync the groups totally and
+ should remove the groups since VMS are deleted.
+
+ Sub-testcase 11-2:
+ * Create and start 2 VMs, connected to a common Network.
+ New groups should appear in OVS dump
+ * OVS disconnects.
+ The new groups are still in the OVS dump, cause OVS is not connected anymore,
+ so it is not notified that the groups are deleted
+ * OVS re-connects.
+ The new groups should be still there, as the topology remains. Opendaylight Carbon's
+ group_add_mod mechanism should handle the already existing group.
+ * OVS re-connects.
+ The new groups should be still there, as the topology remains.
+ Opendaylight Carbon's group_add_mod mechanism should handle the already existing group.
+
+- **Testcase 12**: Test Resync mechanism between Opendaylight and OVS
+ This is the testcase to validate flows and groups are programmed correctly
+ after resync which is triggered by OVS del-controller/set-controller commands
+ and adding/remove iptables drop rule on OF port 6653.
+
+ Sub-testcase 12-1:
+ * Create and start 2 VMs, connected to a common Network
+ New flows and groups were added to OVS
+ * Reconnect the OVS by running del-controller and set-controller commands
+ The flows and groups are still intact and none of the flows/groups
+ are removed
+ * Reconnect the OVS by adding ip tables drop rule and then remove it
+ The flows and groups are still intact and none of the flows/groups
+ are removed
+
+- **Testcase 13**: Test ECMP (Equal-cost multi-path routing) for the extra route
+
+ This testcase validates spraying behavior in OvS when an extra route is
+ configured such that it can be reached from two nova VMs in the
+ same network.
+
+ Setup procedure:
+ * Create and start VM1 and VM2 configured with sub interface set to same ip address in both VMs,
+ connected to a common network/router.
+ * Update the VM1 and VM2's Neutron ports with allowed address pairs for sub interface ip/mac
+ addresses.
+ * Create BGPVPN with two route distinguishers.
+ * Associate router with BGPVPN.
+ * Update the router with above sub-interface ip address with nexthops set to VMs ip addresses.
+ * Create VM3 and connected to the same network.
+ * Ping sub-interface IP address from VM3.
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 0000000..d58d5d5
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,24 @@
+.. _sdnvpn:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+*********************************
+OPNFV SDNVPN
+*********************************
+
+.. toctree::
+ :numbered:
+ :maxdepth: 3
+
+ release/release-notes/index
+ release/configguide/index
+ release/userguide/index
+ release/installation/index
+ release/scenarios/os-odl-bgpvpn/index
+ development/requirements/index
+ development/overview/index
+ development/design/index
+
diff --git a/docs/release/installation/index.rst b/docs/release/installation/index.rst
index 2625ef9..089bc55 100644
--- a/docs/release/installation/index.rst
+++ b/docs/release/installation/index.rst
@@ -1,8 +1,6 @@
-.. _sdnvpn-installation:
-
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) Tim Irnich, (tim.irnich@ericsson.com) and others
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) OPNFV, Ericsson AB and others.
============================
SDN VPN feature installation
@@ -33,7 +31,9 @@ spec>.
When ODL is used as an SDN Controller in an OPNFV virtual deployment, ODL is
running on the OpenStack Controller VMs. It is therefore recommended to
-increase the amount of resources for these VMs.
+increase the amount of resources for these VMs. ODL is running in a separate
+VM in case of Fuel, thus, the below recommendation is not applicable when
+deploying the scenario on Fuel installer.
Our recommendation is to have 2 additional virtual cores and 8GB
additional virtual memory on top of the normally recommended
@@ -52,11 +52,11 @@ Installation using Fuel installer
Preparing the host to install Fuel by script
============================================
-.. Not all of these options are relevant for all scenarios. I advise following the
+.. Not all of these options are relevant for all scenarios. I advise following the
.. instructions applicable to the deploy tool used in the scenario.
-Before starting the installation of the os-odl-bgpnvp scenario some
-preparation of the machine that will host the Fuel VM must be done.
+Before starting the installation of the os-odl-bgpvpn-noha scenario the following
+preparation must be done on the machine that will host the Fuel VM.
Installation of required packages
@@ -66,17 +66,8 @@ Jumphost (or the host which serves the VMs for the virtual deployment) needs to
install the following packages:
::
- sudo apt-get install -y git make curl libvirt-bin libpq-dev qemu-kvm \
- qemu-system tightvncserver virt-manager sshpass \
- fuseiso genisoimage blackbox xterm python-pip \
- python-git python-dev python-oslo.config \
- python-pip python-dev libffi-dev libxml2-dev \
- libxslt1-dev libffi-dev libxml2-dev libxslt1-dev \
- expect curl python-netaddr p7zip-full
-
- sudo pip install GitPython pyyaml netaddr paramiko lxml scp \
- python-novaclient python-neutronclient python-glanceclient \
- python-keystoneclient debtcollector netifaces enum
+ sudo apt-get install -y git make curl libvirt-bin qemu-kvm \
+ python-pip python-dev
Download the source code and artifact
-------------------------------------
@@ -87,158 +78,58 @@ First of all the opnfv-fuel repository needs to be cloned:
git clone ssh://<user>@gerrit.opnfv.org:29418/fuel
-To check out a specific
-version of OPNFV, checkout the appropriate branch:
+To check out a specific version of OPNFV, checkout the appropriate branch:
::
cd fuel
- git checkout stable/<colorado|danube|euphrates|fraser>
-
-Now download the corresponding OPNFV Fuel ISO into an appropriate folder from
-the website
-::
- https://www.opnfv.org/software/downloads/release-archives
-
-Have in mind that the fuel repo version needs to map with the downloaded
-artifact. Note: it is also possible to build the Fuel image using the
-tools found in the fuel git repository, but this is out of scope of the
-procedure described here. Check the Fuel project documentation for more
-information on building the Fuel ISO.
-
+ git checkout stable/gambia
Simplified scenario deployment procedure using Fuel
===================================================
-This section describes the installation of the os-odl-bgpvpn-ha or
+This section describes the installation of the
os-odl-bgpvpn-noha OPNFV reference platform stack across a server cluster
or a single host as a virtual deployment.
-Scenario Preparation
---------------------
-dea.yaml and dha.yaml need to be copied and changed according to the lab-name/host
-where you deploy.
-Copy the full lab config from:
-::
-
- cp <path-to-opnfv-fuel-repo>/deploy/config/labs/devel-pipeline/elx \
- <path-to-opnfv-fuel-repo>/deploy/config/labs/devel-pipeline/<your-lab-name>
-
-Add at the bottom of dha.yaml
-::
-
- disks:
- fuel: 100G
- controller: 100G
- compute: 100G
-
- define_vms:
- controller:
- vcpu:
- value: 4
- memory:
- attribute_equlas:
- unit: KiB
- value: 16388608
- currentMemory:
- attribute_equlas:
- unit: KiB
- value: 16388608
-
-
-Check if the default settings in dea.yaml are in line with your intentions
-and make changes as required.
-
Installation procedures
-----------------------
-We describe several alternative procedures in the following.
-First, we describe several methods that are based on the deploy.sh script,
-which is also used by the OPNFV CI system.
-It can be found in the Fuel repository.
-
-In addition, the SDNVPN feature can also be configured manually in the Fuel GUI.
-This is described in the last subsection.
-
-Before starting any of the following procedures, go to
-::
-
- cd <opnfv-fuel-repo>/ci
-
-Full automatic virtual deployment High Availablity Mode
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The following command will deploy the high-availability flavor of SDNVPN scenario os-odl-bgpvpn-ha
-in a fully automatic way, i.e. all installation steps (Fuel server installation, configuration,
-node discovery and platform deployment) will take place without any further prompt for user input.
-::
-
- sudo bash ./deploy.sh -b file://<path-to-opnfv-fuel-repo>/config/ -l devel-pipeline -p <your-lab-name> -s os-odl_l2-bgpvpn-ha -i file://<path-to-fuel-iso>
+This chapter describes how to deploy the scenario using the deploy.sh script,
+which is also used by the OPNFV CI system. The script can be found in the Fuel
+repository.
Full automatic virtual deployment NO High Availability Mode
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-The following command will deploy the SDNVPN scenario in its non-high-availability flavor (note the
-different scenario name for the -s switch). Otherwise it does the same as described above.
-::
-
- sudo bash ./deploy.sh -b file://<path-to-opnfv-fuel-repo>/config/ -l devel-pipeline -p <your-lab-name> -s os-odl_l2-bgpvpn-noha -i file://<path-to-fuel-iso>
-
-Automatic Fuel installation and manual scenario deployment
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-A useful alternative to the full automatic procedure is to only autodeploy the Fuel host and to run host selection, role assignment and SDNVPN scenario configuration manually.
-::
-
- sudo bash ./deploy.sh -b file://<path-to-opnfv-fuel-repo>/config/ -l devel-pipeline -p <your-lab-name> -s os-odl_l2-bgpvpn-ha -i file://<path-to-fuel-iso> -e
-
-With -e option the installer does not launch environment deployment, so
-a user can do some modification before the scenario is really deployed.
-Another interesting option is the -f option which deploys the scenario using an existing Fuel host.
-
-The result of this installation is a fuel sever with the right config for
-BGPVPN. Now the deploy button on fuel dashboard can be used to deploy the environment.
-It is as well possible to do the configuration manuell.
-
-Feature configuration on existing Fuel
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-If a Fuel server is already provided but the fuel plugins for Opendaylight, Openvswitch
-and BGPVPN are not provided install them by:
+The following command will deploy the SDNVPN scenario in its non-high-availability flavor.
::
- cd /opt/opnfv/
- fuel plugins --install fuel-plugin-ovs-*.noarch.rpm
- fuel plugins --install opendaylight-*.noarch.rpm
- fuel plugins --install bgpvpn-*.noarch.rpm
-
-If plugins are installed and you want to update them use --force flag.
-
-Now the feature can be configured. Create a new environment with "Neutron with ML2 plugin" and
-in there "Neutron with tunneling segmentation".
-Go to Networks/Settings/Other and check "Assign public network to all nodes". This is required for
-features such as floating IP, which require the Compute hosts to have public interfaces.
-Then go to settings/other and check "OpenDaylight plugin", "Use ODL to manage L3 traffic",
-"BGPVPN plugin" and set the OpenDaylight package version to "5.2.0-1". Then you should
-be able to check "BGPVPN extensions" in OpenDaylight plugin section.
-
-Now the deploy button on fuel dashboard can be used to deploy the environment.
+ ci/deploy.sh -l <lab_name> \
+ -p <pod_name> \
+ -b <URI to configuration repo containing the PDF file> \
+ -s os-odl-bgpvpn-noha \
+ -D \
+ -S <Storage directory for disk images> |& tee deploy.log
Virtual deployment using Apex installer
=======================================
Prerequisites
-^^^^^^^^^^^^^
+-------------
+
For Virtual Apex deployment a host with Centos 7 is needed. This installation
was tested on centos-release-7-2.1511.el7.centos.2.10.x86_64 however any other
Centos 7 version should be fine.
Build and Deploy
-^^^^^^^^^^^^^^^^
-Download the Apex repo from opnfv gerrit and checkout stable/danube:
+----------------
+
+Download the Apex repo from opnfv gerrit and checkout stable/gambia:
::
git clone ssh://<user>@gerrit.opnfv.org:29418/apex
cd apex
- git checkout stable/danube
+ git checkout stable/gambia
In apex/contrib you will find simple_deploy.sh:
::
diff --git a/docs/release/release-notes/index.rst b/docs/release/release-notes/index.rst
index 2b6664a..c7e07ee 100644
--- a/docs/release/release-notes/index.rst
+++ b/docs/release/release-notes/index.rst
@@ -4,9 +4,9 @@
.. http://creativecommons.org/licenses/by/4.0
.. (c) Nikolas Hermanns, (nikolas.hermanns@ericsson.com) and others
-==================
+=====================
SDN VPN release notes
-==================
+=====================
.. toctree::
:maxdepth: 3
diff --git a/docs/release/release-notes/release-notes.rst b/docs/release/release-notes/release-notes.rst
index 1a3e9a5..a5b671c 100644
--- a/docs/release/release-notes/release-notes.rst
+++ b/docs/release/release-notes/release-notes.rst
@@ -3,31 +3,26 @@
.. _-os-odl-bgpvpn-ha:
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier: CC-BY-4.0
.. (c) Periyasamy Palanisamy <periyasamy.palanisamy@ericsson.com> and others
=====================
SDN VPN Release Notes
=====================
-License
-=======
-
-This work is licensed under a Creative Commons Attribution 4.0 International
-License. .. http://creativecommons.org/licenses/by/4.0 ..
-(c) Tim Irnich (Ericsson) and others
Abstract
========
-This document comprises the release notes for the SDN VPN feature contained in the Fraser
+This document comprises the release notes for the SDN VPN feature contained in the Gambia
release of OPNFV.
Important notes
===============
-In the Fraser release, SDN VPN only supports ODL as a backend. Make sure to always deploy
-SDN VPN and ODL together. Make use of deployment scenarios including the SDNVPN feature such as os_odl_bgpvpn_{ha|noha}.
+In the Gambia release, SDN VPN only supports ODL as a backend. Make sure to always deploy
+SDN VPN and ODL together. Make use of deployment scenarios including the SDNVPN feature such
+as os_odl_bgpvpn_{ha|noha}.
Summary
=======
@@ -44,28 +39,28 @@ Release Data
| **Project** | sdnvpn |
| | |
+--------------------------------------+-------------------------------------------+
-| **Repo/tag** | opnfv-6.2.0 |
+| **Repo/tag** | opnfv-7.1.0 |
| | |
+--------------------------------------+-------------------------------------------+
-| **Release designation** | Fraser 6.2 |
+| **Release designation** | Gambia 7.1 |
| | |
+--------------------------------------+-------------------------------------------+
-| **Release date** | June 29 2018 |
+| **Release date** | Dec 14, 2018 |
| | |
+--------------------------------------+-------------------------------------------+
-| **Purpose of the delivery** | New test cases |
+| **Purpose of the delivery** | OPNFV Gambia 7.1 release |
| | |
+--------------------------------------+-------------------------------------------+
Version change
--------------
-Compared to the Euphrates release, new testcases were added to
-functest to guarantee functionality.
+Compared to the Fraser release, functest testcases were enriched to guarantee functionality.
+Also several enhancements were added to improve testing efficiency.
Module version changes
~~~~~~~~~~~~~~~~~~~~~~
-ODL has been upgraded to Nitrogen.
+.. ODL has been upgraded to Nitrogen.
Document changes
~~~~~~~~~~~~~~~~
@@ -96,23 +91,23 @@ Deliverables
Software deliverables
~~~~~~~~~~~~~~~~~~~~~
-- Changes to Apex to enable a BGPVPN deployment and integration of Quagga BGP.
-- Integration of VPN Service functional tests and BGPVPN API tests into Functest framework.
-- Enabling performance tests in Yardstick.
-- Changes to 6Wind Zrpcd to enable integration with Apex.
-- Intra Datacenter ECMP (Equal Cost Multi Pathing) Testcase.
-- OpenDaylight and Open vSwitch Resynchronization Testcase.
-- Improved quality and stability of Testcase runs in CI environment.
-- External BGPVPN scenario added for XCI based deployment for BGPVPN scenarios.
+- Orchestrate BGPVPN with Openstack HEAT templates
+- Verify BGP route exchange with a peer in both directions
+- Support for ECMP load balancing
+- Consolidate image creation in Apex and Fuel
+- Remove the dependency between not running quagga and created flows
+- Delete ODL configuration after each test case run
+- Add BGPVPN scenarios to XCI and enable SDNVPN tests
+- Enable and test ODL clustering for bgpvpn-ha scenario
+
Documentation deliverables
~~~~~~~~~~~~~~~~~~~~~~~~~~
-- Configuration guide
-
-- User guide
-
+- Installation guide
- Release notes (this document)
+- Overview
+- Test scenario description
Known Limitations, Issues and Workarounds
=========================================
@@ -127,12 +122,12 @@ Known issues
Moving to the new NetVirt has caused a regression in which a subnet
cannot be both attached to a Router and Network associated to a VPN.
This has been worked around in the tests and the upstream bug is being
-tracked [0] and [2].
+tracked [0]_ and [2]_.
NAT for a VM which is in a private neutron network does not work. Instances
created in subnets that are connected to the public network via a gateway
should have external connectivity. This does not work and can be worked
-around by assigning a Floating IP to the instance [1].
+around by assigning a Floating IP to the instance [1]_.
Currently we observe non-deterministic failures of individual tests within the
SDNVPN section of the Functest suite, which are not reproducible in the development
@@ -159,6 +154,6 @@ with the exceptions described above.
References
==========
-[0] https://jira.opnfv.org/projects/SDNVPN/issues/SDNVPN-94
-[1] https://jira.opnfv.org/projects/SDNVPN/issues/SDNVPN-99
-[2] https://jira.opendaylight.org/browse/NETVIRT-932
+.. [0] https://jira.opnfv.org/projects/SDNVPN/issues/SDNVPN-94
+.. [1] https://jira.opnfv.org/projects/SDNVPN/issues/SDNVPN-99
+.. [2] https://jira.opendaylight.org/browse/NETVIRT-932
diff --git a/docs/release/scenarios/os-odl-bgpvpn/scenario.description.rst b/docs/release/scenarios/os-odl-bgpvpn/scenario.description.rst
index 5d6c06d..8d1cb9c 100644
--- a/docs/release/scenarios/os-odl-bgpvpn/scenario.description.rst
+++ b/docs/release/scenarios/os-odl-bgpvpn/scenario.description.rst
@@ -1,5 +1,5 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier: CC-BY-4.0
.. (c) Periyasamy Palanisamy <periyasamy.palanisamy@ericsson.com> and others
Introduction
@@ -21,9 +21,8 @@ deployment scenarios, which is derived from the baseline
os-odl-nofeature scenario.
The BGPVPN feature enables creation of BGP VPNs on the Neutron API
-according to the OpenStack BGPVPN blueprint at
-https://blueprints.launchpad.net/neutron/+spec/neutron-bgp-vpn. In a
-nutshell, the blueprint defines a BGPVPN object and a number of ways how
+according to the `OpenStack BGPVPN blueprint <https://blueprints.launchpad.net/neutron/+spec/neutron-bgp-vpn>`_.
+In a nutshell, the blueprint defines a BGPVPN object and a number of ways how
to associate it with the existing Neutron object model, as well as a
unique definition of the related semantics. The BGPVPN framework
supports a backend driver model with currently available drivers for
@@ -72,23 +71,23 @@ Scenario usage overview
Configuring SDNVPN features
---------------------------
-Apex installer has specific procedures to deploy the OPNFV platform so that the SDNVPN feature is enabled.
+Apex installer has specific procedures to deploy the OPNFV platform so that the SDNVPN feature is
+enabled.
-APEX installer configuration
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+APEX installer configuration and BGPVPN deployment
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-To install the SDNVPN feature using the APEX installer, follow the APEX installation guide
-(https://wiki.opnfv.org/display/apex/Integration+Guide) and activate the SDNVPN feature when prompted (step "# Now execute a deployment")
+To install the SDNVPN feature using the APEX installer, follow the
+`APEX installation guide <https://wiki.opnfv.org/display/apex/Integration+Guide>`_.
+When prompted, activate the SDNVPN feature based on the OpenStack configuration:
-For os-odl-bgpvpn-noha deployment:
-----------------------------------
+* For os-odl-bgpvpn-noha deployment:
-python3 deploy.py -v -n ../config/network/network_settings.yaml -d ../config/deploy/os-odl-bgpvpn-noha.yaml --deploy-dir ../build --lib-dir ../lib --image-dir ../.build --virtual-computes 2 --virtual-default-ram 16
+ python3 deploy.py -v -n ../config/network/network_settings.yaml -d ../config/deploy/os-odl-bgpvpn-noha.yaml --deploy-dir ../build --lib-dir ../lib --image-dir ../.build --virtual-computes 2 --virtual-default-ram 16
-For os-odl-bgpvpn-ha deployment:
---------------------------------
+* For os-odl-bgpvpn-ha deployment:
-python3 deploy.py -v -n ../config/network/network_settings.yaml -d ../config/deploy/os-odl-bgpvpn-ha.yaml --deploy-dir ../build --lib-dir ../lib --image-dir ../.build --virtual-computes 2 --virtual-default-ram 16
+ python3 deploy.py -v -n ../config/network/network_settings.yaml -d ../config/deploy/os-odl-bgpvpn-ha.yaml --deploy-dir ../build --lib-dir ../lib --image-dir ../.build --virtual-computes 2 --virtual-default-ram 16
Limitations, Issues and Workarounds
===================================
@@ -107,5 +106,5 @@ Integration with data center gateway will not work due to missing OVS patches fo
References
==========
-For more information on the OPNFV Fraser release, please visit
-https://www.opnfv.org/software
+For more information on the OPNFV latest stable release, please visit
+https://www.opnfv.org/software \ No newline at end of file
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 0000000..9fde2df
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,2 @@
+lfdocs-conf
+sphinx_opnfv_theme
diff --git a/odl-pipeline/lib/odl_reinstaller/odl_reinstaller.py b/odl-pipeline/lib/odl_reinstaller/odl_reinstaller.py
index 7e444a8..3d29724 100644
--- a/odl-pipeline/lib/odl_reinstaller/odl_reinstaller.py
+++ b/odl-pipeline/lib/odl_reinstaller/odl_reinstaller.py
@@ -28,8 +28,6 @@ ODL_AAA_JAR = '/opt/opendaylight/bin/aaa-cli-jar.jar'
class ODLReInstaller(Service):
def __init__(self):
- self.netvirt_url = "restconf/operational/network-topology:" \
- "network-topology/topology/netvirt:1"
self.nodes = None
self.odl_node = None
@@ -80,12 +78,12 @@ class ODLReInstaller(Service):
self.reinstall_odl(self.odl_node, odl_artifact)
# Wait for ODL to come back up
- full_netvirt_url = "http://{}:8081/{}".format(
- self.odl_node.config['address'], self.netvirt_url)
+ full_netvirt_url = "http://{}:8081/diagstatus".format(
+ self.odl_node.config['address'])
counter = 1
while counter <= 10:
try:
- self.odl_node.execute("curl --fail -u admin:admin {}".format(
+ self.odl_node.execute("curl --fail {}".format(
full_netvirt_url))
LOG.info("New OpenDaylight NetVirt is Up")
break
@@ -283,5 +281,6 @@ class ODLReinstallerException(Exception):
def main():
ODLReInstaller().start()
+
if __name__ == '__main__':
main()
diff --git a/odl-pipeline/lib/test_environment/test_environment.py b/odl-pipeline/lib/test_environment/test_environment.py
index 65d40bb..a56c36f 100755
--- a/odl-pipeline/lib/test_environment/test_environment.py
+++ b/odl-pipeline/lib/test_environment/test_environment.py
@@ -157,5 +157,6 @@ def main():
main = TestEnvironment()
main.start()
+
if __name__ == '__main__':
main()
diff --git a/odl-pipeline/lib/tripleo_introspector/tripleo_introspector.py b/odl-pipeline/lib/tripleo_introspector/tripleo_introspector.py
index aa6ebbb..9258e83 100755
--- a/odl-pipeline/lib/tripleo_introspector/tripleo_introspector.py
+++ b/odl-pipeline/lib/tripleo_introspector/tripleo_introspector.py
@@ -122,5 +122,6 @@ class TripleOInspectorException(Exception):
def main():
TripleOIntrospector().start()
+
if __name__ == '__main__':
main()
diff --git a/odl-pipeline/lib/utils/processutils.py b/odl-pipeline/lib/utils/processutils.py
index 98162c8..901e74b 100755
--- a/odl-pipeline/lib/utils/processutils.py
+++ b/odl-pipeline/lib/utils/processutils.py
@@ -29,6 +29,7 @@ def _subprocess_setup():
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
# NOTE(flaper87): The following globals are used by `mask_password`
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
diff --git a/requirements.txt b/requirements.txt
index 2689b31..252b214 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,15 +1,12 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-pbr!=2.1.0,>=2.0.0 # Apache-2.0
-requests>=2.14.2 # Apache-2.0
+pbr!=2.1.0 # Apache-2.0
+requests # Apache-2.0
opnfv
-PyYAML>=3.12 # MIT
+PyYAML # MIT
networking-bgpvpn>=7.0.0 # Apache-2.0
-python-cinderclient>=3.3.0 # Apache-2.0
-python-glanceclient>=2.8.0 # Apache-2.0
-python-heatclient>=1.10.0 # Apache-2.0
-python-keystoneclient>=3.8.0 # Apache-2.0
-python-neutronclient>=6.7.0 # Apache-2.0
-python-novaclient>=9.1.0 # Apache-2.0
+python-keystoneclient!=2.1.0 # Apache-2.0
+python-neutronclient # Apache-2.0
xtesting # Apache-2.0
+openstacksdk # Apache-2.0
diff --git a/sdnvpn/artifacts/quagga_setup.sh b/sdnvpn/artifacts/quagga_setup.sh
index fbd229f..c6e6a9c 100644
--- a/sdnvpn/artifacts/quagga_setup.sh
+++ b/sdnvpn/artifacts/quagga_setup.sh
@@ -9,22 +9,22 @@ echo 'ubuntu:opnfv' | chpasswd
sleep 100
# Variables to be filled in with python
-NEIGHBOR_IP=$1
-OWN_IP=$2
+NEIGHBOR_IP={0}
+OWN_IP={1}
# directly access the instance from the external net without NAT
-EXT_NET_MASK=$3
-IP_PREFIX=$4
-RD=$5
-IRT=$6
-ERT=$7
+EXT_NET_MASK={2}
+IP_PREFIX={3}
+RD={4}
+IRT={5}
+ERT={6}
-if [[ $(getent hosts | awk '{print $2}') != *"$(cat /etc/hostname | awk '{print $1}')"* ]]
+if [[ $(getent hosts | awk '{{print $2}}') != *"$(cat /etc/hostname | awk '{{print $1}}')"* ]]
then
-echo "127.0.1.1 $(cat /etc/hostname | awk '{print $1}')" | tee -a /etc/hosts
+echo "127.0.1.1 $(cat /etc/hostname | awk '{{print $1}}')" | tee -a /etc/hosts
fi
quagga_int=''
-for net_int in $(netstat -ia | awk 'NR>2{print $1}');
+for net_int in $(netstat -ia | awk 'NR>2{{print $1}}');
do
if [ -z "$(ifconfig | grep $net_int)" ]
then
@@ -35,10 +35,10 @@ done
if [ -z "$quagga_int" ]
then
echo 'No available network interface'
-fi
-
+else
ip link set $quagga_int up
ip addr add $OWN_IP/$EXT_NET_MASK dev $quagga_int
+fi
# Download quagga/zrpc rpms
cd /root
diff --git a/sdnvpn/artifacts/testcase_1bis.yaml b/sdnvpn/artifacts/testcase_1bis.yaml
new file mode 100644
index 0000000..f269943
--- /dev/null
+++ b/sdnvpn/artifacts/testcase_1bis.yaml
@@ -0,0 +1,234 @@
+heat_template_version: 2013-05-23
+
+description: >
+ Template for SDNVPN testcase 1
+ VPN provides connectivity between subnets
+
+parameters:
+ flavor:
+ type: string
+ description: flavor for the servers to be created
+ constraints:
+ - custom_constraint: nova.flavor
+ image_n:
+ type: string
+ description: image for the servers to be created
+ constraints:
+ - custom_constraint: glance.image
+ av_zone_1:
+ type: string
+ description: availability zone 1
+ av_zone_2:
+ type: string
+ description: availability zone 2
+
+ net_1_name:
+ type: string
+ description: network 1
+ subnet_1_name:
+ type: string
+ description: subnet 1 name
+ subnet_1_cidr:
+ type: string
+ description: subnet 1 cidr
+ net_2_name:
+ type: string
+ description: network 2
+ subnet_2_name:
+ type: string
+ description: subnet 2 name
+ subnet_2_cidr:
+ type: string
+ description: subnet 1 cidr
+
+ secgroup_name:
+ type: string
+ description: security group name
+ secgroup_descr:
+ type: string
+ description: security group slogan
+
+ instance_1_name:
+ type: string
+ description: instance name
+ instance_2_name:
+ type: string
+ description: instance name
+ instance_3_name:
+ type: string
+ description: instance name
+ instance_4_name:
+ type: string
+ description: instance name
+ instance_5_name:
+ type: string
+ description: instance name
+
+ ping_count:
+ type: string
+ description: ping count for user data script
+ default: 10
+
+resources:
+ net_1:
+ type: OS::Neutron::Net
+ properties:
+ name: { get_param: net_1_name }
+ subnet_1:
+ type: OS::Neutron::Subnet
+ properties:
+ name: { get_param: subnet_1_name }
+ network: { get_resource: net_1 }
+ cidr: { get_param: subnet_1_cidr }
+ net_2:
+ type: OS::Neutron::Net
+ properties:
+ name: { get_param: net_2_name }
+ subnet_2:
+ type: OS::Neutron::Subnet
+ properties:
+ name: { get_param: subnet_2_name }
+ network: { get_resource: net_2 }
+ cidr: { get_param: subnet_2_cidr }
+
+ sec_group:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name: { get_param: secgroup_name }
+ description: { get_param: secgroup_descr }
+ rules:
+ - protocol: icmp
+ remote_ip_prefix: 0.0.0.0/0
+ - protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ remote_ip_prefix: 0.0.0.0/0
+
+ vm1:
+ type: OS::Nova::Server
+ depends_on: [ vm2, vm3, vm4, vm5 ]
+ properties:
+ name: { get_param: instance_1_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_1 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - subnet: { get_resource: subnet_1 }
+ config_drive: True
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/sh
+ set $IP_VM2 $IP_VM3 $IP_VM4 $IP_VM5
+ while true; do
+ for i do
+ ip=$i
+ ping -c $COUNT $ip 2>&1 >/dev/null
+ RES=$?
+ if [ \"Z$RES\" = \"Z0\" ] ; then
+ echo ping $ip OK
+ else echo ping $ip KO
+ fi
+ done
+ sleep 1
+ done
+ params:
+ $IP_VM2: { get_attr: [vm2, addresses, { get_resource: net_1}, 0, addr] }
+ $IP_VM3: { get_attr: [vm3, addresses, { get_resource: net_1}, 0, addr] }
+ $IP_VM4: { get_attr: [vm4, addresses, { get_resource: net_2}, 0, addr] }
+ $IP_VM5: { get_attr: [vm5, addresses, { get_resource: net_2}, 0, addr] }
+ $COUNT: { get_param: ping_count }
+ vm2:
+ type: OS::Nova::Server
+ properties:
+ name: { get_param: instance_2_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_1 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - subnet: { get_resource: subnet_1 }
+ vm3:
+ type: OS::Nova::Server
+ properties:
+ name: { get_param: instance_3_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_2 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - subnet: { get_resource: subnet_1 }
+ vm4:
+ type: OS::Nova::Server
+ depends_on: vm5
+ properties:
+ name: { get_param: instance_4_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_1 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - subnet: { get_resource: subnet_2 }
+ config_drive: True
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/sh
+ set $IP_VM5
+ while true; do
+ for i do
+ ip=$i
+ ping -c $COUNT $ip 2>&1 >/dev/null
+ RES=$?
+ if [ \"Z$RES\" = \"Z0\" ] ; then
+ echo ping $ip OK
+ else echo ping $ip KO
+ fi
+ done
+ sleep 1
+ done
+ params:
+ $IP_VM5: { get_attr: [vm5, addresses, { get_resource: net_2}, 0, addr] }
+ $COUNT: { get_param: ping_count }
+
+ vm5:
+ type: OS::Nova::Server
+ properties:
+ name: { get_param: instance_5_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_2 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - subnet: { get_resource: subnet_2 }
+
+outputs:
+ net_1_o:
+ description: the id of network 1
+ value: { get_attr: [net_1, show, id] }
+ net_2_o:
+ description: the id of network 2
+ value: { get_attr: [net_2, show, id] }
+ vm1_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm1, show, name] }
+ vm2_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm2, show, name] }
+ vm3_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm3, show, name] }
+ vm4_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm4, show, name] }
+ vm5_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm5, show, name] }
diff --git a/sdnvpn/artifacts/testcase_2bis.yaml b/sdnvpn/artifacts/testcase_2bis.yaml
new file mode 100644
index 0000000..0319a6d
--- /dev/null
+++ b/sdnvpn/artifacts/testcase_2bis.yaml
@@ -0,0 +1,289 @@
+heat_template_version: 2013-05-23
+
+description: >
+ Template for SDNVPN testcase 2
+ tenant separation
+
+parameters:
+ flavor:
+ type: string
+ description: flavor for the servers to be created
+ constraints:
+ - custom_constraint: nova.flavor
+ image_n:
+ type: string
+ description: image for the servers to be created
+ constraints:
+ - custom_constraint: glance.image
+ av_zone_1:
+ type: string
+ description: availability zone 1
+ id_rsa_key:
+ type: string
+ description: id_rsa file contents for the vms
+
+ net_1_name:
+ type: string
+ description: network 1
+ subnet_1a_name:
+ type: string
+ description: subnet 1a name
+ subnet_1a_cidr:
+ type: string
+ description: subnet 1a cidr
+ subnet_1b_name:
+ type: string
+ description: subnet 1b name
+ subnet_1b_cidr:
+ type: string
+ description: subnet 1b cidr
+ router_1_name:
+ type: string
+ description: router 1 name
+ net_2_name:
+ type: string
+ description: network 2
+ subnet_2a_name:
+ type: string
+ description: subnet 2a name
+ subnet_2a_cidr:
+ type: string
+ description: subnet 2a cidr
+ subnet_2b_name:
+ type: string
+ description: subnet 2b name
+ subnet_2b_cidr:
+ type: string
+ description: subnet 2b cidr
+ router_2_name:
+ type: string
+ description: router 2 name
+
+ secgroup_name:
+ type: string
+ description: security group name
+ secgroup_descr:
+ type: string
+ description: security group slogan
+
+ instance_1_name:
+ type: string
+ description: instance name
+ instance_2_name:
+ type: string
+ description: instance name
+ instance_3_name:
+ type: string
+ description: instance name
+ instance_4_name:
+ type: string
+ description: instance name
+ instance_5_name:
+ type: string
+ description: instance name
+
+ instance_1_ip:
+ type: string
+ description: instance fixed ip
+ instance_2_ip:
+ type: string
+ description: instance fixed ip
+ instance_3_ip:
+ type: string
+ description: instance fixed ip
+ instance_4_ip:
+ type: string
+ description: instance fixed ip
+ instance_5_ip:
+ type: string
+ description: instance fixed ip
+
+resources:
+ net_1:
+ type: OS::Neutron::Net
+ properties:
+ name: { get_param: net_1_name }
+ subnet_1a:
+ type: OS::Neutron::Subnet
+ properties:
+ name: { get_param: subnet_1a_name }
+ network: { get_resource: net_1 }
+ cidr: { get_param: subnet_1a_cidr }
+ net_2:
+ type: OS::Neutron::Net
+ properties:
+ name: { get_param: net_2_name }
+ subnet_2b:
+ type: OS::Neutron::Subnet
+ properties:
+ name: { get_param: subnet_2b_name }
+ network: { get_resource: net_2 }
+ cidr: { get_param: subnet_2b_cidr }
+
+ sec_group:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name: { get_param: secgroup_name }
+ description: { get_param: secgroup_descr }
+ rules:
+ - protocol: icmp
+ remote_ip_prefix: 0.0.0.0/0
+ - protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ remote_ip_prefix: 0.0.0.0/0
+
+ vm1:
+ type: OS::Nova::Server
+ depends_on: [ vm2, vm4 ]
+ properties:
+ name: { get_param: instance_1_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_1 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - network: { get_resource: net_1 }
+ fixed_ip: { get_param: instance_1_ip }
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/sh
+ sudo mkdir -p /home/cirros/.ssh/
+ sudo chown cirros:cirros /home/cirros/.ssh/
+ sudo echo $ID_RSA > /home/cirros/.ssh/id_rsa.enc
+ sudo base64 -d /home/cirros/.ssh/id_rsa.enc > /home/cirros/.ssh/id_rsa
+ sudo chown cirros:cirros /home/cirros/.ssh/id_rsa
+ sudo echo $AUTH_KEYS > /home/cirros/.ssh/authorized_keys
+ sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys
+ chmod 700 /home/cirros/.ssh
+ chmod 644 /home/cirros/.ssh/authorized_keys
+ chmod 600 /home/cirros/.ssh/id_rsa
+ echo gocubsgo > cirros_passwd
+ set $IP_VM2 $IP_VM4
+ echo will try to ssh to $IP_VM2 and $IP_VM4
+ while true; do
+ for i do
+ ip=$i
+ hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa cirros@$ip 'hostname' </dev/zero 2>/dev/null)
+ RES=$?
+ echo $RES
+ if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;
+ else echo $ip 'not reachable';fi;
+ done
+ sleep 1
+ done
+ params:
+ $IP_VM2: { get_param: instance_2_ip }
+ $IP_VM4: { get_param: instance_4_ip }
+ $ID_RSA: { get_param: id_rsa_key }
+ $AUTH_KEYS: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e\
+ stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9\
+ sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU\
+ ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= cirros@test1"
+ vm2:
+ type: OS::Nova::Server
+ properties:
+ name: { get_param: instance_2_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_1 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - network: { get_resource: net_1 }
+ fixed_ip: { get_param: instance_2_ip }
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/sh
+ sudo mkdir -p /home/cirros/.ssh/
+ sudo chown cirros:cirros /home/cirros/.ssh/
+ sudo echo $ID_RSA > /home/cirros/.ssh/id_rsa.enc
+ sudo base64 -d /home/cirros/.ssh/id_rsa.enc > /home/cirros/.ssh/id_rsa
+ sudo chown cirros:cirros /home/cirros/.ssh/id_rsa
+ sudo echo $AUTH_KEYS > /home/cirros/.ssh/authorized_keys
+ sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys
+ chmod 700 /home/cirros/.ssh
+ chmod 644 /home/cirros/.ssh/authorized_keys
+ chmod 600 /home/cirros/.ssh/id_rsa
+ params:
+ $ID_RSA: { get_param: id_rsa_key }
+ $AUTH_KEYS: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e\
+ stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9\
+ sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU\
+ ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= cirros@test1"
+ vm4:
+ type: OS::Nova::Server
+ depends_on: vm2
+ properties:
+ name: { get_param: instance_4_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_1 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - network: { get_resource: net_2 }
+ fixed_ip: { get_param: instance_4_ip }
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/sh
+ sudo mkdir -p /home/cirros/.ssh/
+ sudo chown cirros:cirros /home/cirros/.ssh/
+ sudo echo $ID_RSA > /home/cirros/.ssh/id_rsa.enc
+ sudo base64 -d /home/cirros/.ssh/id_rsa.enc > /home/cirros/.ssh/id_rsa
+ sudo chown cirros:cirros /home/cirros/.ssh/id_rsa
+ sudo echo $AUTH_KEYS > /home/cirros/.ssh/authorized_keys
+ sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys
+ chmod 700 /home/cirros/.ssh
+ chmod 644 /home/cirros/.ssh/authorized_keys
+ chmod 600 /home/cirros/.ssh/id_rsa
+ set $IP_VM1
+ echo will try to ssh to $IP_VM1
+ while true; do
+ for i do
+ ip=$i
+ hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa cirros@$ip 'hostname' </dev/zero 2>/dev/null)
+ RES=$?
+ if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;
+ else echo $ip 'not reachable';fi;
+ done
+ sleep 1
+ done
+ params:
+ $IP_VM1: { get_param: instance_1_ip }
+ $ID_RSA: { get_param: id_rsa_key }
+ $AUTH_KEYS: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e\
+ stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9\
+ sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU\
+ ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= cirros@test1"
+ $DROPBEAR_PASSWORD: gocubsgo
+outputs:
+ net_1_o:
+ description: the id of network 1
+ value: { get_attr: [net_1, show, id] }
+ net_2_o:
+ description: the id of network 2
+ value: { get_attr: [net_2, show, id] }
+
+ vm1_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm1, show, name] }
+ vm2_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm2, show, name] }
+ vm3_o:
+ description: dummy
+ value: { get_attr: [vm2, show, name] }
+ vm4_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm4, show, name] }
+ vm5_o:
+ description: dummy
+ value: { get_attr: [vm2, show, name] }
diff --git a/sdnvpn/artifacts/testcase_4bis.yaml b/sdnvpn/artifacts/testcase_4bis.yaml
new file mode 100644
index 0000000..ee59e1d
--- /dev/null
+++ b/sdnvpn/artifacts/testcase_4bis.yaml
@@ -0,0 +1,247 @@
+heat_template_version: 2013-05-23
+
+description: >
+ Template for SDNVPN testcase 4
+ VPN provides connectivity between subnets using router association
+
+parameters:
+ flavor:
+ type: string
+ description: flavor for the servers to be created
+ constraints:
+ - custom_constraint: nova.flavor
+ image_n:
+ type: string
+ description: image for the servers to be created
+ constraints:
+ - custom_constraint: glance.image
+ av_zone_1:
+ type: string
+ description: availability zone 1
+ av_zone_2:
+ type: string
+ description: availability zone 2
+
+ net_1_name:
+ type: string
+ description: network 1
+ subnet_1_name:
+ type: string
+ description: subnet 1 name
+ subnet_1_cidr:
+ type: string
+ description: subnet 1 cidr
+ router_1_name:
+ type: string
+    description: router 1 name
+ net_2_name:
+ type: string
+ description: network 2
+ subnet_2_name:
+ type: string
+ description: subnet 2 name
+ subnet_2_cidr:
+ type: string
+    description: subnet 2 cidr
+
+ secgroup_name:
+ type: string
+ description: security group name
+ secgroup_descr:
+ type: string
+ description: security group slogan
+
+ instance_1_name:
+ type: string
+ description: instance name
+ instance_2_name:
+ type: string
+ description: instance name
+ instance_3_name:
+ type: string
+ description: instance name
+ instance_4_name:
+ type: string
+ description: instance name
+ instance_5_name:
+ type: string
+ description: instance name
+
+ ping_count:
+ type: string
+ description: ping count for user data script
+ default: 10
+
+resources:
+ net_1:
+ type: OS::Neutron::Net
+ properties:
+ name: { get_param: net_1_name }
+ subnet_1:
+ type: OS::Neutron::Subnet
+ properties:
+ name: { get_param: subnet_1_name }
+ network: { get_resource: net_1 }
+ cidr: { get_param: subnet_1_cidr }
+ router_1:
+ type: OS::Neutron::Router
+ properties:
+ name: { get_param: router_1_name }
+ routerinterface_1:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router_1 }
+ subnet_id: { get_resource: subnet_1 }
+
+ net_2:
+ type: OS::Neutron::Net
+ properties:
+ name: { get_param: net_2_name }
+ subnet_2:
+ type: OS::Neutron::Subnet
+ properties:
+ name: { get_param: subnet_2_name }
+ network: { get_resource: net_2 }
+ cidr: { get_param: subnet_2_cidr }
+
+ sec_group:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name: { get_param: secgroup_name }
+ description: { get_param: secgroup_descr }
+ rules:
+ - protocol: icmp
+ remote_ip_prefix: 0.0.0.0/0
+ - protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ remote_ip_prefix: 0.0.0.0/0
+
+ vm1:
+ type: OS::Nova::Server
+ depends_on: [ vm2, vm3, vm4, vm5 ]
+ properties:
+ name: { get_param: instance_1_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_1 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - subnet: { get_resource: subnet_1 }
+ config_drive: True
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/sh
+ set $IP_VM2 $IP_VM3 $IP_VM4 $IP_VM5
+ while true; do
+ for i do
+ ip=$i
+ ping -c $COUNT $ip 2>&1 >/dev/null
+ RES=$?
+ if [ \"Z$RES\" = \"Z0\" ] ; then
+ echo ping $ip OK
+ else echo ping $ip KO
+ fi
+ done
+ sleep 1
+ done
+ params:
+ $IP_VM2: { get_attr: [vm2, addresses, { get_resource: net_1}, 0, addr] }
+ $IP_VM3: { get_attr: [vm3, addresses, { get_resource: net_1}, 0, addr] }
+ $IP_VM4: { get_attr: [vm4, addresses, { get_resource: net_2}, 0, addr] }
+ $IP_VM5: { get_attr: [vm5, addresses, { get_resource: net_2}, 0, addr] }
+ $COUNT: { get_param: ping_count }
+ vm2:
+ type: OS::Nova::Server
+ properties:
+ name: { get_param: instance_2_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_1 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - subnet: { get_resource: subnet_1 }
+ vm3:
+ type: OS::Nova::Server
+ properties:
+ name: { get_param: instance_3_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_2 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - subnet: { get_resource: subnet_1 }
+ vm4:
+ type: OS::Nova::Server
+ depends_on: vm5
+ properties:
+ name: { get_param: instance_4_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_1 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - subnet: { get_resource: subnet_2 }
+ config_drive: True
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/sh
+ set $IP_VM5
+ while true; do
+ for i do
+ ip=$i
+ ping -c $COUNT $ip 2>&1 >/dev/null
+ RES=$?
+ if [ \"Z$RES\" = \"Z0\" ] ; then
+ echo ping $ip OK
+ else echo ping $ip KO
+ fi
+ done
+ sleep 1
+ done
+ params:
+ $IP_VM5: { get_attr: [vm5, addresses, { get_resource: net_2}, 0, addr] }
+ $COUNT: { get_param: ping_count }
+
+ vm5:
+ type: OS::Nova::Server
+ properties:
+ name: { get_param: instance_5_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_2 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - subnet: { get_resource: subnet_2 }
+
+outputs:
+ router_1_o:
+    description: the id of router 1
+ value: { get_attr: [router_1, show, id] }
+ net_2_o:
+ description: the id of network 2
+ value: { get_attr: [net_2, show, id] }
+ vm1_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm1, show, name] }
+ vm2_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm2, show, name] }
+ vm3_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm3, show, name] }
+ vm4_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm4, show, name] }
+ vm5_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm5, show, name] }
diff --git a/sdnvpn/artifacts/testcase_8bis.yaml b/sdnvpn/artifacts/testcase_8bis.yaml
new file mode 100644
index 0000000..94853c3
--- /dev/null
+++ b/sdnvpn/artifacts/testcase_8bis.yaml
@@ -0,0 +1,173 @@
+heat_template_version: 2013-05-23
+
+description: >
+ Template for SDNVPN testcase 8
+ Test floating IP and router assoc coexistence
+
+parameters:
+ flavor:
+ type: string
+ description: flavor for the servers to be created
+ constraints:
+ - custom_constraint: nova.flavor
+ image_n:
+ type: string
+ description: image for the servers to be created
+ constraints:
+ - custom_constraint: glance.image
+ av_zone_1:
+ type: string
+ description: availability zone 1
+
+ external_nw:
+ type: string
+ description: the external network
+ net_1_name:
+ type: string
+ description: network 1
+ subnet_1_name:
+ type: string
+ description: subnet 1 name
+ subnet_1_cidr:
+ type: string
+ description: subnet 1 cidr
+ router_1_name:
+ type: string
+    description: router 1 name
+ net_2_name:
+ type: string
+ description: network 2
+ subnet_2_name:
+ type: string
+ description: subnet 2 name
+ subnet_2_cidr:
+ type: string
+    description: subnet 2 cidr
+
+ secgroup_name:
+ type: string
+ description: security group name
+ secgroup_descr:
+ type: string
+ description: security group slogan
+
+ instance_1_name:
+ type: string
+ description: instance name
+ instance_2_name:
+ type: string
+ description: instance name
+
+ ping_count:
+ type: string
+ description: ping count for user data script
+ default: 10
+
+resources:
+ router_1:
+ type: OS::Neutron::Router
+ properties:
+ name: { get_param: router_1_name }
+ external_gateway_info:
+ network: { get_param: external_nw }
+
+ net_1:
+ type: OS::Neutron::Net
+ properties:
+ name: { get_param: net_1_name }
+ subnet_1:
+ type: OS::Neutron::Subnet
+ properties:
+ name: { get_param: subnet_1_name }
+ network: { get_resource: net_1 }
+ cidr: { get_param: subnet_1_cidr }
+ routerinterface_1:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router_1 }
+ subnet_id: { get_resource: subnet_1 }
+
+ net_2:
+ type: OS::Neutron::Net
+ properties:
+ name: { get_param: net_2_name }
+ subnet_2:
+ type: OS::Neutron::Subnet
+ properties:
+ name: { get_param: subnet_2_name }
+ network: { get_resource: net_2 }
+ cidr: { get_param: subnet_2_cidr }
+
+ sec_group:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name: { get_param: secgroup_name }
+ description: { get_param: secgroup_descr }
+ rules:
+ - protocol: icmp
+ remote_ip_prefix: 0.0.0.0/0
+ - protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ remote_ip_prefix: 0.0.0.0/0
+
+ vm1:
+ type: OS::Nova::Server
+ depends_on: [ vm2 ]
+ properties:
+ name: { get_param: instance_1_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_1 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - subnet: { get_resource: subnet_1 }
+ config_drive: True
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/sh
+ set $IP_VM2
+ while true; do
+ for i do
+ ip=$i
+ ping -c $COUNT $ip 2>&1 >/dev/null
+ RES=$?
+ if [ \"Z$RES\" = \"Z0\" ] ; then
+ echo ping $ip OK
+ else echo ping $ip KO
+ fi
+ done
+ sleep 1
+ done
+ params:
+ $IP_VM2: { get_attr: [vm2, addresses, { get_resource: net_1}, 0, addr] }
+ $COUNT: { get_param: ping_count }
+ vm2:
+ type: OS::Nova::Server
+ properties:
+ name: { get_param: instance_2_name }
+ image: { get_param: image_n }
+ flavor: { get_param: flavor }
+ availability_zone: { get_param: av_zone_1 }
+ security_groups:
+ - { get_resource: sec_group }
+ networks:
+ - subnet: { get_resource: subnet_2 }
+
+
+outputs:
+ router_1_o:
+    description: the id of router 1
+ value: { get_attr: [router_1, show, id] }
+ net_2_o:
+ description: the id of network 2
+ value: { get_attr: [net_2, show, id] }
+ vm1_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm1, show, name] }
+ vm2_o:
+ description: the deployed vm resource
+ value: { get_attr: [vm2, show, name] }
diff --git a/sdnvpn/artifacts/testcase_8bis_upd.yaml b/sdnvpn/artifacts/testcase_8bis_upd.yaml
new file mode 100644
index 0000000..4661e8a
--- /dev/null
+++ b/sdnvpn/artifacts/testcase_8bis_upd.yaml
@@ -0,0 +1,17 @@
+heat_template_version: 2013-05-23
+
+resources:
+ fip_1:
+ type: OS::Neutron::FloatingIP
+ properties:
+ floating_network: { get_param: external_nw }
+ fip_1_assoc:
+ type: OS::Neutron::FloatingIPAssociation
+ properties:
+ floatingip_id: { get_resource: fip_1 }
+ port_id: {get_attr: [vm1, addresses, {get_resource: net_1}, 0, port]}
+
+outputs:
+ fip_1_o:
+ description: the floating IP for vm1
+ value: { get_attr: [fip_1, show, floating_ip_address] }
diff --git a/sdnvpn/lib/openstack_utils.py b/sdnvpn/lib/openstack_utils.py
index 29843f0..5fc1e49 100644
--- a/sdnvpn/lib/openstack_utils.py
+++ b/sdnvpn/lib/openstack_utils.py
@@ -8,6 +8,7 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
+import base64
import logging
import os.path
import shutil
@@ -17,19 +18,17 @@ import urllib
from keystoneauth1 import loading
from keystoneauth1 import session
-from cinderclient import client as cinderclient
-from glanceclient import client as glanceclient
-from heatclient import client as heatclient
-from novaclient import client as novaclient
from keystoneclient import client as keystoneclient
from neutronclient.neutron import client as neutronclient
+from openstack import connection
+from openstack import cloud as os_cloud
+from openstack.exceptions import ResourceNotFound
from functest.utils import env
logger = logging.getLogger(__name__)
DEFAULT_API_VERSION = '2'
-DEFAULT_HEAT_API_VERSION = '1'
# *********************************************
@@ -44,6 +43,14 @@ class MissingEnvVar(Exception):
return str.format("Please set the mandatory env var: {}", self.var)
+def get_os_connection():
+ return connection.from_config()
+
+
+def get_os_cloud():
+ return os_cloud.openstack_cloud()
+
+
def is_keystone_v3():
keystone_api_version = os.getenv('OS_IDENTITY_API_VERSION')
if (keystone_api_version is None or
@@ -156,34 +163,6 @@ def get_keystone_client(other_creds={}):
interface=os.getenv('OS_INTERFACE', 'admin'))
-def get_nova_client_version():
- api_version = os.getenv('OS_COMPUTE_API_VERSION')
- if api_version is not None:
- logger.info("OS_COMPUTE_API_VERSION is set in env as '%s'",
- api_version)
- return api_version
- return DEFAULT_API_VERSION
-
-
-def get_nova_client(other_creds={}):
- sess = get_session(other_creds)
- return novaclient.Client(get_nova_client_version(), session=sess)
-
-
-def get_cinder_client_version():
- api_version = os.getenv('OS_VOLUME_API_VERSION')
- if api_version is not None:
- logger.info("OS_VOLUME_API_VERSION is set in env as '%s'",
- api_version)
- return api_version
- return DEFAULT_API_VERSION
-
-
-def get_cinder_client(other_creds={}):
- sess = get_session(other_creds)
- return cinderclient.Client(get_cinder_client_version(), session=sess)
-
-
def get_neutron_client_version():
api_version = os.getenv('OS_NETWORK_API_VERSION')
if api_version is not None:
@@ -198,33 +177,6 @@ def get_neutron_client(other_creds={}):
return neutronclient.Client(get_neutron_client_version(), session=sess)
-def get_glance_client_version():
- api_version = os.getenv('OS_IMAGE_API_VERSION')
- if api_version is not None:
- logger.info("OS_IMAGE_API_VERSION is set in env as '%s'", api_version)
- return api_version
- return DEFAULT_API_VERSION
-
-
-def get_glance_client(other_creds={}):
- sess = get_session(other_creds)
- return glanceclient.Client(get_glance_client_version(), session=sess)
-
-
-def get_heat_client_version():
- api_version = os.getenv('OS_ORCHESTRATION_API_VERSION')
- if api_version is not None:
- logger.info("OS_ORCHESTRATION_API_VERSION is set in env as '%s'",
- api_version)
- return api_version
- return DEFAULT_HEAT_API_VERSION
-
-
-def get_heat_client(other_creds={}):
- sess = get_session(other_creds)
- return heatclient.Client(get_heat_client_version(), session=sess)
-
-
def download_url(url, dest_path):
"""
Download a file to a destination path given a URL
@@ -241,7 +193,7 @@ def download_url(url, dest_path):
return True
-def download_and_add_image_on_glance(glance, image_name, image_url, data_dir):
+def download_and_add_image_on_glance(conn, image_name, image_url, data_dir):
try:
dest_path = data_dir
if not os.path.exists(dest_path):
@@ -255,7 +207,7 @@ def download_and_add_image_on_glance(glance, image_name, image_url, data_dir):
try:
image = create_glance_image(
- glance, image_name, dest_path + file_name)
+ conn, image_name, dest_path + file_name)
if not image:
return False
else:
@@ -268,36 +220,37 @@ def download_and_add_image_on_glance(glance, image_name, image_url, data_dir):
# *********************************************
# NOVA
# *********************************************
-def get_instances(nova_client):
+def get_instances(conn):
try:
- instances = nova_client.servers.list(search_opts={'all_tenants': 1})
+ instances = conn.compute.servers(all_tenants=1)
return instances
except Exception as e:
- logger.error("Error [get_instances(nova_client)]: %s" % e)
+ logger.error("Error [get_instances(compute)]: %s" % e)
return None
-def get_instance_status(nova_client, instance):
+def get_instance_status(conn, instance):
try:
- instance = nova_client.servers.get(instance.id)
+ instance = conn.compute.get_server(instance.id)
return instance.status
except Exception as e:
- logger.error("Error [get_instance_status(nova_client)]: %s" % e)
+ logger.error("Error [get_instance_status(compute)]: %s" % e)
return None
-def get_instance_by_name(nova_client, instance_name):
+def get_instance_by_name(conn, instance_name):
try:
- instance = nova_client.servers.find(name=instance_name)
+ instance = conn.compute.find_server(instance_name,
+ ignore_missing=False)
return instance
except Exception as e:
- logger.error("Error [get_instance_by_name(nova_client, '%s')]: %s"
+ logger.error("Error [get_instance_by_name(compute, '%s')]: %s"
% (instance_name, e))
return None
-def get_flavor_id(nova_client, flavor_name):
- flavors = nova_client.flavors.list(detailed=True)
+def get_flavor_id(conn, flavor_name):
+ flavors = conn.compute.flavors()
id = ''
for f in flavors:
if f.name == flavor_name:
@@ -306,8 +259,8 @@ def get_flavor_id(nova_client, flavor_name):
return id
-def get_flavor_id_by_ram_range(nova_client, min_ram, max_ram):
- flavors = nova_client.flavors.list(detailed=True)
+def get_flavor_id_by_ram_range(conn, min_ram, max_ram):
+ flavors = conn.compute.flavors()
id = ''
for f in flavors:
if min_ram <= f.ram and f.ram <= max_ram:
@@ -316,51 +269,52 @@ def get_flavor_id_by_ram_range(nova_client, min_ram, max_ram):
return id
-def get_aggregates(nova_client):
+def get_aggregates(cloud):
try:
- aggregates = nova_client.aggregates.list()
+ aggregates = cloud.list_aggregates()
return aggregates
except Exception as e:
- logger.error("Error [get_aggregates(nova_client)]: %s" % e)
+ logger.error("Error [get_aggregates(compute)]: %s" % e)
return None
-def get_aggregate_id(nova_client, aggregate_name):
+def get_aggregate_id(cloud, aggregate_name):
try:
- aggregates = get_aggregates(nova_client)
+ aggregates = get_aggregates(cloud)
_id = [ag.id for ag in aggregates if ag.name == aggregate_name][0]
return _id
except Exception as e:
- logger.error("Error [get_aggregate_id(nova_client, %s)]:"
+ logger.error("Error [get_aggregate_id(compute, %s)]:"
" %s" % (aggregate_name, e))
return None
-def get_availability_zones(nova_client):
+def get_availability_zones(conn):
try:
- availability_zones = nova_client.availability_zones.list()
+ availability_zones = conn.compute.availability_zones()
return availability_zones
except Exception as e:
- logger.error("Error [get_availability_zones(nova_client)]: %s" % e)
+ logger.error("Error [get_availability_zones(compute)]: %s" % e)
return None
-def get_availability_zone_names(nova_client):
+def get_availability_zone_names(conn):
try:
- az_names = [az.zoneName for az in get_availability_zones(nova_client)]
+ az_names = [az.zoneName for az in get_availability_zones(conn)]
return az_names
except Exception as e:
- logger.error("Error [get_availability_zone_names(nova_client)]:"
+ logger.error("Error [get_availability_zone_names(compute)]:"
" %s" % e)
return None
-def create_flavor(nova_client, flavor_name, ram, disk, vcpus, public=True):
+def create_flavor(conn, flavor_name, ram, disk, vcpus, public=True):
try:
- flavor = nova_client.flavors.create(
- flavor_name, ram, vcpus, disk, is_public=public)
+ flavor = conn.compute.create_flavor(
+ name=flavor_name, ram=ram, disk=disk, vcpus=vcpus,
+ is_public=public)
except Exception as e:
- logger.error("Error [create_flavor(nova_client, '%s', '%s', '%s', "
+ logger.error("Error [create_flavor(compute, '%s', '%s', '%s', "
"'%s')]: %s" % (flavor_name, ram, disk, vcpus, e))
return None
return flavor.id
@@ -368,9 +322,9 @@ def create_flavor(nova_client, flavor_name, ram, disk, vcpus, public=True):
def get_or_create_flavor(flavor_name, ram, disk, vcpus, public=True):
flavor_exists = False
- nova_client = get_nova_client()
+ conn = get_os_connection()
- flavor_id = get_flavor_id(nova_client, flavor_name)
+ flavor_id = get_flavor_id(conn, flavor_name)
if flavor_id != '':
logger.info("Using existing flavor '%s'..." % flavor_name)
flavor_exists = True
@@ -378,7 +332,7 @@ def get_or_create_flavor(flavor_name, ram, disk, vcpus, public=True):
logger.info("Creating flavor '%s' with '%s' RAM, '%s' disk size, "
"'%s' vcpus..." % (flavor_name, ram, disk, vcpus))
flavor_id = create_flavor(
- nova_client, flavor_name, ram, disk, vcpus, public=public)
+ conn, flavor_name, ram, disk, vcpus, public=public)
if not flavor_id:
raise Exception("Failed to create flavor '%s'..." % (flavor_name))
else:
@@ -388,58 +342,58 @@ def get_or_create_flavor(flavor_name, ram, disk, vcpus, public=True):
return flavor_exists, flavor_id
-def get_floating_ips(neutron_client):
+def get_floating_ips(conn):
try:
- floating_ips = neutron_client.list_floatingips()
- return floating_ips['floatingips']
+ floating_ips = conn.network.ips()
+ return floating_ips
except Exception as e:
- logger.error("Error [get_floating_ips(neutron_client)]: %s" % e)
+ logger.error("Error [get_floating_ips(network)]: %s" % e)
return None
-def get_hypervisors(nova_client):
+def get_hypervisors(conn):
try:
nodes = []
- hypervisors = nova_client.hypervisors.list()
+ hypervisors = conn.compute.hypervisors()
for hypervisor in hypervisors:
if hypervisor.state == "up":
- nodes.append(hypervisor.hypervisor_hostname)
+ nodes.append(hypervisor.name)
return nodes
except Exception as e:
- logger.error("Error [get_hypervisors(nova_client)]: %s" % e)
+ logger.error("Error [get_hypervisors(compute)]: %s" % e)
return None
-def create_aggregate(nova_client, aggregate_name, av_zone):
+def create_aggregate(cloud, aggregate_name, av_zone):
try:
- nova_client.aggregates.create(aggregate_name, av_zone)
+ cloud.create_aggregate(aggregate_name, av_zone)
return True
except Exception as e:
- logger.error("Error [create_aggregate(nova_client, %s, %s)]: %s"
+ logger.error("Error [create_aggregate(compute, %s, %s)]: %s"
% (aggregate_name, av_zone, e))
return None
-def add_host_to_aggregate(nova_client, aggregate_name, compute_host):
+def add_host_to_aggregate(cloud, aggregate_name, compute_host):
try:
- aggregate_id = get_aggregate_id(nova_client, aggregate_name)
- nova_client.aggregates.add_host(aggregate_id, compute_host)
+ aggregate_id = get_aggregate_id(cloud, aggregate_name)
+ cloud.add_host_to_aggregate(aggregate_id, compute_host)
return True
except Exception as e:
- logger.error("Error [add_host_to_aggregate(nova_client, %s, %s)]: %s"
+ logger.error("Error [add_host_to_aggregate(compute, %s, %s)]: %s"
% (aggregate_name, compute_host, e))
return None
def create_aggregate_with_host(
- nova_client, aggregate_name, av_zone, compute_host):
+ cloud, aggregate_name, av_zone, compute_host):
try:
- create_aggregate(nova_client, aggregate_name, av_zone)
- add_host_to_aggregate(nova_client, aggregate_name, compute_host)
+ create_aggregate(cloud, aggregate_name, av_zone)
+ add_host_to_aggregate(cloud, aggregate_name, compute_host)
return True
except Exception as e:
logger.error("Error [create_aggregate_with_host("
- "nova_client, %s, %s, %s)]: %s"
+ "compute, %s, %s, %s)]: %s"
% (aggregate_name, av_zone, compute_host, e))
return None
@@ -450,41 +404,36 @@ def create_instance(flavor_name,
instance_name="functest-vm",
confdrive=True,
userdata=None,
- av_zone='',
+ av_zone=None,
fixed_ip=None,
- files=None):
- nova_client = get_nova_client()
+ files=[]):
+ conn = get_os_connection()
try:
- flavor = nova_client.flavors.find(name=flavor_name)
- except:
- flavors = nova_client.flavors.list()
+ flavor = conn.compute.find_flavor(flavor_name, ignore_missing=False)
+ except Exception:
+ flavors = [flavor.name for flavor in conn.compute.flavors()]
logger.error("Error: Flavor '%s' not found. Available flavors are: "
"\n%s" % (flavor_name, flavors))
return None
if fixed_ip is not None:
- nics = {"net-id": network_id, "v4-fixed-ip": fixed_ip}
- else:
- nics = {"net-id": network_id}
- if userdata is None:
- instance = nova_client.servers.create(
- name=instance_name,
- flavor=flavor,
- image=image_id,
- nics=[nics],
- availability_zone=av_zone,
- files=files
- )
+ networks = {"uuid": network_id, "fixed_ip": fixed_ip}
else:
- instance = nova_client.servers.create(
- name=instance_name,
- flavor=flavor,
- image=image_id,
- nics=[nics],
- config_drive=confdrive,
- userdata=userdata,
- availability_zone=av_zone,
- files=files
- )
+ networks = {"uuid": network_id}
+
+ server_attrs = {
+ 'name': instance_name,
+ 'flavor_id': flavor.id,
+ 'image_id': image_id,
+ 'networks': [networks],
+ 'personality': files
+ }
+ if userdata is not None:
+ server_attrs['config_drive'] = confdrive
+ server_attrs['user_data'] = base64.b64encode(userdata.encode())
+ if av_zone is not None:
+ server_attrs['availability_zone'] = av_zone
+
+ instance = conn.compute.create_server(**server_attrs)
return instance
@@ -494,12 +443,12 @@ def create_instance_and_wait_for_active(flavor_name,
instance_name="",
config_drive=False,
userdata="",
- av_zone='',
+ av_zone=None,
fixed_ip=None,
- files=None):
+ files=[]):
SLEEP = 3
VM_BOOT_TIMEOUT = 180
- nova_client = get_nova_client()
+ conn = get_os_connection()
instance = create_instance(flavor_name,
image_id,
network_id,
@@ -511,7 +460,7 @@ def create_instance_and_wait_for_active(flavor_name,
files=files)
count = VM_BOOT_TIMEOUT / SLEEP
for n in range(count, -1, -1):
- status = get_instance_status(nova_client, instance)
+ status = get_instance_status(conn, instance)
if status is None:
time.sleep(SLEEP)
continue
@@ -526,87 +475,85 @@ def create_instance_and_wait_for_active(flavor_name,
return None
-def create_floating_ip(neutron_client):
- extnet_id = get_external_net_id(neutron_client)
- props = {'floating_network_id': extnet_id}
+def create_floating_ip(conn):
+ extnet_id = get_external_net_id(conn)
try:
- ip_json = neutron_client.create_floatingip({'floatingip': props})
- fip_addr = ip_json['floatingip']['floating_ip_address']
- fip_id = ip_json['floatingip']['id']
+ fip = conn.network.create_ip(floating_network_id=extnet_id)
+ fip_addr = fip.floating_ip_address
+ fip_id = fip.id
except Exception as e:
- logger.error("Error [create_floating_ip(neutron_client)]: %s" % e)
+ logger.error("Error [create_floating_ip(network)]: %s" % e)
return None
return {'fip_addr': fip_addr, 'fip_id': fip_id}
-def attach_floating_ip(neutron_client, port_id):
- extnet_id = get_external_net_id(neutron_client)
- props = {'floating_network_id': extnet_id,
- 'port_id': port_id}
+def attach_floating_ip(conn, port_id):
+ extnet_id = get_external_net_id(conn)
try:
- return neutron_client.create_floatingip({'floatingip': props})
+ return conn.network.create_ip(floating_network_id=extnet_id,
+ port_id=port_id)
except Exception as e:
- logger.error("Error [Attach_floating_ip(neutron_client), %s]: %s"
+ logger.error("Error [Attach_floating_ip(network), %s]: %s"
% (port_id, e))
return None
-def add_floating_ip(nova_client, server_id, floatingip_addr):
+def add_floating_ip(conn, server_id, floatingip_addr):
try:
- nova_client.servers.add_floating_ip(server_id, floatingip_addr)
+ conn.compute.add_floating_ip_to_server(server_id, floatingip_addr)
return True
except Exception as e:
- logger.error("Error [add_floating_ip(nova_client, '%s', '%s')]: %s"
+ logger.error("Error [add_floating_ip(compute, '%s', '%s')]: %s"
% (server_id, floatingip_addr, e))
return False
-def delete_instance(nova_client, instance_id):
+def delete_instance(conn, instance_id):
try:
- nova_client.servers.force_delete(instance_id)
+ conn.compute.delete_server(instance_id, force=True)
return True
except Exception as e:
- logger.error("Error [delete_instance(nova_client, '%s')]: %s"
+ logger.error("Error [delete_instance(compute, '%s')]: %s"
% (instance_id, e))
return False
-def delete_floating_ip(neutron_client, floatingip_id):
+def delete_floating_ip(conn, floatingip_id):
try:
- neutron_client.delete_floatingip(floatingip_id)
+ conn.network.delete_ip(floatingip_id)
return True
except Exception as e:
- logger.error("Error [delete_floating_ip(neutron_client, '%s')]: %s"
+ logger.error("Error [delete_floating_ip(network, '%s')]: %s"
% (floatingip_id, e))
return False
-def remove_host_from_aggregate(nova_client, aggregate_name, compute_host):
+def remove_host_from_aggregate(cloud, aggregate_name, compute_host):
try:
- aggregate_id = get_aggregate_id(nova_client, aggregate_name)
- nova_client.aggregates.remove_host(aggregate_id, compute_host)
+ aggregate_id = get_aggregate_id(cloud, aggregate_name)
+ cloud.remove_host_from_aggregate(aggregate_id, compute_host)
return True
except Exception as e:
- logger.error("Error [remove_host_from_aggregate(nova_client, %s, %s)]:"
+ logger.error("Error [remove_host_from_aggregate(compute, %s, %s)]:"
" %s" % (aggregate_name, compute_host, e))
return False
-def remove_hosts_from_aggregate(nova_client, aggregate_name):
- aggregate_id = get_aggregate_id(nova_client, aggregate_name)
- hosts = nova_client.aggregates.get(aggregate_id).hosts
+def remove_hosts_from_aggregate(cloud, aggregate_name):
+ aggregate_id = get_aggregate_id(cloud, aggregate_name)
+ hosts = cloud.get_aggregate(aggregate_id).hosts
assert(
- all(remove_host_from_aggregate(nova_client, aggregate_name, host)
+ all(remove_host_from_aggregate(cloud, aggregate_name, host)
for host in hosts))
-def delete_aggregate(nova_client, aggregate_name):
+def delete_aggregate(cloud, aggregate_name):
try:
- remove_hosts_from_aggregate(nova_client, aggregate_name)
- nova_client.aggregates.delete(aggregate_name)
+ remove_hosts_from_aggregate(cloud, aggregate_name)
+ cloud.delete_aggregate(aggregate_name)
return True
except Exception as e:
- logger.error("Error [delete_aggregate(nova_client, %s)]: %s"
+ logger.error("Error [delete_aggregate(compute, %s)]: %s"
% (aggregate_name, e))
return False
@@ -614,266 +561,237 @@ def delete_aggregate(nova_client, aggregate_name):
# *********************************************
# NEUTRON
# *********************************************
-def get_network_list(neutron_client):
- network_list = neutron_client.list_networks()['networks']
- if len(network_list) == 0:
- return None
- else:
- return network_list
+def get_network_list(conn):
+ return conn.network.networks()
-def get_router_list(neutron_client):
- router_list = neutron_client.list_routers()['routers']
- if len(router_list) == 0:
- return None
- else:
- return router_list
+def get_router_list(conn):
+ return conn.network.routers()
-def get_port_list(neutron_client):
- port_list = neutron_client.list_ports()['ports']
- if len(port_list) == 0:
- return None
- else:
- return port_list
+def get_port_list(conn):
+ return conn.network.ports()
-def get_network_id(neutron_client, network_name):
- networks = neutron_client.list_networks()['networks']
+def get_network_id(conn, network_name):
+ networks = conn.network.networks()
id = ''
for n in networks:
- if n['name'] == network_name:
- id = n['id']
+ if n.name == network_name:
+ id = n.id
break
return id
-def get_subnet_id(neutron_client, subnet_name):
- subnets = neutron_client.list_subnets()['subnets']
+def get_subnet_id(conn, subnet_name):
+ subnets = conn.network.subnets()
id = ''
for s in subnets:
- if s['name'] == subnet_name:
- id = s['id']
+ if s.name == subnet_name:
+ id = s.id
break
return id
-def get_router_id(neutron_client, router_name):
- routers = neutron_client.list_routers()['routers']
+def get_router_id(conn, router_name):
+ routers = conn.network.routers()
id = ''
for r in routers:
- if r['name'] == router_name:
- id = r['id']
+ if r.name == router_name:
+ id = r.id
break
return id
-def get_private_net(neutron_client):
+def get_private_net(conn):
# Checks if there is an existing shared private network
- networks = neutron_client.list_networks()['networks']
- if len(networks) == 0:
- return None
+ networks = conn.network.networks()
for net in networks:
- if (net['router:external'] is False) and (net['shared'] is True):
+ if (net.is_router_external is False) and (net.is_shared is True):
return net
return None
-def get_external_net(neutron_client):
+def get_external_net(conn):
if (env.get('EXTERNAL_NETWORK')):
return env.get('EXTERNAL_NETWORK')
- for network in neutron_client.list_networks()['networks']:
- if network['router:external']:
- return network['name']
+ for network in conn.network.networks():
+ if network.is_router_external:
+ return network.name
return None
-def get_external_net_id(neutron_client):
+def get_external_net_id(conn):
if (env.get('EXTERNAL_NETWORK')):
- networks = neutron_client.list_networks(
- name=env.get('EXTERNAL_NETWORK'))
- net_id = networks['networks'][0]['id']
+ networks = conn.network.networks(name=env.get('EXTERNAL_NETWORK'))
+ net_id = networks.next().id
return net_id
- for network in neutron_client.list_networks()['networks']:
- if network['router:external']:
- return network['id']
+ for network in conn.network.networks():
+ if network.is_router_external:
+ return network.id
return None
-def check_neutron_net(neutron_client, net_name):
- for network in neutron_client.list_networks()['networks']:
- if network['name'] == net_name:
- for subnet in network['subnets']:
+def check_neutron_net(conn, net_name):
+ for network in conn.network.networks():
+ if network.name == net_name:
+ for subnet in network.subnet_ids:
return True
return False
-def create_neutron_net(neutron_client, name):
- json_body = {'network': {'name': name,
- 'admin_state_up': True}}
+def create_neutron_net(conn, name):
try:
- network = neutron_client.create_network(body=json_body)
- network_dict = network['network']
- return network_dict['id']
+ network = conn.network.create_network(name=name)
+ return network.id
except Exception as e:
- logger.error("Error [create_neutron_net(neutron_client, '%s')]: %s"
+ logger.error("Error [create_neutron_net(network, '%s')]: %s"
% (name, e))
return None
-def create_neutron_subnet(neutron_client, name, cidr, net_id,
+def create_neutron_subnet(conn, name, cidr, net_id,
dns=['8.8.8.8', '8.8.4.4']):
- json_body = {'subnets': [{'name': name, 'cidr': cidr,
- 'ip_version': 4, 'network_id': net_id,
- 'dns_nameservers': dns}]}
-
try:
- subnet = neutron_client.create_subnet(body=json_body)
- return subnet['subnets'][0]['id']
+ subnet = conn.network.create_subnet(name=name,
+ cidr=cidr,
+ ip_version='4',
+ network_id=net_id,
+ dns_nameservers=dns)
+ return subnet.id
except Exception as e:
- logger.error("Error [create_neutron_subnet(neutron_client, '%s', "
+ logger.error("Error [create_neutron_subnet(network, '%s', "
"'%s', '%s')]: %s" % (name, cidr, net_id, e))
return None
-def create_neutron_router(neutron_client, name):
- json_body = {'router': {'name': name, 'admin_state_up': True}}
+def create_neutron_router(conn, name):
try:
- router = neutron_client.create_router(json_body)
- return router['router']['id']
+ router = conn.network.create_router(name=name)
+ return router.id
except Exception as e:
- logger.error("Error [create_neutron_router(neutron_client, '%s')]: %s"
+ logger.error("Error [create_neutron_router(network, '%s')]: %s"
% (name, e))
return None
-def create_neutron_port(neutron_client, name, network_id, ip):
- json_body = {'port': {
- 'admin_state_up': True,
- 'name': name,
- 'network_id': network_id,
- 'fixed_ips': [{"ip_address": ip}]
- }}
+def create_neutron_port(conn, name, network_id, ip):
try:
- port = neutron_client.create_port(body=json_body)
- return port['port']['id']
+ port = conn.network.create_port(name=name,
+ network_id=network_id,
+ fixed_ips=[{'ip_address': ip}])
+ return port.id
except Exception as e:
- logger.error("Error [create_neutron_port(neutron_client, '%s', '%s', "
+ logger.error("Error [create_neutron_port(network, '%s', '%s', "
"'%s')]: %s" % (name, network_id, ip, e))
return None
-def update_neutron_net(neutron_client, network_id, shared=False):
- json_body = {'network': {'shared': shared}}
+def update_neutron_net(conn, network_id, shared=False):
try:
- neutron_client.update_network(network_id, body=json_body)
+ conn.network.update_network(network_id, is_shared=shared)
return True
except Exception as e:
- logger.error("Error [update_neutron_net(neutron_client, '%s', '%s')]: "
+ logger.error("Error [update_neutron_net(network, '%s', '%s')]: "
"%s" % (network_id, str(shared), e))
return False
-def update_neutron_port(neutron_client, port_id, device_owner):
- json_body = {'port': {
- 'device_owner': device_owner,
- }}
+def update_neutron_port(conn, port_id, device_owner):
try:
- port = neutron_client.update_port(port=port_id,
- body=json_body)
- return port['port']['id']
+ port = conn.network.update_port(port_id, device_owner=device_owner)
+ return port.id
except Exception as e:
- logger.error("Error [update_neutron_port(neutron_client, '%s', '%s')]:"
+ logger.error("Error [update_neutron_port(network, '%s', '%s')]:"
" %s" % (port_id, device_owner, e))
return None
-def add_interface_router(neutron_client, router_id, subnet_id):
- json_body = {"subnet_id": subnet_id}
+def add_interface_router(conn, router_id, subnet_id):
try:
- neutron_client.add_interface_router(router=router_id, body=json_body)
+ conn.network.add_interface_to_router(router_id, subnet_id=subnet_id)
return True
except Exception as e:
- logger.error("Error [add_interface_router(neutron_client, '%s', "
+ logger.error("Error [add_interface_router(network, '%s', "
"'%s')]: %s" % (router_id, subnet_id, e))
return False
-def add_gateway_router(neutron_client, router_id):
- ext_net_id = get_external_net_id(neutron_client)
+def add_gateway_router(conn, router_id):
+ ext_net_id = get_external_net_id(conn)
router_dict = {'network_id': ext_net_id}
try:
- neutron_client.add_gateway_router(router_id, router_dict)
+ conn.network.update_router(router_id,
+ external_gateway_info=router_dict)
return True
except Exception as e:
- logger.error("Error [add_gateway_router(neutron_client, '%s')]: %s"
+ logger.error("Error [add_gateway_router(network, '%s')]: %s"
% (router_id, e))
return False
-def delete_neutron_net(neutron_client, network_id):
+def delete_neutron_net(conn, network_id):
try:
- neutron_client.delete_network(network_id)
+ conn.network.delete_network(network_id, ignore_missing=False)
return True
except Exception as e:
- logger.error("Error [delete_neutron_net(neutron_client, '%s')]: %s"
+ logger.error("Error [delete_neutron_net(network, '%s')]: %s"
% (network_id, e))
return False
-def delete_neutron_subnet(neutron_client, subnet_id):
+def delete_neutron_subnet(conn, subnet_id):
try:
- neutron_client.delete_subnet(subnet_id)
+ conn.network.delete_subnet(subnet_id, ignore_missing=False)
return True
except Exception as e:
- logger.error("Error [delete_neutron_subnet(neutron_client, '%s')]: %s"
+ logger.error("Error [delete_neutron_subnet(network, '%s')]: %s"
% (subnet_id, e))
return False
-def delete_neutron_router(neutron_client, router_id):
+def delete_neutron_router(conn, router_id):
try:
- neutron_client.delete_router(router=router_id)
+ conn.network.delete_router(router_id, ignore_missing=False)
return True
except Exception as e:
- logger.error("Error [delete_neutron_router(neutron_client, '%s')]: %s"
+ logger.error("Error [delete_neutron_router(network, '%s')]: %s"
% (router_id, e))
return False
-def delete_neutron_port(neutron_client, port_id):
+def delete_neutron_port(conn, port_id):
try:
- neutron_client.delete_port(port_id)
+ conn.network.delete_port(port_id, ignore_missing=False)
return True
except Exception as e:
- logger.error("Error [delete_neutron_port(neutron_client, '%s')]: %s"
+ logger.error("Error [delete_neutron_port(network, '%s')]: %s"
% (port_id, e))
return False
-def remove_interface_router(neutron_client, router_id, subnet_id):
- json_body = {"subnet_id": subnet_id}
+def remove_interface_router(conn, router_id, subnet_id):
try:
- neutron_client.remove_interface_router(router=router_id,
- body=json_body)
+ conn.network.remove_interface_from_router(router_id,
+ subnet_id=subnet_id)
return True
except Exception as e:
- logger.error("Error [remove_interface_router(neutron_client, '%s', "
+ logger.error("Error [remove_interface_router(network, '%s', "
"'%s')]: %s" % (router_id, subnet_id, e))
return False
-def remove_gateway_router(neutron_client, router_id):
+def remove_gateway_router(conn, router_id):
try:
- neutron_client.remove_gateway_router(router_id)
+ conn.network.update_router(router_id, external_gateway_info=None)
return True
except Exception as e:
- logger.error("Error [remove_gateway_router(neutron_client, '%s')]: %s"
+ logger.error("Error [remove_gateway_router(network, '%s')]: %s"
% (router_id, e))
return False
-def create_network_full(neutron_client,
+def create_network_full(conn,
net_name,
subnet_name,
router_name,
@@ -881,45 +799,43 @@ def create_network_full(neutron_client,
dns=['8.8.8.8', '8.8.4.4']):
# Check if the network already exists
- network_id = get_network_id(neutron_client, net_name)
- subnet_id = get_subnet_id(neutron_client, subnet_name)
- router_id = get_router_id(neutron_client, router_name)
+ network_id = get_network_id(conn, net_name)
+ subnet_id = get_subnet_id(conn, subnet_name)
+ router_id = get_router_id(conn, router_name)
if network_id != '' and subnet_id != '' and router_id != '':
logger.info("A network with name '%s' already exists..." % net_name)
else:
- neutron_client.format = 'json'
-
logger.info('Creating neutron network %s...' % net_name)
if network_id == '':
- network_id = create_neutron_net(neutron_client, net_name)
+ network_id = create_neutron_net(conn, net_name)
if not network_id:
return False
logger.debug("Network '%s' created successfully" % network_id)
logger.debug('Creating Subnet....')
if subnet_id == '':
- subnet_id = create_neutron_subnet(neutron_client, subnet_name,
- cidr, network_id, dns)
+ subnet_id = create_neutron_subnet(conn, subnet_name, cidr,
+ network_id, dns)
if not subnet_id:
return None
logger.debug("Subnet '%s' created successfully" % subnet_id)
logger.debug('Creating Router...')
if router_id == '':
- router_id = create_neutron_router(neutron_client, router_name)
+ router_id = create_neutron_router(conn, router_name)
if not router_id:
return None
logger.debug("Router '%s' created successfully" % router_id)
logger.debug('Adding router to subnet...')
- if not add_interface_router(neutron_client, router_id, subnet_id):
+ if not add_interface_router(conn, router_id, subnet_id):
return None
logger.debug("Interface added successfully.")
logger.debug('Adding gateway to router...')
- if not add_gateway_router(neutron_client, router_id):
+ if not add_gateway_router(conn, router_id):
return None
logger.debug("Gateway added successfully.")
@@ -930,15 +846,15 @@ def create_network_full(neutron_client,
def create_shared_network_full(net_name, subnt_name, router_name, subnet_cidr):
- neutron_client = get_neutron_client()
+ conn = get_os_connection()
- network_dic = create_network_full(neutron_client,
+ network_dic = create_network_full(conn,
net_name,
subnt_name,
router_name,
subnet_cidr)
if network_dic:
- if not update_neutron_net(neutron_client,
+ if not update_neutron_net(conn,
network_dic['net_id'],
shared=True):
logger.error("Failed to update network %s..." % net_name)
@@ -956,56 +872,49 @@ def create_shared_network_full(net_name, subnt_name, router_name, subnet_cidr):
# *********************************************
-def get_security_groups(neutron_client):
- try:
- security_groups = neutron_client.list_security_groups()[
- 'security_groups']
- return security_groups
- except Exception as e:
- logger.error("Error [get_security_groups(neutron_client)]: %s" % e)
- return None
+def get_security_groups(conn):
+ return conn.network.security_groups()
-def get_security_group_id(neutron_client, sg_name):
- security_groups = get_security_groups(neutron_client)
+def get_security_group_id(conn, sg_name):
+ security_groups = get_security_groups(conn)
id = ''
for sg in security_groups:
- if sg['name'] == sg_name:
- id = sg['id']
+ if sg.name == sg_name:
+ id = sg.id
break
return id
-def create_security_group(neutron_client, sg_name, sg_description):
- json_body = {'security_group': {'name': sg_name,
- 'description': sg_description}}
+def create_security_group(conn, sg_name, sg_description):
try:
- secgroup = neutron_client.create_security_group(json_body)
- return secgroup['security_group']
+ secgroup = conn.network.\
+ create_security_group(name=sg_name, description=sg_description)
+ return secgroup
except Exception as e:
- logger.error("Error [create_security_group(neutron_client, '%s', "
+ logger.error("Error [create_security_group(network, '%s', "
"'%s')]: %s" % (sg_name, sg_description, e))
return None
-def create_secgroup_rule(neutron_client, sg_id, direction, protocol,
+def create_secgroup_rule(conn, sg_id, direction, protocol,
port_range_min=None, port_range_max=None):
# We create a security group in 2 steps
- # 1 - we check the format and set the json body accordingly
- # 2 - we call neturon client to create the security group
+ # 1 - we check the format and set the secgroup rule attributes accordingly
+ # 2 - we call openstacksdk to create the security group
# Format check
- json_body = {'security_group_rule': {'direction': direction,
- 'security_group_id': sg_id,
- 'protocol': protocol}}
+ secgroup_rule_attrs = {'direction': direction,
+ 'security_group_id': sg_id,
+ 'protocol': protocol}
# parameters may be
# - both None => we do nothing
- # - both Not None => we add them to the json description
+ # - both Not None => we add them to the secgroup rule attributes
# but one cannot be None is the other is not None
if (port_range_min is not None and port_range_max is not None):
- # add port_range in json description
- json_body['security_group_rule']['port_range_min'] = port_range_min
- json_body['security_group_rule']['port_range_max'] = port_range_max
+ # add port_range in secgroup rule attributes
+ secgroup_rule_attrs['port_range_min'] = port_range_min
+ secgroup_rule_attrs['port_range_max'] = port_range_max
logger.debug("Security_group format set (port range included)")
else:
# either both port range are set to None => do nothing
@@ -1022,70 +931,69 @@ def create_secgroup_rule(neutron_client, sg_id, direction, protocol,
# Create security group using neutron client
try:
- neutron_client.create_security_group_rule(json_body)
+ conn.network.create_security_group_rule(**secgroup_rule_attrs)
return True
- except:
+ except Exception:
logger.exception("Impossible to create_security_group_rule,"
"security group rule probably already exists")
return False
-def get_security_group_rules(neutron_client, sg_id):
+def get_security_group_rules(conn, sg_id):
try:
- security_rules = neutron_client.list_security_group_rules()[
- 'security_group_rules']
+ security_rules = conn.network.security_group_rules()
security_rules = [rule for rule in security_rules
- if rule["security_group_id"] == sg_id]
+ if rule.security_group_id == sg_id]
return security_rules
except Exception as e:
- logger.error("Error [get_security_group_rules(neutron_client, sg_id)]:"
+ logger.error("Error [get_security_group_rules(network, sg_id)]:"
" %s" % e)
return None
-def check_security_group_rules(neutron_client, sg_id, direction, protocol,
+def check_security_group_rules(conn, sg_id, direction, protocol,
port_min=None, port_max=None):
try:
- security_rules = get_security_group_rules(neutron_client, sg_id)
+ security_rules = get_security_group_rules(conn, sg_id)
security_rules = [rule for rule in security_rules
- if (rule["direction"].lower() == direction and
- rule["protocol"].lower() == protocol and
- rule["port_range_min"] == port_min and
- rule["port_range_max"] == port_max)]
+ if (rule.direction.lower() == direction and
+ rule.protocol.lower() == protocol and
+ rule.port_range_min == port_min and
+ rule.port_range_max == port_max)]
if len(security_rules) == 0:
return True
else:
return False
except Exception as e:
logger.error("Error [check_security_group_rules("
- " neutron_client, sg_id, direction,"
+ " network, sg_id, direction,"
" protocol, port_min=None, port_max=None)]: "
"%s" % e)
return None
-def create_security_group_full(neutron_client,
+def create_security_group_full(conn,
sg_name, sg_description):
- sg_id = get_security_group_id(neutron_client, sg_name)
+ sg_id = get_security_group_id(conn, sg_name)
if sg_id != '':
logger.info("Using existing security group '%s'..." % sg_name)
else:
logger.info("Creating security group '%s'..." % sg_name)
- SECGROUP = create_security_group(neutron_client,
+ SECGROUP = create_security_group(conn,
sg_name,
sg_description)
if not SECGROUP:
logger.error("Failed to create the security group...")
return None
- sg_id = SECGROUP['id']
+ sg_id = SECGROUP.id
logger.debug("Security group '%s' with ID=%s created successfully."
- % (SECGROUP['name'], sg_id))
+ % (SECGROUP.name, sg_id))
logger.debug("Adding ICMP rules in security group '%s'..."
% sg_name)
- if not create_secgroup_rule(neutron_client, sg_id,
+ if not create_secgroup_rule(conn, sg_id,
'ingress', 'icmp'):
logger.error("Failed to create the security group rule...")
return None
@@ -1093,49 +1001,45 @@ def create_security_group_full(neutron_client,
logger.debug("Adding SSH rules in security group '%s'..."
% sg_name)
if not create_secgroup_rule(
- neutron_client, sg_id, 'ingress', 'tcp', '22', '22'):
+ conn, sg_id, 'ingress', 'tcp', '22', '22'):
logger.error("Failed to create the security group rule...")
return None
if not create_secgroup_rule(
- neutron_client, sg_id, 'egress', 'tcp', '22', '22'):
+ conn, sg_id, 'egress', 'tcp', '22', '22'):
logger.error("Failed to create the security group rule...")
return None
return sg_id
-def add_secgroup_to_instance(nova_client, instance_id, secgroup_id):
+def add_secgroup_to_instance(conn, instance_id, secgroup_id):
try:
- nova_client.servers.add_security_group(instance_id, secgroup_id)
+ conn.compute.add_security_group_to_server(instance_id, secgroup_id)
return True
except Exception as e:
- logger.error("Error [add_secgroup_to_instance(nova_client, '%s', "
+ logger.error("Error [add_secgroup_to_instance(compute, '%s', "
"'%s')]: %s" % (instance_id, secgroup_id, e))
return False
-def update_sg_quota(neutron_client, tenant_id, sg_quota, sg_rule_quota):
- json_body = {"quota": {
- "security_group": sg_quota,
- "security_group_rule": sg_rule_quota
- }}
-
+def update_sg_quota(conn, tenant_id, sg_quota, sg_rule_quota):
try:
- neutron_client.update_quota(tenant_id=tenant_id,
- body=json_body)
+ conn.network.update_quota(tenant_id,
+ security_group_rules=sg_rule_quota,
+ security_groups=sg_quota)
return True
except Exception as e:
- logger.error("Error [update_sg_quota(neutron_client, '%s', '%s', "
+ logger.error("Error [update_sg_quota(network, '%s', '%s', "
"'%s')]: %s" % (tenant_id, sg_quota, sg_rule_quota, e))
return False
-def delete_security_group(neutron_client, secgroup_id):
+def delete_security_group(conn, secgroup_id):
try:
- neutron_client.delete_security_group(secgroup_id)
+ conn.network.delete_security_group(secgroup_id, ignore_missing=False)
return True
except Exception as e:
- logger.error("Error [delete_security_group(neutron_client, '%s')]: %s"
+ logger.error("Error [delete_security_group(network, '%s')]: %s"
% (secgroup_id, e))
return False
@@ -1143,17 +1047,17 @@ def delete_security_group(neutron_client, secgroup_id):
# *********************************************
# GLANCE
# *********************************************
-def get_images(glance_client):
+def get_images(conn):
try:
- images = glance_client.images.list()
+ images = conn.image.images()
return images
except Exception as e:
logger.error("Error [get_images]: %s" % e)
return None
-def get_image_id(glance_client, image_name):
- images = glance_client.images.list()
+def get_image_id(conn, image_name):
+ images = conn.image.images()
id = ''
for i in images:
if i.name == image_name:
@@ -1162,7 +1066,7 @@ def get_image_id(glance_client, image_name):
return id
-def create_glance_image(glance_client,
+def create_glance_image(conn,
image_name,
file_path,
disk="qcow2",
@@ -1173,39 +1077,38 @@ def create_glance_image(glance_client,
logger.error("Error: file %s does not exist." % file_path)
return None
try:
- image_id = get_image_id(glance_client, image_name)
+ image_id = get_image_id(conn, image_name)
if image_id != '':
logger.info("Image %s already exists." % image_name)
else:
logger.info("Creating image '%s' from '%s'..." % (image_name,
file_path))
-
- image = glance_client.images.create(name=image_name,
- visibility=public,
+ with open(file_path) as image_data:
+ image = conn.image.upload_image(name=image_name,
+ is_public=public,
disk_format=disk,
container_format=container,
+ data=image_data,
**extra_properties)
image_id = image.id
- with open(file_path) as image_data:
- glance_client.images.upload(image_id, image_data)
return image_id
except Exception as e:
- logger.error("Error [create_glance_image(glance_client, '%s', '%s', "
+ logger.error("Error [create_glance_image(image, '%s', '%s', "
"'%s')]: %s" % (image_name, file_path, public, e))
return None
def get_or_create_image(name, path, format, extra_properties):
image_exists = False
- glance_client = get_glance_client()
+ conn = get_os_connection()
- image_id = get_image_id(glance_client, name)
+ image_id = get_image_id(conn, name)
if image_id != '':
logger.info("Using existing image '%s'..." % name)
image_exists = True
else:
logger.info("Creating image '%s' from '%s'..." % (name, path))
- image_id = create_glance_image(glance_client,
+ image_id = create_glance_image(conn,
name,
path,
format,
@@ -1219,12 +1122,12 @@ def get_or_create_image(name, path, format, extra_properties):
return image_exists, image_id
-def delete_glance_image(glance_client, image_id):
+def delete_glance_image(conn, image_id):
try:
- glance_client.images.delete(image_id)
+ conn.image.delete_image(image_id)
return True
except Exception as e:
- logger.error("Error [delete_glance_image(glance_client, '%s')]: %s"
+ logger.error("Error [delete_glance_image(image, '%s')]: %s"
% (image_id, e))
return False
@@ -1232,44 +1135,47 @@ def delete_glance_image(glance_client, image_id):
# *********************************************
# CINDER
# *********************************************
-def get_volumes(cinder_client):
+def get_volumes(conn):
try:
- volumes = cinder_client.volumes.list(search_opts={'all_tenants': 1})
+ volumes = conn.block_store.volumes(all_tenants=1)
return volumes
except Exception as e:
- logger.error("Error [get_volumes(cinder_client)]: %s" % e)
+ logger.error("Error [get_volumes(volume)]: %s" % e)
return None
-def update_cinder_quota(cinder_client, tenant_id, vols_quota,
+def update_cinder_quota(cloud, tenant_id, vols_quota,
snapshots_quota, gigabytes_quota):
quotas_values = {"volumes": vols_quota,
"snapshots": snapshots_quota,
"gigabytes": gigabytes_quota}
try:
- cinder_client.quotas.update(tenant_id, **quotas_values)
+ cloud.set_volume_quotas(tenant_id, **quotas_values)
return True
except Exception as e:
- logger.error("Error [update_cinder_quota(cinder_client, '%s', '%s', "
+ logger.error("Error [update_cinder_quota(volume, '%s', '%s', "
"'%s' '%s')]: %s" % (tenant_id, vols_quota,
snapshots_quota, gigabytes_quota, e))
return False
-def delete_volume(cinder_client, volume_id, forced=False):
+def delete_volume(cloud, volume_id, forced=False):
try:
if forced:
try:
- cinder_client.volumes.detach(volume_id)
- except:
+ volume = cloud.get_volume(volume_id)
+ for attachment in volume.attachments:
+ server = cloud.get_server(attachment.server_id)
+ cloud.detach_volume(server, volume)
+ except Exception:
logger.error(sys.exc_info()[0])
- cinder_client.volumes.force_delete(volume_id)
+ cloud.delete_volume(volume_id, force=True)
else:
- cinder_client.volumes.delete(volume_id)
+ cloud.delete_volume(volume_id)
return True
except Exception as e:
- logger.error("Error [delete_volume(cinder_client, '%s', '%s')]: %s"
+ logger.error("Error [delete_volume(volume, '%s', '%s')]: %s"
% (volume_id, str(forced), e))
return False
@@ -1389,9 +1295,9 @@ def get_or_create_tenant_for_vnf(keystone_client, tenant_name,
return True
else:
return False
- except:
+ except Exception:
raise Exception("Impossible to create a Tenant for the VNF {}".format(
- tenant_name))
+ tenant_name))
def create_user(keystone_client, user_name, user_password,
@@ -1449,10 +1355,10 @@ def get_or_create_user_for_vnf(keystone_client, vnf_ref):
role_id = get_role_id(keystone_client, 'admin')
tenant_id = get_tenant_id(keystone_client, vnf_ref)
add_role_user(keystone_client, user_id, role_id, tenant_id)
- except:
+ except Exception:
logger.warn("Cannot associate user to role admin on tenant")
return created
- except:
+ except Exception:
raise Exception("Impossible to create a user for the VNF {}".format(
vnf_ref))
@@ -1498,10 +1404,52 @@ def delete_user(keystone_client, user_id):
# *********************************************
# HEAT
# *********************************************
-def get_resource(heat_client, stack_id, resource):
+def create_stack(conn, **kwargs):
try:
- resources = heat_client.resources.get(stack_id, resource)
- return resources
+ stack = conn.orchestration.create_stack(**kwargs)
+ stack_id = stack.id
+ if stack_id is None:
+ logger.error("Stack create start failed")
+ raise SystemError("Stack create start failed")
+ return stack_id
except Exception as e:
- logger.error("Error [get_resource]: %s" % e)
+ logger.error("Error [create_stack(orchestration)]: %s" % e)
return None
+
+
+def update_stack(conn, stack_id, **kwargs):
+ try:
+ conn.orchestration.update_stack(stack_id, **kwargs)
+ return True
+ except Exception as e:
+ logger.error("Error [update_stack(orchestration)]: %s" % e)
+ return False
+
+
+def delete_stack(conn, stack_id):
+ try:
+ conn.orchestration.delete_stack(stack_id)
+ return True
+ except Exception as e:
+ logger.error("Error [delete_stack(orchestration)]: %s" % e)
+ return False
+
+
+def list_stacks(conn, **kwargs):
+ try:
+ result = conn.orchestration.stacks(**kwargs)
+ return result
+ except Exception as e:
+ logger.error("Error [list_stack(orchestration)]: %s" % e)
+ return None
+
+
+def get_output(conn, stack_id, output_key):
+ try:
+ stack = conn.orchestration.get_stack(stack_id)
+ for output in stack.outputs:
+ if output['output_key'] == output_key:
+ return output
+ except ResourceNotFound as e:
+ logger.error("Error [get_output(orchestration)]: %s" % e)
+ return None
diff --git a/sdnvpn/lib/quagga.py b/sdnvpn/lib/quagga.py
index 0ea206e..6efd6a9 100644
--- a/sdnvpn/lib/quagga.py
+++ b/sdnvpn/lib/quagga.py
@@ -22,12 +22,12 @@ logger = logging.getLogger('sdnvpn-quagga')
COMMON_CONFIG = config.CommonConfig()
-def odl_add_neighbor(neighbor_ip, controller_ip, controller):
- # Explicitly pass controller_ip because controller.ip
+def odl_add_neighbor(neighbor_ip, odl_ip, odl_node):
+ # Explicitly pass odl_ip because odl_node.ip
# Might not be accessible from the Quagga instance
command = 'configure-bgp -op add-neighbor --as-num 200'
- command += ' --ip %s --use-source-ip %s' % (neighbor_ip, controller_ip)
- success = run_odl_cmd(controller, command)
+ command += ' --ip %s --use-source-ip %s' % (neighbor_ip, odl_ip)
+ success = run_odl_cmd(odl_node, command)
# The run_cmd api is really whimsical
logger.info("Maybe stdout of %s: %s", command, success)
return success
@@ -42,20 +42,20 @@ def bootstrap_quagga(fip_addr, controller_ip):
return rc == 0
-def gen_quagga_setup_script(controller_ip,
+def gen_quagga_setup_script(odl_ip,
fake_floating_ip,
ext_net_mask,
ip_prefix, rd, irt, ert):
with open(COMMON_CONFIG.quagga_setup_script_path) as f:
template = f.read()
- script = template % (controller_ip,
- fake_floating_ip,
- ext_net_mask,
- ip_prefix, rd, irt, ert)
+ script = template.format(odl_ip,
+ fake_floating_ip,
+ ext_net_mask,
+ ip_prefix, rd, irt, ert)
return script
-def check_for_peering(controller):
+def check_for_peering(odl_node):
cmd = 'show-bgp --cmd \\"ip bgp neighbors\\"'
tries = 20
neighbors = None
@@ -64,7 +64,7 @@ def check_for_peering(controller):
while tries > 0:
if neighbors and 'Established' in neighbors:
break
- neighbors = run_odl_cmd(controller, cmd)
+ neighbors = run_odl_cmd(odl_node, cmd)
logger.info("Output of %s: %s", cmd, neighbors)
if neighbors:
opens = opens_regex.search(neighbors)
diff --git a/sdnvpn/lib/results.py b/sdnvpn/lib/results.py
index e1a5e5a..924b921 100644
--- a/sdnvpn/lib/results.py
+++ b/sdnvpn/lib/results.py
@@ -17,7 +17,8 @@ logger = logging.getLogger('sdnvpn-results')
class Results(object):
- def __init__(self, line_length):
+ def __init__(self, line_length, conn=None):
+ self.conn = conn
self.line_length = line_length
self.test_result = "PASS"
self.summary = ""
@@ -29,7 +30,8 @@ class Results(object):
vm_source,
vm_target,
expected="PASS", timeout=30):
- ip_target = vm_target.networks.itervalues().next()[0]
+ ip_target = self.conn.compute.get_server(vm_target).\
+ addresses.values()[0][0]['addr']
self.get_ping_status_target_ip(vm_source, vm_target.name,
ip_target, expected, timeout)
@@ -38,8 +40,10 @@ class Results(object):
target_name,
ip_target,
expected="PASS", timeout=30):
- console_log = vm_source.get_console_output()
- ip_source = vm_source.networks.itervalues().next()[0]
+ console_log = self.conn.compute.\
+ get_server_console_output(vm_source)['output']
+ ip_source = self.conn.compute.get_server(vm_source).\
+ addresses.values()[0][0]['addr']
if "request failed" in console_log:
# Normally, cirros displays this message when userdata fails
logger.debug("It seems userdata is not supported in "
@@ -59,7 +63,8 @@ class Results(object):
tab, target_name, ip_target,
tab, expected_result))
while True:
- console_log = vm_source.get_console_output()
+ console_log = self.conn.compute.\
+ get_server_console_output(vm_source)['output']
# the console_log is a long string, we want to take
# the last 4 lines (for example)
lines = console_log.split('\n')
@@ -128,9 +133,12 @@ class Results(object):
def check_ssh_output(self, vm_source, vm_target,
expected, timeout=30):
- console_log = vm_source.get_console_output()
- ip_source = vm_source.networks.itervalues().next()[0]
- ip_target = vm_target.networks.itervalues().next()[0]
+ console_log = self.conn.compute.\
+ get_server_console_output(vm_source)['output']
+ ip_source = self.conn.compute.get_server(vm_source).\
+ addresses.values()[0][0]['addr']
+ ip_target = self.conn.compute.get_server(vm_target).\
+ addresses.values()[0][0]['addr']
if "request failed" in console_log:
# Normally, cirros displays this message when userdata fails
@@ -148,7 +156,8 @@ class Results(object):
tab, vm_target.name, ip_target,
tab, expected))
while True:
- console_log = vm_source.get_console_output()
+ console_log = self.conn.compute.\
+ get_server_console_output(vm_source)['output']
# the console_log is a long string, we want to take
# the last 4 lines (for example)
lines = console_log.split('\n')
diff --git a/sdnvpn/lib/utils.py b/sdnvpn/lib/utils.py
index e43750c..4c35edc 100644
--- a/sdnvpn/lib/utils.py
+++ b/sdnvpn/lib/utils.py
@@ -14,9 +14,12 @@ import time
import requests
import re
import subprocess
+import yaml
from concurrent.futures import ThreadPoolExecutor
+from openstack.exceptions import ResourceNotFound, NotFoundException
from requests.auth import HTTPBasicAuth
+from functest.utils import env
from opnfv.deployment.factory import Factory as DeploymentFactory
from sdnvpn.lib import config as sdnvpn_config
@@ -26,8 +29,10 @@ logger = logging.getLogger('sdnvpn_test_utils')
common_config = sdnvpn_config.CommonConfig()
-ODL_USER = 'admin'
-ODL_PASS = 'admin'
+ODL_USER = env.get('SDN_CONTROLLER_USER')
+ODL_PASSWORD = env.get('SDN_CONTROLLER_PASSWORD')
+ODL_IP = env.get('SDN_CONTROLLER_IP')
+ODL_PORT = env.get('SDN_CONTROLLER_RESTCONFPORT')
executor = ThreadPoolExecutor(5)
@@ -66,9 +71,9 @@ def create_custom_flavor():
common_config.custom_flavor_vcpus)
-def create_net(neutron_client, name):
+def create_net(conn, name):
logger.debug("Creating network %s", name)
- net_id = os_utils.create_neutron_net(neutron_client, name)
+ net_id = os_utils.create_neutron_net(conn, name)
if not net_id:
logger.error(
"There has been a problem when creating the neutron network")
@@ -77,10 +82,10 @@ def create_net(neutron_client, name):
return net_id
-def create_subnet(neutron_client, name, cidr, net_id):
+def create_subnet(conn, name, cidr, net_id):
logger.debug("Creating subnet %s in network %s with cidr %s",
name, net_id, cidr)
- subnet_id = os_utils.create_neutron_subnet(neutron_client,
+ subnet_id = os_utils.create_neutron_subnet(conn,
name,
cidr,
net_id)
@@ -92,12 +97,12 @@ def create_subnet(neutron_client, name, cidr, net_id):
return subnet_id
-def create_network(neutron_client, net, subnet1, cidr1,
+def create_network(conn, net, subnet1, cidr1,
router, subnet2=None, cidr2=None):
"""Network assoc won't work for networks/subnets created by this function.
It is an ODL limitation due to it handling routers as vpns.
See https://bugs.opendaylight.org/show_bug.cgi?id=6962"""
- network_dic = os_utils.create_network_full(neutron_client,
+ network_dic = os_utils.create_network_full(conn,
net,
subnet1,
router,
@@ -114,7 +119,7 @@ def create_network(neutron_client, net, subnet1, cidr1,
if subnet2 is not None:
logger.debug("Creating and attaching a second subnet...")
subnet_id = os_utils.create_neutron_subnet(
- neutron_client, subnet2, cidr2, net_id)
+ conn, subnet2, cidr2, net_id)
if not subnet_id:
logger.error(
"There has been a problem when creating the second subnet")
@@ -124,16 +129,15 @@ def create_network(neutron_client, net, subnet1, cidr1,
return net_id, subnet_id, router_id
-def get_port(neutron_client, instance_id):
- ports = os_utils.get_port_list(neutron_client)
- if ports is not None:
- for port in ports:
- if port['device_id'] == instance_id:
- return port
+def get_port(conn, instance_id):
+ ports = os_utils.get_port_list(conn)
+ for port in ports:
+ if port.device_id == instance_id:
+ return port
return None
-def update_port_allowed_address_pairs(neutron_client, port_id, address_pairs):
+def update_port_allowed_address_pairs(conn, port_id, address_pairs):
if len(address_pairs) <= 0:
return
allowed_address_pairs = []
@@ -141,30 +145,27 @@ def update_port_allowed_address_pairs(neutron_client, port_id, address_pairs):
address_pair_dict = {'ip_address': address_pair.ipaddress,
'mac_address': address_pair.macaddress}
allowed_address_pairs.append(address_pair_dict)
- json_body = {'port': {
- "allowed_address_pairs": allowed_address_pairs
- }}
try:
- port = neutron_client.update_port(port=port_id,
- body=json_body)
- return port['port']['id']
+ port = conn.network.\
+ update_port(port_id, allowed_address_pairs=allowed_address_pairs)
+ return port.id
except Exception as e:
- logger.error("Error [update_neutron_port(neutron_client, '%s', '%s')]:"
+ logger.error("Error [update_neutron_port(network, '%s', '%s')]:"
" %s" % (port_id, address_pairs, e))
return None
-def create_instance(nova_client,
+def create_instance(conn,
name,
image_id,
network_id,
sg_id,
secgroup_name=None,
fixed_ip=None,
- compute_node='',
+ compute_node=None,
userdata=None,
- files=None,
+ files=[],
**kwargs
):
if 'flavor' not in kwargs:
@@ -192,10 +193,12 @@ def create_instance(nova_client,
logger.error("Error while booting instance.")
raise Exception("Error while booting instance {}".format(name))
else:
+ # Retrieve IP of INSTANCE
+ network_name = conn.network.get_network(network_id).name
+ instance_ip = conn.compute.get_server(instance).\
+ addresses.get(network_name)[0]['addr']
logger.debug("Instance '%s' booted successfully. IP='%s'." %
- (name, instance.networks.itervalues().next()[0]))
- # Retrieve IP of INSTANCE
- # instance_ip = instance.networks.get(network_id)[0]
+ (name, instance_ip))
if secgroup_name:
logger.debug("Adding '%s' to security group '%s'..."
@@ -203,7 +206,7 @@ def create_instance(nova_client,
else:
logger.debug("Adding '%s' to security group '%s'..."
% (name, sg_id))
- os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)
+ os_utils.add_secgroup_to_instance(conn, instance.id, sg_id)
return instance
@@ -297,18 +300,16 @@ def get_installerHandler():
return None
else:
if installer_type in ["apex"]:
- developHandler = DeploymentFactory.get_handler(
- installer_type,
- installer_ip,
- 'root',
- pkey_file="/root/.ssh/id_rsa")
-
- if installer_type in ["fuel"]:
- developHandler = DeploymentFactory.get_handler(
- installer_type,
- installer_ip,
- 'root',
- 'r00tme')
+ installer_user = "root"
+ elif installer_type in ["fuel"]:
+ installer_user = "ubuntu"
+
+ developHandler = DeploymentFactory.get_handler(
+ installer_type,
+ installer_ip,
+ installer_user,
+ pkey_file="/root/.ssh/id_rsa")
+
return developHandler
@@ -321,18 +322,21 @@ def get_installer_ip():
return str(os.environ['INSTALLER_IP'])
-def get_instance_ip(instance):
- instance_ip = instance.networks.itervalues().next()[0]
+def get_instance_ip(conn, instance):
+ instance_ip = conn.compute.get_server(instance).\
+ addresses.values()[0][0]['addr']
return instance_ip
def wait_for_instance(instance, pattern=".* login:", tries=40):
logger.info("Waiting for instance %s to boot up" % instance.id)
+ conn = os_utils.get_os_connection()
sleep_time = 2
expected_regex = re.compile(pattern)
console_log = ""
while tries > 0 and not expected_regex.search(console_log):
- console_log = instance.get_console_output()
+ console_log = conn.compute.\
+ get_server_console_output(instance)['output']
time.sleep(sleep_time)
tries -= 1
@@ -371,6 +375,21 @@ def async_Wait_for_instances(instances, tries=40):
logger.error("one or more instances is not yet booted up")
+def wait_for_instance_delete(conn, instance_id, tries=30):
+ sleep_time = 2
+ instances = [instance_id]
+ logger.debug("Waiting for instance %s to be deleted"
+ % (instance_id))
+ while tries > 0 and instance_id in instances:
+ instances = [instance.id for instance in
+ os_utils.get_instances(conn)]
+ time.sleep(sleep_time)
+ tries -= 1
+ if instance_id in instances:
+ logger.error("Deletion of instance %s failed" %
+ (instance_id))
+
+
def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
tries = 30
sleep_time = 1
@@ -426,10 +445,10 @@ def wait_before_subtest(*args, **kwargs):
time.sleep(30)
-def assert_and_get_compute_nodes(nova_client, required_node_number=2):
+def assert_and_get_compute_nodes(conn, required_node_number=2):
"""Get the compute nodes in the deployment
Exit if the deployment doesn't have enough compute nodes"""
- compute_nodes = os_utils.get_hypervisors(nova_client)
+ compute_nodes = os_utils.get_hypervisors(conn)
num_compute_nodes = len(compute_nodes)
if num_compute_nodes < 2:
@@ -444,13 +463,13 @@ def assert_and_get_compute_nodes(nova_client, required_node_number=2):
return compute_nodes
-def open_icmp(neutron_client, security_group_id):
- if os_utils.check_security_group_rules(neutron_client,
+def open_icmp(conn, security_group_id):
+ if os_utils.check_security_group_rules(conn,
security_group_id,
'ingress',
'icmp'):
- if not os_utils.create_secgroup_rule(neutron_client,
+ if not os_utils.create_secgroup_rule(conn,
security_group_id,
'ingress',
'icmp'):
@@ -460,14 +479,14 @@ def open_icmp(neutron_client, security_group_id):
% security_group_id)
-def open_http_port(neutron_client, security_group_id):
- if os_utils.check_security_group_rules(neutron_client,
+def open_http_port(conn, security_group_id):
+ if os_utils.check_security_group_rules(conn,
security_group_id,
'ingress',
'tcp',
80, 80):
- if not os_utils.create_secgroup_rule(neutron_client,
+ if not os_utils.create_secgroup_rule(conn,
security_group_id,
'ingress',
'tcp',
@@ -479,14 +498,14 @@ def open_http_port(neutron_client, security_group_id):
% security_group_id)
-def open_bgp_port(neutron_client, security_group_id):
- if os_utils.check_security_group_rules(neutron_client,
+def open_bgp_port(conn, security_group_id):
+ if os_utils.check_security_group_rules(conn,
security_group_id,
'ingress',
'tcp',
179, 179):
- if not os_utils.create_secgroup_rule(neutron_client,
+ if not os_utils.create_secgroup_rule(conn,
security_group_id,
'ingress',
'tcp',
@@ -518,17 +537,19 @@ def exec_cmd(cmd, verbose):
return output, success
-def check_odl_fib(ip, controller_ip):
+def check_odl_fib(ip):
"""Check that there is an entry in the ODL Fib for `ip`"""
- url = "http://" + controller_ip + \
- ":8181/restconf/config/odl-fib:fibEntries/"
+ url = ("http://{user}:{password}@{ip}:{port}/restconf/config/"
+ "odl-fib:fibEntries/".format(user=ODL_USER,
+ password=ODL_PASSWORD, ip=ODL_IP,
+ port=ODL_PORT))
    logger.debug("Querying '%s' for FIB entries", url)
- res = requests.get(url, auth=(ODL_USER, ODL_PASS))
+ res = requests.get(url, auth=(ODL_USER, ODL_PASSWORD))
if res.status_code != 200:
logger.error("OpenDaylight response status code: %s", res.status_code)
return False
logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
- % controller_ip)
+ % ODL_IP)
logger.debug("OpenDaylight FIB: \n%s" % res.text)
return ip in res.text
@@ -546,7 +567,7 @@ def run_odl_cmd(odl_node, cmd):
return odl_node.run_cmd(karaf_cmd)
-def wait_for_cloud_init(instance):
+def wait_for_cloud_init(conn, instance):
success = True
# ubuntu images take a long time to start
tries = 20
@@ -554,7 +575,8 @@ def wait_for_cloud_init(instance):
logger.info("Waiting for cloud init of instance: {}"
"".format(instance.name))
while tries > 0:
- instance_log = instance.get_console_output()
+ instance_log = conn.compute.\
+ get_server_console_output(instance)['output']
if "Failed to run module" in instance_log:
success = False
logger.error("Cloud init failed to run. Reason: %s",
@@ -577,36 +599,52 @@ def wait_for_cloud_init(instance):
def attach_instance_to_ext_br(instance, compute_node):
- libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
+ libvirt_instance_name = instance.instance_name
installer_type = str(os.environ['INSTALLER_TYPE'].lower())
- if installer_type == "fuel":
+ # In Apex, br-ex (or br-floating for Fuel) is an ovs bridge and virsh
+ # attach-interface won't just work. We work around it by creating a linux
+ # bridge, attaching that to br-ex (or br-floating for Fuel) with a
+ # veth pair and virsh-attaching the instance to the linux-bridge
+ if installer_type in ["fuel"]:
+ bridge = "br-floating"
+ elif installer_type in ["apex"]:
bridge = "br-ex"
- elif installer_type == "apex":
- # In Apex, br-ex is an ovs bridge and virsh attach-interface
- # won't just work. We work around it by creating a linux
- # bridge, attaching that to br-ex with a veth pair
- # and virsh-attaching the instance to the linux-bridge
- bridge = "br-quagga"
- cmd = """
- set -e
- if ! sudo brctl show |grep -q ^{bridge};then
- sudo brctl addbr {bridge}
- sudo ip link set {bridge} up
- sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
- sudo ip link set dev ovs-quagga-tap up
- sudo ip link set dev quagga-tap up
- sudo ovs-vsctl add-port br-ex ovs-quagga-tap
- sudo brctl addif {bridge} quagga-tap
- fi
- """
- compute_node.run_cmd(cmd.format(bridge=bridge))
+ else:
+ logger.warn("installer type %s is neither fuel nor apex."
+ % installer_type)
+ return
+
+ cmd = """
+ set -e
+ if ! sudo brctl show |grep -q ^br-quagga;then
+ sudo brctl addbr br-quagga
+ sudo ip link set br-quagga up
+ sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
+ sudo ip link set dev ovs-quagga-tap up
+ sudo ip link set dev quagga-tap up
+ sudo ovs-vsctl add-port {bridge} ovs-quagga-tap
+ sudo brctl addif br-quagga quagga-tap
+ fi
+ """
+ compute_node.run_cmd(cmd.format(bridge=bridge))
compute_node.run_cmd("sudo virsh attach-interface %s"
- " bridge %s" % (libvirt_instance_name, bridge))
+ " bridge br-quagga" % (libvirt_instance_name))
def detach_instance_from_ext_br(instance, compute_node):
- libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
+ libvirt_instance_name = instance.instance_name
+ installer_type = str(os.environ['INSTALLER_TYPE'].lower())
+ # This function undoes all the actions performed by
+ # attach_instance_to_ext_br on Fuel and Apex installers.
+ if installer_type in ["fuel"]:
+ bridge = "br-floating"
+ elif installer_type in ["apex"]:
+ bridge = "br-ex"
+ else:
+ logger.warn("installer type %s is neither fuel nor apex."
+ % installer_type)
+ return
mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
"grep running | awk '{print $2}'); "
"do echo -n ; sudo virsh dumpxml $vm| "
@@ -615,33 +653,23 @@ def detach_instance_from_ext_br(instance, compute_node):
" --type bridge --mac %s"
% (libvirt_instance_name, mac))
- installer_type = str(os.environ['INSTALLER_TYPE'].lower())
- if installer_type == "fuel":
- bridge = "br-ex"
- elif installer_type == "apex":
- # In Apex, br-ex is an ovs bridge and virsh attach-interface
- # won't just work. We work around it by creating a linux
- # bridge, attaching that to br-ex with a veth pair
- # and virsh-attaching the instance to the linux-bridge
- bridge = "br-quagga"
- cmd = """
- sudo brctl delif {bridge} quagga-tap &&
- sudo ovs-vsctl del-port br-ex ovs-quagga-tap &&
- sudo ip link set dev quagga-tap down &&
- sudo ip link set dev ovs-quagga-tap down &&
- sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
- sudo ip link set {bridge} down &&
- sudo brctl delbr {bridge}
- """
- compute_node.run_cmd(cmd.format(bridge=bridge))
-
-
-def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
- subnet_ids, router_ids, network_ids):
+ cmd = """
+ sudo brctl delif br-quagga quagga-tap &&
+ sudo ovs-vsctl del-port {bridge} ovs-quagga-tap &&
+ sudo ip link set dev quagga-tap down &&
+ sudo ip link set dev ovs-quagga-tap down &&
+ sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
+ sudo ip link set br-quagga down &&
+ sudo brctl delbr br-quagga
+ """
+ compute_node.run_cmd(cmd.format(bridge=bridge))
+
+def cleanup_neutron(conn, neutron_client, floatingip_ids, bgpvpn_ids,
+ interfaces, subnet_ids, router_ids, network_ids):
if len(floatingip_ids) != 0:
for floatingip_id in floatingip_ids:
- if not os_utils.delete_floating_ip(neutron_client, floatingip_id):
+ if not os_utils.delete_floating_ip(conn, floatingip_id):
logger.error('Fail to delete all floating ips. '
'Floating ip with id {} was not deleted.'.
format(floatingip_id))
@@ -653,7 +681,7 @@ def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
if len(interfaces) != 0:
for router_id, subnet_id in interfaces:
- if not os_utils.remove_interface_router(neutron_client,
+ if not os_utils.remove_interface_router(conn,
router_id, subnet_id):
logger.error('Fail to delete all interface routers. '
'Interface router with id {} was not deleted.'.
@@ -661,14 +689,14 @@ def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
if len(router_ids) != 0:
for router_id in router_ids:
- if not os_utils.remove_gateway_router(neutron_client, router_id):
+ if not os_utils.remove_gateway_router(conn, router_id):
logger.error('Fail to delete all gateway routers. '
'Gateway router with id {} was not deleted.'.
format(router_id))
if len(subnet_ids) != 0:
for subnet_id in subnet_ids:
- if not os_utils.delete_neutron_subnet(neutron_client, subnet_id):
+ if not os_utils.delete_neutron_subnet(conn, subnet_id):
logger.error('Fail to delete all subnets. '
'Subnet with id {} was not deleted.'.
format(subnet_id))
@@ -676,7 +704,7 @@ def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
if len(router_ids) != 0:
for router_id in router_ids:
- if not os_utils.delete_neutron_router(neutron_client, router_id):
+ if not os_utils.delete_neutron_router(conn, router_id):
logger.error('Fail to delete all routers. '
'Router with id {} was not deleted.'.
format(router_id))
@@ -684,7 +712,7 @@ def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
if len(network_ids) != 0:
for network_id in network_ids:
- if not os_utils.delete_neutron_net(neutron_client, network_id):
+ if not os_utils.delete_neutron_net(conn, network_id):
logger.error('Fail to delete all networks. '
'Network with id {} was not deleted.'.
format(network_id))
@@ -692,24 +720,25 @@ def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
return True
-def cleanup_nova(nova_client, instance_ids, flavor_ids=None):
+def cleanup_nova(conn, instance_ids, flavor_ids=None):
if flavor_ids is not None and len(flavor_ids) != 0:
for flavor_id in flavor_ids:
- nova_client.flavors.delete(flavor_id)
+ conn.compute.delete_flavor(flavor_id)
if len(instance_ids) != 0:
for instance_id in instance_ids:
- if not os_utils.delete_instance(nova_client, instance_id):
+ if not os_utils.delete_instance(conn, instance_id):
logger.error('Fail to delete all instances. '
'Instance with id {} was not deleted.'.
format(instance_id))
- return False
+ else:
+ wait_for_instance_delete(conn, instance_id)
return True
-def cleanup_glance(glance_client, image_ids):
+def cleanup_glance(conn, image_ids):
if len(image_ids) != 0:
for image_id in image_ids:
- if not os_utils.delete_glance_image(glance_client, image_id):
+ if not os_utils.delete_glance_image(conn, image_id):
logger.error('Fail to delete all images. '
'Image with id {} was not deleted.'.
format(image_id))
@@ -772,6 +801,15 @@ def is_fail_mode_secure():
if not openstack_node.is_active():
continue
+ installer_type = str(os.environ['INSTALLER_TYPE'].lower())
+ if installer_type in ['fuel']:
+ if (
+ 'controller' in openstack_node.roles or
+ 'opendaylight' in openstack_node.roles or
+ 'installer' in openstack_node.roles
+ ):
+ continue
+
ovs_int_list = (openstack_node.run_cmd(get_ovs_int_cmd).
strip().split('\n'))
if 'br-int' in ovs_int_list:
@@ -789,55 +827,49 @@ def is_fail_mode_secure():
return is_secure
-def update_nw_subnet_port_quota(neutron_client, tenant_id, nw_quota,
+def update_nw_subnet_port_quota(conn, tenant_id, nw_quota,
subnet_quota, port_quota, router_quota):
- json_body = {"quota": {
- "network": nw_quota,
- "subnet": subnet_quota,
- "port": port_quota,
- "router": router_quota
- }}
-
try:
- neutron_client.update_quota(tenant_id=tenant_id,
- body=json_body)
+ conn.network.update_quota(tenant_id, networks=nw_quota,
+ subnets=subnet_quota, ports=port_quota,
+ routers=router_quota)
return True
except Exception as e:
- logger.error("Error [update_nw_subnet_port_quota(neutron_client,"
+ logger.error("Error [update_nw_subnet_port_quota(network,"
" '%s', '%s', '%s', '%s, %s')]: %s" %
(tenant_id, nw_quota, subnet_quota,
port_quota, router_quota, e))
return False
-def update_instance_quota_class(nova_client, instances_quota):
+def update_instance_quota_class(cloud, instances_quota):
try:
- nova_client.quota_classes.update("default", instances=instances_quota)
+ cloud.set_compute_quotas('admin', instances=instances_quota)
return True
except Exception as e:
- logger.error("Error [update_instance_quota_class(nova_client,"
+ logger.error("Error [update_instance_quota_class(compute,"
" '%s' )]: %s" % (instances_quota, e))
return False
-def get_neutron_quota(neutron_client, tenant_id):
+def get_neutron_quota(conn, tenant_id):
try:
- return neutron_client.show_quota(tenant_id=tenant_id)['quota']
- except Exception as e:
- logger.error("Error in getting neutron quota for tenant "
+ return conn.network.get_quota(tenant_id)
+ except ResourceNotFound as e:
+ logger.error("Error in getting network quota for tenant "
" '%s' )]: %s" % (tenant_id, e))
raise
-def get_nova_instances_quota(nova_client):
+def get_nova_instances_quota(cloud):
try:
- return nova_client.quota_classes.get("default").instances
+ return cloud.get_compute_quotas('admin').instances
except Exception as e:
logger.error("Error in getting nova instances quota: %s" % e)
raise
-def update_router_extra_route(neutron_client, router_id, extra_routes):
+def update_router_extra_route(conn, router_id, extra_routes):
if len(extra_routes) <= 0:
return
routes_list = []
@@ -845,26 +877,19 @@ def update_router_extra_route(neutron_client, router_id, extra_routes):
route_dict = {'destination': extra_route.destination,
'nexthop': extra_route.nexthop}
routes_list.append(route_dict)
- json_body = {'router': {
- "routes": routes_list
- }}
try:
- neutron_client.update_router(router_id, body=json_body)
+ conn.network.update_router(router_id, routes=routes_list)
return True
except Exception as e:
logger.error("Error in updating router with extra route: %s" % e)
raise
-def update_router_no_extra_route(neutron_client, router_ids):
- json_body = {'router': {
- "routes": [
- ]}}
-
+def update_router_no_extra_route(conn, router_ids):
for router_id in router_ids:
try:
- neutron_client.update_router(router_id, body=json_body)
+ conn.network.update_router(router_id, routes=[])
return True
except Exception as e:
logger.error("Error in clearing extra route: %s" % e)
@@ -904,25 +929,42 @@ def get_ovs_flows(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
return cmd_out_lines
-def get_odl_bgp_entity_owner(controllers):
+def get_node_ip_and_netmask(node, iface):
+ cmd = "ip a | grep {iface} | grep inet | awk '{{print $2}}'"\
+ .format(iface=iface)
+ mgmt_net_cidr = node.run_cmd(cmd).strip().split('\n')
+ mgmt_ip = mgmt_net_cidr[0].split('/')[0]
+ mgmt_netmask = mgmt_net_cidr[0].split('/')[1]
+
+ return mgmt_ip, mgmt_netmask
+
+
+def get_odl_bgp_entity_owner(odl_nodes):
""" Finds the ODL owner of the BGP entity in the cluster.
When ODL runs in clustering mode we need to execute the BGP speaker
related commands to that ODL which is the owner of the BGP entity.
- :param controllers: list of OS controllers
- :return controller: OS controller in which ODL BGP entity owner runs
+ :param odl_nodes: list of Opendaylight nodes
+ :return odl_node: Opendaylight node in which ODL BGP entity owner runs
"""
- if len(controllers) == 1:
- return controllers[0]
+ if len(odl_nodes) == 1:
+ return odl_nodes[0]
else:
- url = ('http://admin:admin@{ip}:8081/restconf/'
+ url = ('http://{user}:{password}@{ip}:{port}/restconf/'
'operational/entity-owners:entity-owners/entity-type/bgp'
- .format(ip=controllers[0].ip))
+ .format(user=ODL_USER, password=ODL_PASSWORD, ip=ODL_IP,
+ port=ODL_PORT))
+
+ installer_type = str(os.environ['INSTALLER_TYPE'].lower())
+ if installer_type in ['apex']:
+ node_user = 'heat-admin'
+ elif installer_type in ['fuel']:
+ node_user = 'ubuntu'
remote_odl_akka_conf = ('/opt/opendaylight/configuration/'
'initial/akka.conf')
- remote_odl_home_akka_conf = '/home/heat-admin/akka.conf'
+ remote_odl_home_akka_conf = '/home/{0}/akka.conf'.format(node_user)
local_tmp_akka_conf = '/tmp/akka.conf'
try:
json_output = requests.get(url).json()
@@ -932,33 +974,43 @@ def get_odl_bgp_entity_owner(controllers):
return None
odl_bgp_owner = json_output['entity-type'][0]['entity'][0]['owner']
- for controller in controllers:
-
- controller.run_cmd('sudo cp {0} /home/heat-admin/'
- .format(remote_odl_akka_conf))
- controller.run_cmd('sudo chmod 777 {0}'
- .format(remote_odl_home_akka_conf))
- controller.get_file(remote_odl_home_akka_conf, local_tmp_akka_conf)
+ for odl_node in odl_nodes:
+ if installer_type in ['apex']:
+ get_odl_id_cmd = 'sudo docker ps -qf name=opendaylight_api'
+ odl_id = odl_node.run_cmd(get_odl_id_cmd)
+ odl_node.run_cmd('sudo docker cp '
+ '{container_id}:{odl_akka_conf} '
+ '/home/{user}/'
+ .format(container_id=odl_id,
+ odl_akka_conf=remote_odl_akka_conf,
+ user=node_user))
+ elif installer_type in ['fuel']:
+ odl_node.run_cmd('sudo cp {0} /home/{1}/'
+ .format(remote_odl_akka_conf, node_user))
+ odl_node.run_cmd('sudo chmod 777 {0}'
+ .format(remote_odl_home_akka_conf))
+ odl_node.get_file(remote_odl_home_akka_conf, local_tmp_akka_conf)
for line in open(local_tmp_akka_conf):
if re.search(odl_bgp_owner, line):
- return controller
+ return odl_node
return None
-def add_quagga_external_gre_end_point(controllers, remote_tep_ip):
+def add_quagga_external_gre_end_point(odl_nodes, remote_tep_ip):
json_body = {'input':
{'destination-ip': remote_tep_ip,
'tunnel-type': "odl-interface:tunnel-type-mpls-over-gre"}
}
- url = ('http://{ip}:8081/restconf/operations/'
- 'itm-rpc:add-external-tunnel-endpoint'.format(ip=controllers[0].ip))
+ url = ('http://{ip}:{port}/restconf/operations/'
+ 'itm-rpc:add-external-tunnel-endpoint'.format(ip=ODL_IP,
+ port=ODL_PORT))
headers = {'Content-type': 'application/yang.data+json',
'Accept': 'application/yang.data+json'}
try:
requests.post(url, data=json.dumps(json_body),
headers=headers,
- auth=HTTPBasicAuth('admin', 'admin'))
+ auth=HTTPBasicAuth(ODL_USER, ODL_PASSWORD))
except Exception as e:
logger.error("Failed to create external tunnel endpoint on"
" ODL for external tep ip %s with error %s"
@@ -966,9 +1018,11 @@ def add_quagga_external_gre_end_point(controllers, remote_tep_ip):
return None
-def is_fib_entry_present_on_odl(controllers, ip_prefix, vrf_id):
- url = ('http://admin:admin@{ip}:8081/restconf/config/odl-fib:fibEntries/'
- 'vrfTables/{vrf}/'.format(ip=controllers[0].ip, vrf=vrf_id))
+def is_fib_entry_present_on_odl(odl_nodes, ip_prefix, vrf_id):
+ url = ('http://{user}:{password}@{ip}:{port}/restconf/config/'
+ 'odl-fib:fibEntries/vrfTables/{vrf}/'
+ .format(user=ODL_USER, password=ODL_PASSWORD, ip=ODL_IP,
+ port=ODL_PORT, vrf=vrf_id))
logger.error("url is %s" % url)
try:
vrf_table = requests.get(url).json()
@@ -982,3 +1036,135 @@ def is_fib_entry_present_on_odl(controllers, ip_prefix, vrf_id):
logger.error('Failed to find ip prefix %s with error %s'
% (ip_prefix, e))
return False
+
+
+def wait_stack_for_status(conn, stack_id, stack_status, limit=12):
+ """ Waits to reach specified stack status. To be used with
+ CREATE_COMPLETE and UPDATE_COMPLETE.
+ Will try a specific number of attempts at 10sec intervals
+ (default 2min)
+
+ :param stack_id: the stack id returned by create_stack api call
+ :param stack_status: the stack status waiting for
+ :param limit: the maximum number of attempts
+ """
+ logger.debug("Stack '%s' create started" % stack_id)
+
+ stack_create_complete = False
+ attempts = 0
+ while attempts < limit:
+ try:
+ stack_st = conn.orchestration.get_stack(stack_id).status
+ except NotFoundException:
+ logger.error("Stack create failed")
+ raise SystemError("Stack create failed")
+ return False
+ if stack_st == stack_status:
+ stack_create_complete = True
+ break
+ attempts += 1
+ time.sleep(10)
+
+ logger.debug("Stack status check: %s times" % attempts)
+ if stack_create_complete is False:
+ logger.error("Stack create failed")
+ raise SystemError("Stack create failed")
+ return False
+
+ return True
+
+
+def delete_stack_and_wait(conn, stack_id, limit=12):
+ """ Starts and waits for completion of delete stack
+
+ Will try a specific number of attempts at 10sec intervals
+ (default 2min)
+
+ :param stack_id: the id of the stack to be deleted
+ :param limit: the maximum number of attempts
+ """
+ delete_started = False
+ if stack_id is not None:
+ delete_started = os_utils.delete_stack(conn, stack_id)
+
+ if delete_started is True:
+        logger.debug("Stack delete successfully started")
+ else:
+ logger.error("Stack delete start failed")
+
+ stack_delete_complete = False
+ attempts = 0
+ while attempts < limit:
+ try:
+ stack_st = conn.orchestration.get_stack(stack_id).status
+ if stack_st == 'DELETE_COMPLETE':
+ stack_delete_complete = True
+ break
+ attempts += 1
+ time.sleep(10)
+ except NotFoundException:
+ stack_delete_complete = True
+ break
+
+ logger.debug("Stack status check: %s times" % attempts)
+ if not stack_delete_complete:
+ logger.error("Stack delete failed")
+ raise SystemError("Stack delete failed")
+ return False
+
+ return True
+
+
+def get_heat_environment(testcase, common_config):
+ """ Reads the heat parameters of a testcase into a yaml object
+
+    Each testcase where Heat Orchestration Template (HOT) is introduced
+ has an associated parameters section.
+ Reads testcase.heat_parameters section and read COMMON_CONFIG.flavor
+ and place it under parameters tree.
+
+    :param testcase: the testcase for which the HOT file is fetched
+ :param common_config: the common config section
+ :return environment: a yaml object to be used as environment
+ """
+ fl = common_config.default_flavor
+ param_dict = testcase.heat_parameters
+ param_dict['flavor'] = fl
+ env_dict = {'parameters': param_dict}
+ return env_dict
+
+
+def get_vms_from_stack_outputs(conn, stack_id, vm_stack_output_keys):
+ """ Converts a vm name from a heat stack output to a nova vm object
+
+ :param stack_id: the id of the stack to fetch the vms from
+ :param vm_stack_output_keys: a list of stack outputs with the vm names
+ :return vms: a list of vm objects corresponding to the outputs
+ """
+ vms = []
+ for vmk in vm_stack_output_keys:
+ vm_output = os_utils.get_output(conn, stack_id, vmk)
+ if vm_output is not None:
+ vm_name = vm_output['output_value']
+ logger.debug("vm '%s' read from heat output" % vm_name)
+ vm = os_utils.get_instance_by_name(conn, vm_name)
+ if vm is not None:
+ vms.append(vm)
+ return vms
+
+
+def merge_yaml(y1, y2):
+ """ Merge two yaml HOT into one
+
+ The parameters, resources and outputs sections are merged.
+
+ :param y1: the first yaml
+ :param y2: the second yaml
+ :return y: merged yaml
+ """
+ d1 = yaml.load(y1)
+ d2 = yaml.load(y2)
+ for key in ('parameters', 'resources', 'outputs'):
+ if key in d2:
+ d1[key].update(d2[key])
+ return yaml.dump(d1, default_flow_style=False)
diff --git a/sdnvpn/sh_utils/fetch-log-script.sh b/sdnvpn/sh_utils/fetch-log-script.sh
index c3c037d..9b0dc74 100755
--- a/sdnvpn/sh_utils/fetch-log-script.sh
+++ b/sdnvpn/sh_utils/fetch-log-script.sh
@@ -107,7 +107,11 @@ node(){
fi
done
# not all messages only tail the last 10k lines
- tail -n 10000 /var/log/messages > messages
+ if [ -f /var/log/messages ]; then
+ tail -n 10000 /var/log/messages > messages
+ elif [ -f /var/log/syslog ]; then
+ tail -n 10000 /var/log/syslog > messages
+ fi
}
_curl_data_store(){
@@ -137,7 +141,11 @@ datastore()
dump=$tmp_folder/dump-$HOSTNAME.txt
operational=$tmp_folder/Operational-Inventory-$HOSTNAME.txt
karaf_output=$tmp_folder/Karaf_out-$HOSTNAME.txt
- odl_ip_port=$(grep ^url= /etc/neutron/plugins/ml2/ml2_conf.ini |cut -d '/' -f3)
+ if [ -f /etc/neutron/plugins/ml2/ml2_conf.ini ]; then
+ odl_ip_port=$(grep ^url= /etc/neutron/plugins/ml2/ml2_conf.ini |cut -d '/' -f3)
+ else
+ odl_ip_port=$(netstat -tln | grep '8080\|8081\|8181\|8282' | awk 'NR==1 {print $4}')
+ fi
config_urls=( restconf/config/neutron:neutron/networks/ restconf/config/neutron:neutron/subnets/ restconf/config/neutron:neutron/ports/ restconf/config/neutron:neutron/routers/ restconf/config/itm:transport-zones/ restconf/config/itm-state:tunnels_state/ restconf/config/itm-state:external-tunnel-list/ restconf/config/itm-state:dpn-endpoints/ restconf/config/itm-config:vtep-config-schemas/ restconf/config/itm-config:tunnel-monitor-enabled/ restconf/config/itm-config:tunnel-monitor-interval/ restconf/config/interface-service-bindings:service-bindings/ restconf/config/l3vpn:vpn-instances/ restconf/config/ietf-interfaces:interfaces/ restconf/config/l3vpn:vpn-interfaces/ restconf/config/odl-fib:fibEntries restconf/config/neutronvpn:networkMaps restconf/config/neutronvpn:subnetmaps restconf/config/neutronvpn:vpnMaps restconf/config/neutronvpn:neutron-port-data restconf/config/id-manager:id-pools/ restconf/config/elan:elan-instances/ restconf/config/elan:elan-interfaces/ restconf/config/elan:elan-state/ restconf/config/elan:elan-forwarding-tables/ restconf/config/elan:elan-interface-forwarding-entries/ restconf/config/elan:elan-dpn-interfaces/ restconf/config/elan:elan-tag-name-map/ restconf/config/odl-nat:external-networks/ restconf/config/odl-nat:ext-routers/ restconf/config/odl-nat:intext-ip-port-map/ restconf/config/odl-nat:snatint-ip-port-map/ restconf/config/odl-l3vpn:vpn-instance-to-vpn-id/ restconf/config/neutronvpn:neutron-router-dpns/ restconf/operational/itm-config:tunnel-monitor-interval/ restconf/config/itm-config:tunnel-monitor-interval/ restconf/operational/itm-config:tunnel-monitor-params/ restconf/config/itm-config:tunnel-monitor-params/ restconf/config/vpnservice-dhcp:designated-switches-for-external-tunnels/ restconf/config/neutron:neutron/security-groups/ restconf/config/neutron:neutron/security-rules/ restconf/config/network-topology:network-topology/topology/hwvtep:1 restconf/config/network-topology:network-topology/topology/ovsdb:1 )
diff --git a/sdnvpn/test/functest/config.yaml b/sdnvpn/test/functest/config.yaml
index e910c77..3d2fd8b 100644
--- a/sdnvpn/test/functest/config.yaml
+++ b/sdnvpn/test/functest/config.yaml
@@ -1,13 +1,8 @@
+---
defaults:
- flavor: m1.tiny # adapt to your environment
+ flavor: m1.tiny # adapt to your environment
testcases:
- sdnvpn.test.functest.run_tempest:
- enabled: true
- order: 0
- description: Neutron BGPVPN tests in tempest
- image_name: bgpvpn-tempest-image
-
sdnvpn.test.functest.testcase_1:
enabled: true
order: 1
@@ -32,6 +27,31 @@ testcases:
targets2: '55:55'
route_distinguishers: '11:11'
+ sdnvpn.test.functest.testcase_1bis:
+ enabled: true
+ order: 14
+ description: Test bed for HOT introduction - same tests as case 1
+ image_name: sdnvpn-image
+ stack_name: stack-1bis
+ hot_file_name: artifacts/testcase_1bis.yaml
+ heat_parameters:
+ instance_1_name: sdnvpn-1-1
+ instance_2_name: sdnvpn-1-2
+ instance_3_name: sdnvpn-1-3
+ instance_4_name: sdnvpn-1-4
+ instance_5_name: sdnvpn-1-5
+ net_1_name: sdnvpn-1-1-net
+ subnet_1_name: sdnvpn-1-1-subnet
+ subnet_1_cidr: 10.10.10.0/24
+ net_2_name: sdnvpn-1-2-net
+ subnet_2_name: sdnvpn-1-2-subnet
+ subnet_2_cidr: 10.10.11.0/24
+ secgroup_name: sdnvpn-sg
+ secgroup_descr: Security group for SDNVPN test cases
+ targets1: '88:88'
+ targets2: '55:55'
+ route_distinguishers: '11:11'
+
sdnvpn.test.functest.testcase_2:
enabled: true
order: 2
@@ -66,6 +86,43 @@ testcases:
route_distinguishers1: '111:111'
route_distinguishers2: '222:222'
+ sdnvpn.test.functest.testcase_2bis:
+ enabled: true
+ order: 15
+ description: Tenant separation - same as test case 2
+ image_name: sdnvpn-image
+ stack_name: stack-2bis
+ hot_file_name: artifacts/testcase_2bis.yaml
+ heat_parameters:
+ instance_1_name: sdnvpn-2-1
+ instance_2_name: sdnvpn-2-2
+ instance_3_name: sdnvpn-2-3
+ instance_4_name: sdnvpn-2-4
+ instance_5_name: sdnvpn-2-5
+ instance_1_ip: 10.10.10.11
+ instance_2_ip: 10.10.10.12
+ instance_3_ip: 10.10.11.13
+ instance_4_ip: 10.10.10.12
+ instance_5_ip: 10.10.11.13
+ net_1_name: sdnvpn-2-1-net
+ subnet_1a_name: sdnvpn-2-1a-subnet
+ subnet_1a_cidr: 10.10.10.0/24
+ subnet_1b_name: sdnvpn-2-1b-subnet
+ subnet_1b_cidr: 10.10.11.0/24
+ router_1_name: sdnvpn-2-1-router
+ net_2_name: sdnvpn-2-2-net
+ subnet_2a_name: sdnvpn-2-2a-subnet
+ subnet_2a_cidr: 10.10.11.0/24
+ subnet_2b_name: sdnvpn-2-2b-subnet
+ subnet_2b_cidr: 10.10.10.0/24
+ router_2_name: sdnvpn-2-2-router
+ secgroup_name: sdnvpn-sg
+ secgroup_descr: Security group for SDNVPN test cases
+ targets1: '88:88'
+ targets2: '55:55'
+ route_distinguishers1: '111:111'
+ route_distinguishers2: '222:222'
+
sdnvpn.test.functest.testcase_3:
enabled: true
order: 3
@@ -97,7 +154,8 @@ testcases:
sdnvpn.test.functest.testcase_4:
enabled: true
order: 4
- description: VPN provides connectivity between subnets using router association
+ description: "VPN provides connectivity between subnets using router \
+ association"
instance_1_name: sdnvpn-4-1
instance_2_name: sdnvpn-4-2
instance_3_name: sdnvpn-4-3
@@ -118,6 +176,32 @@ testcases:
targets2: '55:55'
route_distinguishers: '12:12'
+ sdnvpn.test.functest.testcase_4bis:
+ enabled: true
+ order: 17
+ description: Test bed for HOT introduction - same tests as case 4
+ image_name: sdnvpn-image
+ stack_name: stack-4bis
+ hot_file_name: artifacts/testcase_4bis.yaml
+ heat_parameters:
+ instance_1_name: sdnvpn-4-1
+ instance_2_name: sdnvpn-4-2
+ instance_3_name: sdnvpn-4-3
+ instance_4_name: sdnvpn-4-4
+ instance_5_name: sdnvpn-4-5
+ net_1_name: sdnvpn-4-1-net
+ subnet_1_name: sdnvpn-4-1-subnet
+ subnet_1_cidr: 10.10.10.0/24
+ router_1_name: sdnvpn-4-1-router
+ net_2_name: sdnvpn-4-2-net
+ subnet_2_name: sdnvpn-4-2-subnet
+ subnet_2_cidr: 10.10.11.0/24
+ secgroup_name: sdnvpn-sg
+ secgroup_descr: Security group for SDNVPN test cases
+ targets1: '88:88'
+ targets2: '55:55'
+ route_distinguishers: '12:12'
+
sdnvpn.test.functest.testcase_7:
enabled: false
order: 7
@@ -158,15 +242,41 @@ testcases:
targets: '88:88'
route_distinguishers: '18:18'
+ sdnvpn.test.functest.testcase_8bis:
+ enabled: true
+ order: 21
+ description: "Test floating IP and router assoc coexistence \
+ same as test case 8"
+ image_name: sdnvpn-image
+ stack_name: stack-8bis
+ hot_file_name: artifacts/testcase_8bis.yaml
+ hot_update_file_name: artifacts/testcase_8bis_upd.yaml
+ heat_parameters:
+ instance_1_name: sdnvpn-8-1
+ instance_2_name: sdnvpn-8-2
+ net_1_name: sdnvpn-8-1
+ subnet_1_name: sdnvpn-8-1-subnet
+ subnet_1_cidr: 10.10.10.0/24
+ router_1_name: sdnvpn-8-1-router
+ net_2_name: sdnvpn-8-2
+ subnet_2_name: sdnvpn-8-2-subnet
+ subnet_2_cidr: 10.10.20.0/24
+ secgroup_name: sdnvpn-sg
+ secgroup_descr: Security group for SDNVPN test cases
+ targets: '88:88'
+ route_distinguishers: '18:18'
+
sdnvpn.test.functest.testcase_9:
enabled: true
order: 9
- description: Verify that all OpenStack nodes OVS br-int have fail_mode set to secure.
+ description: "Verify that all OpenStack nodes OVS br-int have fail_mode \
+ set to secure."
sdnvpn.test.functest.testcase_10:
enabled: true
order: 10
- description: Test if interupts occure during ping, when removing and adding instances
+ description: "Test if interrupts occur during ping, when removing and \
+ adding instances"
instance_1_name: sdnvpn-10-1
instance_2_name: sdnvpn-10-2
instance_3_name: sdnvpn-10-3
@@ -182,7 +292,8 @@ testcases:
sdnvpn.test.functest.testcase_11:
enabled: true
order: 11
- description: Check relevant OVS groups are removed upon deletion of OpenStack topology
+ description: "Check relevant OVS groups are removed upon deletion of \
+ OpenStack topology"
instance_1_name: sdnvpn-11-1
instance_2_name: sdnvpn-11-2
image_name: sdnvpn-image
@@ -233,5 +344,5 @@ testcases:
targets1: '88:88'
targets2: '88:88'
route_distinguishers:
- - '12:12'
- - '13:13'
+ - '12:12'
+ - '13:13'
diff --git a/sdnvpn/test/functest/run_sdnvpn_tests.py b/sdnvpn/test/functest/run_sdnvpn_tests.py
index c05876d..b1b242e 100644
--- a/sdnvpn/test/functest/run_sdnvpn_tests.py
+++ b/sdnvpn/test/functest/run_sdnvpn_tests.py
@@ -31,23 +31,23 @@ class SdnvpnFunctest(feature.Feature):
def execute(self):
- nova_client = os_utils.get_nova_client()
- neutron_client = os_utils.get_neutron_client()
+ cloud = os_utils.get_os_cloud()
+ conn = os_utils.get_os_connection()
tenant_id = os_utils.get_tenant_id(os_utils.get_keystone_client(),
os.environ['OS_PROJECT_NAME'])
- neutron_quota = test_utils.get_neutron_quota(neutron_client, tenant_id)
+ neutron_quota = test_utils.get_neutron_quota(conn, tenant_id)
(neutron_nw_quota, neutron_subnet_quota, neutron_port_quota,
neutron_router_quota) = (
- neutron_quota['network'], neutron_quota['subnet'],
- neutron_quota['port'], neutron_quota['router'])
- instances_quota = test_utils.get_nova_instances_quota(nova_client)
+ neutron_quota.networks, neutron_quota.subnets,
+ neutron_quota.ports, neutron_quota.routers)
+ instances_quota = test_utils.get_nova_instances_quota(cloud)
logger.info("Setting net/subnet/port/router "
"quota to unlimited")
test_utils.update_nw_subnet_port_quota(
- neutron_client,
+ conn,
tenant_id,
COMMON_CONFIG.neutron_nw_quota,
COMMON_CONFIG.neutron_subnet_quota,
@@ -59,35 +59,27 @@ class SdnvpnFunctest(feature.Feature):
# https://jira.opnfv.org/projects/SDNVPN/issues/SDNVPN-115
logger.info("Setting instances quota class to unlimited")
test_utils.update_instance_quota_class(
- nova_client,
- COMMON_CONFIG.nova_instances_quota_class)
+ cloud, COMMON_CONFIG.nova_instances_quota_class)
# Clean up the stale floating ip's so that required
# ip addresses are available for sdnvpn testcases
logger.info("Cleaning up the Floating IP Addresses")
- floating_ips = os_utils.get_floating_ips(neutron_client)
- if floating_ips is not None:
- for floating_ip in floating_ips:
- os_utils.delete_floating_ip(
- neutron_client, floating_ip['id'])
+ floating_ips = os_utils.get_floating_ips(conn)
+ for floating_ip in floating_ips:
+ os_utils.delete_floating_ip(conn, floating_ip.id)
# Workaround for
# https://jira.opnfv.org/browse/SNAPS-318
# Clean up the stale routers
logger.info("Cleaning up the stale routers")
- ports = os_utils.get_port_list(neutron_client)
- if ports is not None:
- for port in ports:
- if port['device_owner'] == 'network:router_interface':
- os_utils.delete_neutron_port(
- neutron_client, port['id'])
- routers = os_utils.get_router_list(neutron_client)
- if routers is not None:
- for router in routers:
- os_utils.remove_gateway_router(
- neutron_client, router['id'])
- os_utils.delete_neutron_router(
- neutron_client, router['id'])
+ ports = os_utils.get_port_list(conn)
+ for port in ports:
+ if port.device_owner == 'network:router_interface':
+ os_utils.delete_neutron_port(conn, port.id)
+ routers = os_utils.get_router_list(conn)
+ for router in routers:
+ os_utils.remove_gateway_router(conn, router.id)
+ os_utils.delete_neutron_router(conn, router.id)
with open(COMMON_CONFIG.config_file) as f:
config_yaml = yaml.safe_load(f)
@@ -133,7 +125,7 @@ class SdnvpnFunctest(feature.Feature):
overall_status = "FAIL"
logger.info("Resetting subnet/net/port quota")
- test_utils.update_nw_subnet_port_quota(neutron_client,
+ test_utils.update_nw_subnet_port_quota(conn,
tenant_id,
neutron_nw_quota,
neutron_subnet_quota,
@@ -141,7 +133,7 @@ class SdnvpnFunctest(feature.Feature):
neutron_router_quota)
logger.info("Resetting instances quota class")
- test_utils.update_instance_quota_class(nova_client, instances_quota)
+ test_utils.update_instance_quota_class(cloud, instances_quota)
try:
installer_type = str(os.environ['INSTALLER_TYPE'].lower())
diff --git a/sdnvpn/test/functest/run_tempest.py b/sdnvpn/test/functest/run_tempest.py
deleted file mode 100644
index 15d4eda..0000000
--- a/sdnvpn/test/functest/run_tempest.py
+++ /dev/null
@@ -1,127 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright (c) 2018 All rights reserved
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-#
-import ConfigParser
-import logging
-import os
-import re
-import shutil
-
-import functest.opnfv_tests.openstack.tempest.conf_utils as tempest_utils
-
-from sdnvpn.lib import config as sdnvpn_config
-from sdnvpn.lib import openstack_utils as os_utils
-
-
-logger = logging.getLogger('sdnvpn-tempest')
-
-COMMON_CONFIG = sdnvpn_config.CommonConfig()
-TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
- 'sdnvpn.test.functest.run_tempest')
-
-
-def main():
- verifier_id = tempest_utils.get_verifier_id()
- deployment_id = tempest_utils.get_verifier_deployment_id()
- src_tempest_dir = tempest_utils.get_verifier_deployment_dir(
- verifier_id, deployment_id)
-
- if not src_tempest_dir:
- logger.error("Rally deployment not found.")
- exit(-1)
-
- tempest_utils.configure_verifier(src_tempest_dir)
-
- src_tempest_conf = os.path.join(src_tempest_dir, 'tempest.conf')
- bgpvpn_tempest_conf = os.path.join(src_tempest_dir, 'bgpvpn_tempest.conf')
-
- if not os.path.isfile(src_tempest_conf):
- logger.error("tempest.conf not found in %s." % src_tempest_conf)
- exit(-1)
- shutil.copy(src_tempest_conf, bgpvpn_tempest_conf)
-
- glance_client = os_utils.get_glance_client()
- img_ref = os_utils.create_glance_image(glance_client,
- TESTCASE_CONFIG.image_name,
- COMMON_CONFIG.image_path,
- disk=COMMON_CONFIG.image_format,
- container="bare", public='public')
-
- nova_client = os_utils.get_nova_client()
- flav_ref = os_utils.get_flavor_id(nova_client,
- COMMON_CONFIG.default_flavor)
-
- logger.info("Copying tempest.conf to %s." % bgpvpn_tempest_conf)
- config = ConfigParser.RawConfigParser()
- config.read(bgpvpn_tempest_conf)
- config.set('service_available', 'bgpvpn', 'True')
- logger.debug("Updating %s with bgpvpn=True" % bgpvpn_tempest_conf)
- config.set('compute', 'flavor_ref', flav_ref)
- logger.debug("Updating %s with flavor_id %s"
- % (bgpvpn_tempest_conf, flav_ref))
- config.set('compute', 'image_ref', img_ref)
- logger.debug("Updating %s with image_id %s"
- % (bgpvpn_tempest_conf, img_ref))
- with open(bgpvpn_tempest_conf, 'wb') as tempest_conf:
- config.write(tempest_conf)
-
- # TODO: Though --config-file parameter is set during the tempest run,
- # it looks for tempest.conf at /etc/tempest/ directory. so applying
- # the following workaround. Will remove it when the root cause is found.
- cmd = ("mkdir -p /etc/tempest;"
- "cp {0} /etc/tempest/tempest.conf".format(bgpvpn_tempest_conf))
- logger.info("Configuring default tempest conf file")
- os.popen(cmd)
-
- cmd_line = "tempest run -t --regex networking_bgpvpn_tempest " \
- "--config-file /etc/tempest/tempest.conf"
- logger.info("Executing: %s" % cmd_line)
- cmd = os.popen(cmd_line)
- output = cmd.read()
- logger.debug(output)
-
- # Results parsing
- error_logs = ""
- duration = 0
- failed = 0
- try:
- # Look For errors
- error_logs = ""
- for match in re.findall('(.*?)[. ]*FAILED', output):
- error_logs += match
- # look for duration
- m = re.search('tests in(.*)sec', output)
- duration = m.group(1)
- # Look for num tests run
- m = re.search('Ran:(.*)tests', output)
- num_tests = m.group(1)
- # Look for tests failed
- m = re.search('- Failed:(.*)', output)
- failed = m.group(1)
- # Look for name of the tests
- testcases = re.findall("\{0\} (.*)", output)
-
- results = {"duration": duration,
- "num_tests": num_tests, "failed": failed,
- "tests": testcases}
- if int(failed) == 0:
- status = "PASS"
- else:
- status = "FAIL"
-
- return {"status": status, "details": results}
- except Exception as e:
- logger.error("Problem when parsing the results: %s", e)
- finally:
- os_utils.delete_glance_image(glance_client, img_ref)
- logger.debug("Deleted image %s" % img_ref)
-
-if __name__ == '__main__':
- main()
diff --git a/sdnvpn/test/functest/testcase_1.py b/sdnvpn/test/functest/testcase_1.py
index 35e32b2..b524abf 100644
--- a/sdnvpn/test/functest/testcase_1.py
+++ b/sdnvpn/test/functest/testcase_1.py
@@ -25,37 +25,36 @@ TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
def main():
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
- glance_client = os_utils.get_glance_client()
(floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))
try:
image_id = os_utils.create_glance_image(
- glance_client, TESTCASE_CONFIG.image_name,
+ conn, TESTCASE_CONFIG.image_name,
COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
container="bare", public='public')
image_ids.append(image_id)
- network_1_id = test_utils.create_net(neutron_client,
+ network_1_id = test_utils.create_net(conn,
TESTCASE_CONFIG.net_1_name)
- subnet_1_id = test_utils.create_subnet(neutron_client,
+ subnet_1_id = test_utils.create_subnet(conn,
TESTCASE_CONFIG.subnet_1_name,
TESTCASE_CONFIG.subnet_1_cidr,
network_1_id)
- network_2_id = test_utils.create_net(neutron_client,
+ network_2_id = test_utils.create_net(conn,
TESTCASE_CONFIG.net_2_name)
- subnet_2_id = test_utils.create_subnet(neutron_client,
+ subnet_2_id = test_utils.create_subnet(conn,
TESTCASE_CONFIG.subnet_2_name,
TESTCASE_CONFIG.subnet_2_cidr,
network_2_id)
@@ -63,49 +62,49 @@ def main():
subnet_ids.extend([subnet_1_id, subnet_2_id])
sg_id = os_utils.create_security_group_full(
- neutron_client, TESTCASE_CONFIG.secgroup_name,
+ conn, TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
- compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
+ compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
av_zone_1 = "nova:" + compute_nodes[0]
av_zone_2 = "nova:" + compute_nodes[1]
# boot INTANCES
vm_2 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1)
- vm_2_ip = test_utils.get_instance_ip(vm_2)
+ vm_2_ip = test_utils.get_instance_ip(conn, vm_2)
vm_3 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_3_name,
image_id,
network_1_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_2)
- vm_3_ip = test_utils.get_instance_ip(vm_3)
+ vm_3_ip = test_utils.get_instance_ip(conn, vm_3)
vm_5 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_5_name,
image_id,
network_2_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_2)
- vm_5_ip = test_utils.get_instance_ip(vm_5)
+ vm_5_ip = test_utils.get_instance_ip(conn, vm_5)
# We boot vm5 first because we need vm5_ip for vm4 userdata
u4 = test_utils.generate_ping_userdata([vm_5_ip])
vm_4 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_4_name,
image_id,
network_2_id,
@@ -113,7 +112,7 @@ def main():
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1,
userdata=u4)
- vm_4_ip = test_utils.get_instance_ip(vm_4)
+ vm_4_ip = test_utils.get_instance_ip(conn, vm_4)
# We boot VM1 at the end because we need to get the IPs first
# to generate the userdata
@@ -122,7 +121,7 @@ def main():
vm_4_ip,
vm_5_ip])
vm_1 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
@@ -244,9 +243,9 @@ def main():
logger.error("exception occurred while executing testcase_1: %s", e)
raise
finally:
- test_utils.cleanup_nova(nova_client, instance_ids)
- test_utils.cleanup_glance(glance_client, image_ids)
- test_utils.cleanup_neutron(neutron_client, floatingip_ids,
+ test_utils.cleanup_nova(conn, instance_ids)
+ test_utils.cleanup_glance(conn, image_ids)
+ test_utils.cleanup_neutron(conn, neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,
router_ids, network_ids)
diff --git a/sdnvpn/test/functest/testcase_10.py b/sdnvpn/test/functest/testcase_10.py
index aebc146..3ba93a9 100644
--- a/sdnvpn/test/functest/testcase_10.py
+++ b/sdnvpn/test/functest/testcase_10.py
@@ -28,14 +28,15 @@ TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
'sdnvpn.test.functest.testcase_10')
-def monitor(in_data, out_data, vm):
+def monitor(conn, in_data, out_data, vm):
# At the beginning of ping we might have some
# failures, so we ignore the first 10 pings
lines_offset = 20
while in_data["stop_thread"] is False:
try:
time.sleep(1)
- vm_console_out_lines = vm.get_console_output().split('\n')
+ vm_console_out_lines = conn.compute.\
+ get_server_console_output(vm)['output'].split('\n')
if lines_offset < len(vm_console_out_lines):
for console_line in vm_console_out_lines[lines_offset:-1]:
is_ping_error = re.match(r'ping.*KO', console_line)
@@ -54,7 +55,7 @@ def monitor(in_data, out_data, vm):
logger.info("Ping from instance {}: {}".
format(vm.name, console_line))
lines_offset = len(vm_console_out_lines)
- except:
+ except Exception:
# Atomic write to std out
with std_out_lock:
logger.error("Failure in monitor_thread of instance {}".
@@ -64,19 +65,18 @@ def monitor(in_data, out_data, vm):
def main():
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
- glance_client = os_utils.get_glance_client()
(floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))
- image_id = os_utils.create_glance_image(glance_client,
+ image_id = os_utils.create_glance_image(conn,
TESTCASE_CONFIG.image_name,
COMMON_CONFIG.image_path,
disk=COMMON_CONFIG.image_format,
@@ -84,9 +84,9 @@ def main():
public='public')
image_ids.append(image_id)
- network_1_id = test_utils.create_net(neutron_client,
+ network_1_id = test_utils.create_net(conn,
TESTCASE_CONFIG.net_1_name)
- subnet_1_id = test_utils.create_subnet(neutron_client,
+ subnet_1_id = test_utils.create_subnet(conn,
TESTCASE_CONFIG.subnet_1_name,
TESTCASE_CONFIG.subnet_1_cidr,
network_1_id)
@@ -94,28 +94,28 @@ def main():
network_ids.append(network_1_id)
subnet_ids.append(subnet_1_id)
- sg_id = os_utils.create_security_group_full(neutron_client,
+ sg_id = os_utils.create_security_group_full(conn,
TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
- compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
+ compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
av_zone_1 = "nova:" + compute_nodes[0]
av_zone_2 = "nova:" + compute_nodes[1]
# boot INSTANCES
vm_2 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1)
- vm2_ip = test_utils.get_instance_ip(vm_2)
+ vm2_ip = test_utils.get_instance_ip(conn, vm_2)
u1 = test_utils.generate_ping_userdata([vm2_ip])
vm_1 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
@@ -123,11 +123,11 @@ def main():
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1,
userdata=u1)
- vm1_ip = test_utils.get_instance_ip(vm_1)
+ vm1_ip = test_utils.get_instance_ip(conn, vm_1)
u3 = test_utils.generate_ping_userdata([vm1_ip, vm2_ip])
vm_3 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_3_name,
image_id,
network_1_id,
@@ -135,7 +135,7 @@ def main():
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_2,
userdata=u3)
- vm3_ip = test_utils.get_instance_ip(vm_3)
+ vm3_ip = test_utils.get_instance_ip(conn, vm_3)
# We do not put vm_2 id in instance_ids table because we will
# delete the current instance during the testing process
instance_ids.extend([vm_1.id, vm_3.id])
@@ -153,19 +153,19 @@ def main():
monitor_output1 = m.dict()
monitor_input1["stop_thread"] = False
monitor_output1["error_msg"] = ""
- monitor_thread1 = Process(target=monitor, args=(monitor_input1,
+ monitor_thread1 = Process(target=monitor, args=(conn, monitor_input1,
monitor_output1, vm_1,))
monitor_input2 = m.dict()
monitor_output2 = m.dict()
monitor_input2["stop_thread"] = False
monitor_output2["error_msg"] = ""
- monitor_thread2 = Process(target=monitor, args=(monitor_input2,
+ monitor_thread2 = Process(target=monitor, args=(conn, monitor_input2,
monitor_output2, vm_2,))
monitor_input3 = m.dict()
monitor_output3 = m.dict()
monitor_input3["stop_thread"] = False
monitor_output3["error_msg"] = ""
- monitor_thread3 = Process(target=monitor, args=(monitor_input3,
+ monitor_thread3 = Process(target=monitor, args=(conn, monitor_input3,
monitor_output3, vm_3,))
# Lists of all monitor threads and their inputs and outputs.
threads = [monitor_thread1, monitor_thread2, monitor_thread3]
@@ -191,7 +191,7 @@ def main():
results.add_failure(monitor_err_msg)
# Stop monitor thread 2 and delete instance vm_2
thread_inputs[1]["stop_thread"] = True
- if not os_utils.delete_instance(nova_client, vm_2.id):
+ if not os_utils.delete_instance(conn, vm_2.id):
logger.error("Fail to delete vm_2 instance during "
"testing process")
raise Exception("Fail to delete instance vm_2.")
@@ -205,7 +205,7 @@ def main():
# Create a new vm (vm_4) on compute 1 node
u4 = test_utils.generate_ping_userdata([vm1_ip, vm3_ip])
vm_4 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_4_name,
image_id,
network_1_id,
@@ -225,7 +225,7 @@ def main():
monitor_output4 = m.dict()
monitor_input4["stop_thread"] = False
monitor_output4["error_msg"] = ""
- monitor_thread4 = Process(target=monitor, args=(monitor_input4,
+ monitor_thread4 = Process(target=monitor, args=(conn, monitor_input4,
monitor_output4,
vm_4,))
threads.append(monitor_thread4)
@@ -259,11 +259,11 @@ def main():
for thread in threads:
thread.join()
- test_utils.cleanup_nova(nova_client, instance_ids)
- test_utils.cleanup_glance(glance_client, image_ids)
- test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
- interfaces, subnet_ids, router_ids,
- network_ids)
+ test_utils.cleanup_nova(conn, instance_ids)
+ test_utils.cleanup_glance(conn, image_ids)
+ test_utils.cleanup_neutron(conn, neutron_client, floatingip_ids,
+ bgpvpn_ids, interfaces, subnet_ids,
+ router_ids, network_ids)
return results.compile_summary()
diff --git a/sdnvpn/test/functest/testcase_11.py b/sdnvpn/test/functest/testcase_11.py
index c597c4d..fd2c74a 100644
--- a/sdnvpn/test/functest/testcase_11.py
+++ b/sdnvpn/test/functest/testcase_11.py
@@ -24,15 +24,14 @@ TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
def main():
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
- glance_client = os_utils.get_glance_client()
openstack_nodes = test_utils.get_nodes()
(floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
@@ -40,14 +39,14 @@ def main():
try:
image_id = os_utils.create_glance_image(
- glance_client, TESTCASE_CONFIG.image_name,
+ conn, TESTCASE_CONFIG.image_name,
COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
container="bare", public='public')
image_ids.append(image_id)
- network_1_id = test_utils.create_net(neutron_client,
+ network_1_id = test_utils.create_net(conn,
TESTCASE_CONFIG.net_1_name)
- subnet_1_id = test_utils.create_subnet(neutron_client,
+ subnet_1_id = test_utils.create_subnet(conn,
TESTCASE_CONFIG.subnet_1_name,
TESTCASE_CONFIG.subnet_1_cidr,
network_1_id)
@@ -56,12 +55,11 @@ def main():
subnet_ids.append(subnet_1_id)
sg_id = os_utils.create_security_group_full(
- neutron_client, TESTCASE_CONFIG.secgroup_name,
+ conn, TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
# Check required number of compute nodes
- compute_hostname = (
- nova_client.hypervisors.list()[0].hypervisor_hostname)
+ compute_hostname = conn.compute.hypervisors().next().name
compute_nodes = [node for node in openstack_nodes
if node.is_compute()]
@@ -74,7 +72,7 @@ def main():
# boot INSTANCES
vm_2 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
@@ -83,7 +81,7 @@ def main():
compute_node=av_zone_1)
vm_1 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
@@ -128,11 +126,11 @@ def main():
raise
finally:
# Cleanup topology
- test_utils.cleanup_nova(nova_client, instance_ids)
- test_utils.cleanup_glance(glance_client, image_ids)
- test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
- interfaces, subnet_ids, router_ids,
- network_ids)
+ test_utils.cleanup_nova(conn, instance_ids)
+ test_utils.cleanup_glance(conn, image_ids)
+ test_utils.cleanup_neutron(conn, neutron_client, floatingip_ids,
+ bgpvpn_ids, interfaces, subnet_ids,
+ router_ids, network_ids)
# Connect again OVS to Controller
for compute_node in compute_nodes:
compute_node.run_cmd("sudo ovs-vsctl set-controller {} {}".
diff --git a/sdnvpn/test/functest/testcase_12.py b/sdnvpn/test/functest/testcase_12.py
index 3e13d69..6bb8140 100644
--- a/sdnvpn/test/functest/testcase_12.py
+++ b/sdnvpn/test/functest/testcase_12.py
@@ -24,15 +24,14 @@ TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
def main():
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
- glance_client = os_utils.get_glance_client()
openstack_nodes = test_utils.get_nodes()
(floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
@@ -40,14 +39,14 @@ def main():
try:
image_id = os_utils.create_glance_image(
- glance_client, TESTCASE_CONFIG.image_name,
+ conn, TESTCASE_CONFIG.image_name,
COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
container="bare", public='public')
image_ids.append(image_id)
- network_1_id = test_utils.create_net(neutron_client,
+ network_1_id = test_utils.create_net(conn,
TESTCASE_CONFIG.net_1_name)
- subnet_1_id = test_utils.create_subnet(neutron_client,
+ subnet_1_id = test_utils.create_subnet(conn,
TESTCASE_CONFIG.subnet_1_name,
TESTCASE_CONFIG.subnet_1_cidr,
network_1_id)
@@ -56,12 +55,11 @@ def main():
subnet_ids.append(subnet_1_id)
sg_id = os_utils.create_security_group_full(
- neutron_client, TESTCASE_CONFIG.secgroup_name,
+ conn, TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
# Check required number of compute nodes
- compute_hostname = (
- nova_client.hypervisors.list()[0].hypervisor_hostname)
+ compute_hostname = conn.compute.hypervisors().next().name
compute_nodes = [node for node in openstack_nodes
if node.is_compute()]
@@ -76,7 +74,7 @@ def main():
# boot INSTANCES
vm_2 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
@@ -85,7 +83,7 @@ def main():
compute_node=av_zone_1)
vm_1 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
@@ -185,11 +183,11 @@ def main():
raise
finally:
# Cleanup topology
- test_utils.cleanup_nova(nova_client, instance_ids)
- test_utils.cleanup_glance(glance_client, image_ids)
- test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
- interfaces, subnet_ids, router_ids,
- network_ids)
+ test_utils.cleanup_nova(conn, instance_ids)
+ test_utils.cleanup_glance(conn, image_ids)
+ test_utils.cleanup_neutron(conn, neutron_client, floatingip_ids,
+ bgpvpn_ids, interfaces, subnet_ids,
+ router_ids, network_ids)
return results.compile_summary()
diff --git a/sdnvpn/test/functest/testcase_13.py b/sdnvpn/test/functest/testcase_13.py
index 8beb1db..e15c8f1 100644
--- a/sdnvpn/test/functest/testcase_13.py
+++ b/sdnvpn/test/functest/testcase_13.py
@@ -26,7 +26,8 @@ TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
def main():
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
@@ -35,7 +36,7 @@ def main():
if not os.path.isfile(COMMON_CONFIG.ubuntu_image_path):
logger.info("Downloading image")
image_dest_path = '/'.join(
- COMMON_CONFIG.ubuntu_image_path.split('/')[:-1])
+ COMMON_CONFIG.ubuntu_image_path.split('/')[:-1])
os_utils.download_url(
"http://artifacts.opnfv.org/sdnvpn/"
"ubuntu-16.04-server-cloudimg-amd64-disk1.img",
@@ -43,16 +44,14 @@ def main():
else:
logger.info("Using old image")
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
- glance_client = os_utils.get_glance_client()
(floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
subnet_ids, interfaces, bgpvpn_ids, flavor_ids) = ([] for i in range(9))
try:
image_id = os_utils.create_glance_image(
- glance_client,
+ conn,
COMMON_CONFIG.ubuntu_image_name,
COMMON_CONFIG.ubuntu_image_path,
disk="qcow2",
@@ -64,7 +63,7 @@ def main():
flavor_ids.append(flavor_id)
network_1_id, subnet_1_id, router_1_id = test_utils.create_network(
- neutron_client,
+ conn,
TESTCASE_CONFIG.net_1_name,
TESTCASE_CONFIG.subnet_1_name,
TESTCASE_CONFIG.subnet_1_cidr,
@@ -76,10 +75,10 @@ def main():
router_ids.extend([router_1_id])
sg_id = os_utils.create_security_group_full(
- neutron_client, TESTCASE_CONFIG.secgroup_name,
+ conn, TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
- compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
+ compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
av_zone_1 = "nova:" + compute_nodes[0]
av_zone_2 = "nova:" + compute_nodes[1]
@@ -91,7 +90,7 @@ def main():
TESTCASE_CONFIG.extra_route_subnet_mask)
# boot INTANCES
vm_1 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
@@ -100,18 +99,18 @@ def main():
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1,
userdata=u1)
- vm_1_ip = test_utils.get_instance_ip(vm_1)
+ vm_1_ip = test_utils.get_instance_ip(conn, vm_1)
- vm1_port = test_utils.get_port(neutron_client, vm_1.id)
+ vm1_port = test_utils.get_port(conn, vm_1.id)
test_utils.update_port_allowed_address_pairs(
- neutron_client,
- vm1_port['id'],
+ conn,
+ vm1_port.id,
[test_utils.AllowedAddressPair(
TESTCASE_CONFIG.extra_route_cidr,
- vm1_port['mac_address'])])
+ vm1_port.mac_address)])
vm_2 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
@@ -120,20 +119,20 @@ def main():
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1,
userdata=u1)
- vm_2_ip = test_utils.get_instance_ip(vm_2)
+ vm_2_ip = test_utils.get_instance_ip(conn, vm_2)
- vm2_port = test_utils.get_port(neutron_client, vm_2.id)
+ vm2_port = test_utils.get_port(conn, vm_2.id)
test_utils.update_port_allowed_address_pairs(
- neutron_client,
- vm2_port['id'],
+ conn,
+ vm2_port.id,
[test_utils.AllowedAddressPair(
TESTCASE_CONFIG.extra_route_cidr,
- vm2_port['mac_address'])])
+ vm2_port.mac_address)])
test_utils.async_Wait_for_instances([vm_1, vm_2])
image_2_id = os_utils.create_glance_image(
- glance_client, TESTCASE_CONFIG.image_name,
+ conn, TESTCASE_CONFIG.image_name,
COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
container="bare", public='public')
image_ids.append(image_2_id)
@@ -144,7 +143,7 @@ def main():
u3 = test_utils.generate_ping_userdata(
[TESTCASE_CONFIG.extra_route_ip])
vm_3 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_3_name,
image_2_id,
network_1_id,
@@ -184,7 +183,7 @@ def main():
neutron_client, bgpvpn_id, router_1_id)
test_utils.update_router_extra_route(
- neutron_client, router_1_id,
+ conn, router_1_id,
[test_utils.ExtraRoute(TESTCASE_CONFIG.extra_route_cidr,
vm_1_ip),
test_utils.ExtraRoute(TESTCASE_CONFIG.extra_route_cidr,
@@ -207,10 +206,10 @@ def main():
logger.error("exception occurred while executing testcase_13: %s", e)
raise
finally:
- test_utils.update_router_no_extra_route(neutron_client, router_ids)
- test_utils.cleanup_nova(nova_client, instance_ids, flavor_ids)
- test_utils.cleanup_glance(glance_client, image_ids)
- test_utils.cleanup_neutron(neutron_client, floatingip_ids,
+ test_utils.update_router_no_extra_route(conn, router_ids)
+ test_utils.cleanup_nova(conn, instance_ids, flavor_ids)
+ test_utils.cleanup_glance(conn, image_ids)
+ test_utils.cleanup_neutron(conn, neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,
router_ids, network_ids)
diff --git a/sdnvpn/test/functest/testcase_1bis.py b/sdnvpn/test/functest/testcase_1bis.py
new file mode 100644
index 0000000..30a0abf
--- /dev/null
+++ b/sdnvpn/test/functest/testcase_1bis.py
@@ -0,0 +1,209 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2018 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+import logging
+import sys
+import pkg_resources
+
+from random import randint
+from sdnvpn.lib import config as sdnvpn_config
+from sdnvpn.lib import openstack_utils as os_utils
+from sdnvpn.lib import utils as test_utils
+from sdnvpn.lib.results import Results
+
+logger = logging.getLogger(__name__)
+
+COMMON_CONFIG = sdnvpn_config.CommonConfig()
+TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
+    'sdnvpn.test.functest.testcase_1bis')
+
+
+def main():
+    """Run testcase 1bis: heat-stack based VPN connectivity checks."""
+    conn = os_utils.get_os_connection()
+    results = Results(COMMON_CONFIG.line_length, conn)
+
+    results.add_to_summary(0, "=")
+    results.add_to_summary(2, "STATUS", "SUBTEST")
+    results.add_to_summary(0, "=")
+
+    # neutron client is needed as long as bgpvpn heat module
+    # is not yet installed by default in apex (APEX-618)
+    neutron_client = os_utils.get_neutron_client()
+
+    image_ids = []
+    bgpvpn_ids = []
+
+    try:
+        # image created outside HOT (OS::Glance::Image deprecated since ocata)
+        image_id = os_utils.create_glance_image(
+            conn, TESTCASE_CONFIG.image_name,
+            COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
+            container="bare", public='public')
+        image_ids = [image_id]
+
+        compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
+        az_1 = "nova:" + compute_nodes[0]
+        az_2 = "nova:" + compute_nodes[1]
+
+        file_path = pkg_resources.resource_filename(
+            'sdnvpn', TESTCASE_CONFIG.hot_file_name)
+        templ = open(file_path, 'r').read()
+        logger.debug("Template is read: '%s'" % templ)
+        env = test_utils.get_heat_environment(TESTCASE_CONFIG, COMMON_CONFIG)
+        logger.debug("Environment is read: '%s'" % env)
+
+        env['name'] = TESTCASE_CONFIG.stack_name
+        env['template'] = templ
+        env['parameters']['image_n'] = TESTCASE_CONFIG.image_name
+        env['parameters']['av_zone_1'] = az_1
+        env['parameters']['av_zone_2'] = az_2
+
+        stack_id = os_utils.create_stack(conn, **env)
+        if stack_id is None:
+            logger.error("Stack create start failed")
+            raise SystemError("Stack create start failed")
+
+        test_utils.wait_stack_for_status(conn, stack_id, 'CREATE_COMPLETE')
+
+        net_1_output = os_utils.get_output(conn, stack_id, 'net_1_o')
+        network_1_id = net_1_output['output_value']
+        net_2_output = os_utils.get_output(conn, stack_id, 'net_2_o')
+        network_2_id = net_2_output['output_value']
+
+        vm_stack_output_keys = ['vm1_o', 'vm2_o', 'vm3_o', 'vm4_o', 'vm5_o']
+        vms = test_utils.get_vms_from_stack_outputs(conn,
+                                                    stack_id,
+                                                    vm_stack_output_keys)
+
+        logger.debug("Entering base test case with stack '%s'" % stack_id)
+
+        msg = ("Create VPN with eRT<>iRT")
+        results.record_action(msg)
+        vpn_name = "sdnvpn-" + str(randint(100000, 999999))
+        kwargs = {
+            "import_targets": TESTCASE_CONFIG.targets1,
+            "export_targets": TESTCASE_CONFIG.targets2,
+            "route_distinguishers": TESTCASE_CONFIG.route_distinguishers,
+            "name": vpn_name
+        }
+        bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)
+        bgpvpn_id = bgpvpn['bgpvpn']['id']
+        logger.debug("VPN created details: %s" % bgpvpn)
+        bgpvpn_ids.append(bgpvpn_id)
+
+        msg = ("Associate network '%s' to the VPN." %
+               TESTCASE_CONFIG.heat_parameters['net_1_name'])
+        results.record_action(msg)
+        results.add_to_summary(0, "-")
+
+        test_utils.create_network_association(
+            neutron_client, bgpvpn_id, network_1_id)
+
+        # Remember: vms[X] is former vm_X+1
+
+        results.get_ping_status(vms[0], vms[1], expected="PASS", timeout=200)
+        results.get_ping_status(vms[0], vms[2], expected="PASS", timeout=30)
+        results.get_ping_status(vms[0], vms[3], expected="FAIL", timeout=30)
+
+        msg = ("Associate network '%s' to the VPN." %
+               TESTCASE_CONFIG.heat_parameters['net_2_name'])
+        results.add_to_summary(0, "-")
+        results.record_action(msg)
+        results.add_to_summary(0, "-")
+
+        test_utils.create_network_association(
+            neutron_client, bgpvpn_id, network_2_id)
+
+        test_utils.wait_for_bgp_net_assocs(neutron_client,
+                                           bgpvpn_id,
+                                           network_1_id,
+                                           network_2_id)
+
+        logger.info("Waiting for the VMs to connect to each other using the"
+                    " updated network configuration")
+        test_utils.wait_before_subtest()
+
+        results.get_ping_status(vms[3], vms[4], expected="PASS", timeout=30)
+        # TODO enable again when isolation in VPN with iRT != eRT works
+        # results.get_ping_status(vms[0], vms[3], expected="FAIL", timeout=30)
+        # results.get_ping_status(vms[0], vms[4], expected="FAIL", timeout=30)
+
+        msg = ("Update VPN with eRT=iRT ...")
+        results.add_to_summary(0, "-")
+        results.record_action(msg)
+        results.add_to_summary(0, "-")
+
+        # use bgpvpn-create instead of update till NETVIRT-1067 bug is fixed
+        # kwargs = {"import_targets": TESTCASE_CONFIG.targets1,
+        #           "export_targets": TESTCASE_CONFIG.targets1,
+        #           "name": vpn_name}
+        # bgpvpn = test_utils.update_bgpvpn(neutron_client,
+        #                                   bgpvpn_id, **kwargs)
+
+        test_utils.delete_bgpvpn(neutron_client, bgpvpn_id)
+        bgpvpn_ids.remove(bgpvpn_id)
+        kwargs = {
+            "import_targets": TESTCASE_CONFIG.targets1,
+            "export_targets": TESTCASE_CONFIG.targets1,
+            "route_distinguishers": TESTCASE_CONFIG.route_distinguishers,
+            "name": vpn_name
+        }
+
+        test_utils.wait_before_subtest()
+
+        bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)
+        bgpvpn_id = bgpvpn['bgpvpn']['id']
+        logger.debug("VPN re-created details: %s" % bgpvpn)
+        bgpvpn_ids.append(bgpvpn_id)
+
+        msg = ("Associate network '%s' to the VPN." %
+               TESTCASE_CONFIG.heat_parameters['net_1_name'])
+        results.record_action(msg)
+        results.add_to_summary(0, "-")
+
+        test_utils.create_network_association(
+            neutron_client, bgpvpn_id, network_1_id)
+
+        test_utils.create_network_association(
+            neutron_client, bgpvpn_id, network_2_id)
+
+        test_utils.wait_for_bgp_net_assocs(neutron_client,
+                                           bgpvpn_id,
+                                           network_1_id,
+                                           network_2_id)
+        # The above code has to be removed after re-enabling bgpvpn-update
+
+        logger.info("Waiting for the VMs to connect to each other using the"
+                    " updated network configuration")
+        test_utils.wait_before_subtest()
+
+        results.get_ping_status(vms[0], vms[3], expected="PASS", timeout=30)
+        results.get_ping_status(vms[0], vms[4], expected="PASS", timeout=30)
+
+    except Exception as e:
+        logger.error("exception occurred while executing testcase_1bis: %s", e)
+        raise
+    finally:
+        test_utils.cleanup_glance(conn, image_ids)
+        test_utils.cleanup_neutron(conn, neutron_client, [], bgpvpn_ids,
+                                   [], [], [], [])
+
+        try:
+            test_utils.delete_stack_and_wait(conn, stack_id)
+        except Exception as e:
+            logger.error(
+                "exception occurred while executing testcase_1bis: %s", e)
+
+    return results.compile_summary()
+
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/sdnvpn/test/functest/testcase_2.py b/sdnvpn/test/functest/testcase_2.py
index ee74d8d..b4f05b2 100644
--- a/sdnvpn/test/functest/testcase_2.py
+++ b/sdnvpn/test/functest/testcase_2.py
@@ -8,6 +8,7 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
+import base64
import logging
import sys
@@ -25,15 +26,14 @@ TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
def main():
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
- glance_client = os_utils.get_glance_client()
(floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))
@@ -44,40 +44,41 @@ def main():
keyfile = open(COMMON_CONFIG.keyfile_path, 'r')
key = keyfile.read()
keyfile.close()
- files = {"/home/cirros/id_rsa": key}
+ files = [{'path': '/home/cirros/id_rsa',
+ 'contents': base64.b64encode(key)}]
image_id = os_utils.create_glance_image(
- glance_client, TESTCASE_CONFIG.image_name,
+ conn, TESTCASE_CONFIG.image_name,
COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
container="bare", public='public')
image_ids.append(image_id)
network_1_id = test_utils.create_net(
- neutron_client,
+ conn,
TESTCASE_CONFIG.net_1_name)
subnet_1a_id = test_utils.create_subnet(
- neutron_client,
+ conn,
TESTCASE_CONFIG.subnet_1a_name,
TESTCASE_CONFIG.subnet_1a_cidr,
network_1_id)
# TODO: uncomment the commented lines once ODL has
# support for mulitple subnets under same neutron network
# subnet_1b_id = test_utils.create_subnet(
- # neutron_client,
+ # conn,
# TESTCASE_CONFIG.subnet_1b_name,
# TESTCASE_CONFIG.subnet_1b_cidr,
# network_1_id)
network_2_id = test_utils.create_net(
- neutron_client,
+ conn,
TESTCASE_CONFIG.net_2_name)
# subnet_2a_id = test_utils.create_subnet(
- # neutron_client,
+ # conn,
# TESTCASE_CONFIG.subnet_2a_name,
# TESTCASE_CONFIG.subnet_2a_cidr,
# network_2_id)
subnet_2b_id = test_utils.create_subnet(
- neutron_client,
+ conn,
TESTCASE_CONFIG.subnet_2b_name,
TESTCASE_CONFIG.subnet_2b_cidr,
network_2_id)
@@ -88,10 +89,10 @@ def main():
subnet_2b_id])
sg_id = os_utils.create_security_group_full(
- neutron_client, TESTCASE_CONFIG.secgroup_name,
+ conn, TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
- compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
+ compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
av_zone_1 = "nova:" + compute_nodes[0]
# av_zone_2 = "nova:" + compute_nodes[1]
@@ -99,7 +100,7 @@ def main():
# boot INTANCES
userdata_common = test_utils.generate_userdata_common()
vm_2 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
@@ -111,7 +112,7 @@ def main():
# vm_3 = test_utils.create_instance(
-# nova_client,
+# conn,
# TESTCASE_CONFIG.instance_3_name,
# image_id,
# network_1_id,
@@ -122,7 +123,7 @@ def main():
# userdata=userdata_common)
#
# vm_5 = test_utils.create_instance(
-# nova_client,
+# conn,
# TESTCASE_CONFIG.instance_5_name,
# image_id,
# network_2_id,
@@ -139,7 +140,7 @@ def main():
# TESTCASE_CONFIG.instance_5_ip
])
vm_4 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_4_name,
image_id,
network_2_id,
@@ -159,7 +160,7 @@ def main():
# TESTCASE_CONFIG.instance_5_ip
])
vm_1 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
@@ -267,9 +268,9 @@ def main():
logger.error("exception occurred while executing testcase_2: %s", e)
raise
finally:
- test_utils.cleanup_nova(nova_client, instance_ids)
- test_utils.cleanup_glance(glance_client, image_ids)
- test_utils.cleanup_neutron(neutron_client, floatingip_ids,
+ test_utils.cleanup_nova(conn, instance_ids)
+ test_utils.cleanup_glance(conn, image_ids)
+ test_utils.cleanup_neutron(conn, neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,
router_ids, network_ids)
diff --git a/sdnvpn/test/functest/testcase_2bis.py b/sdnvpn/test/functest/testcase_2bis.py
new file mode 100644
index 0000000..3736c0c
--- /dev/null
+++ b/sdnvpn/test/functest/testcase_2bis.py
@@ -0,0 +1,188 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2018 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+import base64
+import logging
+import sys
+import pkg_resources
+
+from random import randint
+from sdnvpn.lib import config as sdnvpn_config
+from sdnvpn.lib import openstack_utils as os_utils
+from sdnvpn.lib import utils as test_utils
+from sdnvpn.lib.results import Results
+
+logger = logging.getLogger(__name__)
+
+COMMON_CONFIG = sdnvpn_config.CommonConfig()
+TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
+    'sdnvpn.test.functest.testcase_2bis')
+
+
+def main():
+    """Run testcase 2bis: heat-stack based tenant-separation checks."""
+    conn = os_utils.get_os_connection()
+    results = Results(COMMON_CONFIG.line_length, conn)
+
+    results.add_to_summary(0, '=')
+    results.add_to_summary(2, 'STATUS', 'SUBTEST')
+    results.add_to_summary(0, '=')
+
+    # neutron client is needed as long as bgpvpn heat module
+    # is not yet installed by default in apex (APEX-618)
+    neutron_client = os_utils.get_neutron_client()
+
+    image_ids = []
+    bgpvpn_ids = []
+
+    try:
+        logger.debug("Using private key %s injected to the VMs."
+                     % COMMON_CONFIG.keyfile_path)
+        keyfile = open(COMMON_CONFIG.keyfile_path, 'r')
+        key_buf = keyfile.read()
+        keyfile.close()
+        key = base64.b64encode(key_buf)
+
+        # image created outside HOT (OS::Glance::Image deprecated since ocata)
+        image_id = os_utils.create_glance_image(
+            conn, TESTCASE_CONFIG.image_name,
+            COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
+            container='bare', public='public')
+        image_ids = [image_id]
+
+        compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
+
+        az_1 = 'nova:' + compute_nodes[0]
+        # av_zone_2 = "nova:" + compute_nodes[1]
+
+        file_path = pkg_resources.resource_filename(
+            'sdnvpn', TESTCASE_CONFIG.hot_file_name)
+        templ = open(file_path, 'r').read()
+        logger.debug("Template is read: '%s'" % templ)
+        env = test_utils.get_heat_environment(TESTCASE_CONFIG, COMMON_CONFIG)
+        logger.debug("Environment is read: '%s'" % env)
+
+        env['name'] = TESTCASE_CONFIG.stack_name
+        env['template'] = templ
+        env['parameters']['image_n'] = TESTCASE_CONFIG.image_name
+        env['parameters']['av_zone_1'] = az_1
+        env['parameters']['id_rsa_key'] = key
+
+        stack_id = os_utils.create_stack(conn, **env)
+        if stack_id is None:
+            logger.error('Stack create start failed')
+            raise SystemError('Stack create start failed')
+
+        test_utils.wait_stack_for_status(conn, stack_id, 'CREATE_COMPLETE')
+
+        net_1_output = os_utils.get_output(conn, stack_id, 'net_1_o')
+        network_1_id = net_1_output['output_value']
+        net_2_output = os_utils.get_output(conn, stack_id, 'net_2_o')
+        network_2_id = net_2_output['output_value']
+
+        vm_stack_output_keys = ['vm1_o', 'vm2_o', 'vm3_o', 'vm4_o', 'vm5_o']
+        vms = test_utils.get_vms_from_stack_outputs(conn,
+                                                    stack_id,
+                                                    vm_stack_output_keys)
+
+        logger.debug("Entering base test case with stack '%s'" % stack_id)
+
+        msg = ('Create VPN1 with eRT=iRT')
+        results.record_action(msg)
+        vpn1_name = 'sdnvpn-1-' + str(randint(100000, 999999))
+        kwargs = {
+            'import_targets': TESTCASE_CONFIG.targets2,
+            'export_targets': TESTCASE_CONFIG.targets2,
+            'route_targets': TESTCASE_CONFIG.targets2,
+            'route_distinguishers': TESTCASE_CONFIG.route_distinguishers1,
+            'name': vpn1_name
+        }
+        bgpvpn1 = test_utils.create_bgpvpn(neutron_client, **kwargs)
+        bgpvpn1_id = bgpvpn1['bgpvpn']['id']
+        logger.debug("VPN1 created details: %s" % bgpvpn1)
+        bgpvpn_ids.append(bgpvpn1_id)
+
+        msg = ("Associate network '%s' to the VPN." %
+               TESTCASE_CONFIG.heat_parameters['net_1_name'])
+        results.record_action(msg)
+        results.add_to_summary(0, '-')
+
+        test_utils.create_network_association(
+            neutron_client, bgpvpn1_id, network_1_id)
+
+        logger.info('Waiting for the VMs to connect to each other using the'
+                    ' updated network configuration for VPN1')
+        test_utils.wait_before_subtest()
+
+        # Remember: vms[X] has instance_X+1_name
+
+        # 10.10.10.12 should return sdnvpn-2 to sdnvpn-1
+        results.check_ssh_output(
+            vms[0], vms[1],
+            expected=TESTCASE_CONFIG.heat_parameters['instance_2_name'],
+            timeout=200)
+
+        results.add_to_summary(0, '-')
+        msg = ('Create VPN2 with eRT=iRT')
+        results.record_action(msg)
+        vpn2_name = 'sdnvpn-2-' + str(randint(100000, 999999))
+        kwargs = {
+            'import_targets': TESTCASE_CONFIG.targets1,
+            'export_targets': TESTCASE_CONFIG.targets1,
+            'route_targets': TESTCASE_CONFIG.targets1,
+            'route_distinguishers': TESTCASE_CONFIG.route_distinguishers2,
+            'name': vpn2_name
+        }
+        bgpvpn2 = test_utils.create_bgpvpn(neutron_client, **kwargs)
+        bgpvpn2_id = bgpvpn2['bgpvpn']['id']
+        logger.debug("VPN created details: %s" % bgpvpn2)
+        bgpvpn_ids.append(bgpvpn2_id)
+
+        msg = ("Associate network '%s' to the VPN2." %
+               TESTCASE_CONFIG.heat_parameters['net_2_name'])
+        results.record_action(msg)
+        results.add_to_summary(0, '-')
+
+        test_utils.create_network_association(
+            neutron_client, bgpvpn2_id, network_2_id)
+
+        test_utils.wait_for_bgp_net_assoc(neutron_client,
+                                          bgpvpn1_id, network_1_id)
+        test_utils.wait_for_bgp_net_assoc(neutron_client,
+                                          bgpvpn2_id, network_2_id)
+
+        logger.info('Waiting for the VMs to connect to each other using the'
+                    ' updated network configuration for VPN2')
+        test_utils.wait_before_subtest()
+
+        # 10.10.10.11 should return 'not reachable' to sdnvpn-4
+        results.check_ssh_output(vms[3], vms[0],
+                                 expected='not reachable',
+                                 timeout=30)
+
+    except Exception as e:
+        logger.error("exception occurred while executing testcase_2bis: %s", e)
+        raise
+    finally:
+        test_utils.cleanup_glance(conn, image_ids)
+        test_utils.cleanup_neutron(conn, neutron_client, [], bgpvpn_ids,
+                                   [], [], [], [])
+
+        try:
+            test_utils.delete_stack_and_wait(conn, stack_id)
+        except Exception as e:
+            logger.error(
+                "exception occurred while executing testcase_2bis: %s", e)
+
+    return results.compile_summary()
+
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/sdnvpn/test/functest/testcase_3.py b/sdnvpn/test/functest/testcase_3.py
index a527236..48024cb 100644
--- a/sdnvpn/test/functest/testcase_3.py
+++ b/sdnvpn/test/functest/testcase_3.py
@@ -34,50 +34,64 @@ TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
def main():
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
openstack_nodes = test_utils.get_nodes()
+ installer_type = str(os.environ['INSTALLER_TYPE'].lower())
# node.is_odl() doesn't work in Apex
# https://jira.opnfv.org/browse/RELENG-192
- controllers = [node for node in openstack_nodes
- if "running" in
- node.run_cmd("sudo systemctl status opendaylight")]
+ fuel_cmd = "sudo systemctl status opendaylight"
+ apex_cmd = "sudo docker exec opendaylight_api " \
+ "/opt/opendaylight/bin/status"
+ health_cmd = "sudo docker ps -f name=opendaylight_api -f " \
+ "health=healthy -q"
+ if installer_type in ["fuel"]:
+ odl_nodes = [node for node in openstack_nodes
+ if "running" in node.run_cmd(fuel_cmd)]
+ elif installer_type in ["apex"]:
+ odl_nodes = [node for node in openstack_nodes
+ if node.run_cmd(health_cmd)
+ if "Running" in node.run_cmd(apex_cmd)]
+ else:
+ logger.error("Incompatible installer type")
+
computes = [node for node in openstack_nodes if node.is_compute()]
msg = ("Verify that OpenDaylight can start/communicate with zrpcd/Quagga")
results.record_action(msg)
results.add_to_summary(0, "-")
- if not controllers:
- msg = ("Controller (ODL) list is empty. Skipping rest of tests.")
+ if not odl_nodes:
+ msg = ("ODL node list is empty. Skipping rest of tests.")
logger.info(msg)
results.add_failure(msg)
return results.compile_summary()
else:
- msg = ("Controller (ODL) list is ready")
+ msg = ("ODL node list is ready")
logger.info(msg)
results.add_success(msg)
logger.info("Checking if zrpcd is "
- "running on the controller nodes")
+ "running on the opendaylight nodes")
- for controller in controllers:
- output_zrpcd = controller.run_cmd("ps --no-headers -C "
- "zrpcd -o state")
+ for odl_node in odl_nodes:
+ output_zrpcd = odl_node.run_cmd("ps --no-headers -C "
+ "zrpcd -o state")
states = output_zrpcd.split()
running = any([s != 'Z' for s in states])
- msg = ("zrpcd is running in {name}".format(name=controller.name))
+ msg = ("zrpcd is running in {name}".format(name=odl_node.name))
if not running:
- logger.info("zrpcd is not running on the controller node {name}"
- .format(name=controller.name))
+ logger.info("zrpcd is not running on the opendaylight node {name}"
+ .format(name=odl_node.name))
results.add_failure(msg)
else:
- logger.info("zrpcd is running on the controller node {name}"
- .format(name=controller.name))
+ logger.info("zrpcd is running on the opendaylight node {name}"
+ .format(name=odl_node.name))
results.add_success(msg)
results.add_to_summary(0, "-")
@@ -85,51 +99,55 @@ def main():
# Find the BGP entity owner in ODL because of this bug:
# https://jira.opendaylight.org/browse/NETVIRT-1308
msg = ("Found BGP entity owner")
- controller = test_utils.get_odl_bgp_entity_owner(controllers)
- if controller is None:
+ odl_node = test_utils.get_odl_bgp_entity_owner(odl_nodes)
+ if odl_node is None:
logger.error("Failed to find the BGP entity owner")
results.add_failure(msg)
else:
logger.info('BGP entity owner is {name}'
- .format(name=controller.name))
+ .format(name=odl_node.name))
results.add_success(msg)
results.add_to_summary(0, "-")
- get_ext_ip_cmd = "sudo ip a | grep br-ex | grep inet | awk '{print $2}'"
- ext_net_cidr = controller.run_cmd(get_ext_ip_cmd).strip().split('\n')
- ext_net_mask = ext_net_cidr[0].split('/')[1]
- controller_ext_ip = ext_net_cidr[0].split('/')[0]
+ installer_type = str(os.environ['INSTALLER_TYPE'].lower())
+ if installer_type in ['apex']:
+ odl_interface = 'br-ex'
+ elif installer_type in ['fuel']:
+ odl_interface = 'br-ext'
+ else:
+ logger.error("Incompatible installer type")
+ odl_ip, odl_netmask = test_utils.get_node_ip_and_netmask(
+ odl_node, odl_interface)
- logger.info("Starting bgp speaker of controller at IP %s "
- % controller_ext_ip)
+ logger.info("Starting bgp speaker of opendaylight node at IP %s "
+ % odl_ip)
# Ensure that ZRPCD ip & port are well configured within ODL
add_client_conn_to_bgp = "bgp-connect -p 7644 -h 127.0.0.1 add"
- test_utils.run_odl_cmd(controller, add_client_conn_to_bgp)
+ test_utils.run_odl_cmd(odl_node, add_client_conn_to_bgp)
# Start bgp daemon
start_quagga = "odl:configure-bgp -op start-bgp-server " \
- "--as-num 100 --router-id {0}".format(controller_ext_ip)
- test_utils.run_odl_cmd(controller, start_quagga)
+ "--as-num 100 --router-id {0}".format(odl_ip)
+ test_utils.run_odl_cmd(odl_node, start_quagga)
# we need to wait a bit until the bgpd is up
time.sleep(5)
- logger.info("Checking if bgpd is running"
- " on the controller node")
+ logger.info("Checking if bgpd is running on the opendaylight node")
# Check if there is a non-zombie bgpd process
- output_bgpd = controller.run_cmd("ps --no-headers -C "
- "bgpd -o state")
+ output_bgpd = odl_node.run_cmd("ps --no-headers -C "
+ "bgpd -o state")
states = output_bgpd.split()
running = any([s != 'Z' for s in states])
msg = ("bgpd is running")
if not running:
- logger.info("bgpd is not running on the controller node")
+ logger.info("bgpd is not running on the opendaylight node")
results.add_failure(msg)
else:
- logger.info("bgpd is running on the controller node")
+ logger.info("bgpd is running on the opendaylight node")
results.add_success(msg)
results.add_to_summary(0, "-")
@@ -138,29 +156,29 @@ def main():
# but the test is disabled because of buggy upstream
# https://github.com/6WIND/zrpcd/issues/15
# stop_quagga = 'odl:configure-bgp -op stop-bgp-server'
- # test_utils.run_odl_cmd(controller, stop_quagga)
+ # test_utils.run_odl_cmd(odl_node, stop_quagga)
# logger.info("Checking if bgpd is still running"
- # " on the controller node")
+ # " on the opendaylight node")
- # output_bgpd = controller.run_cmd("ps --no-headers -C " \
- # "bgpd -o state")
+ # output_bgpd = odl_node.run_cmd("ps --no-headers -C " \
+ # "bgpd -o state")
# states = output_bgpd.split()
# running = any([s != 'Z' for s in states])
# msg = ("bgpd is stopped")
# if not running:
- # logger.info("bgpd is not running on the controller node")
+ # logger.info("bgpd is not running on the opendaylight node")
# results.add_success(msg)
# else:
- # logger.info("bgpd is still running on the controller node")
+ # logger.info("bgpd is still running on the opendaylight node")
# results.add_failure(msg)
# Taken from the sfc tests
if not os.path.isfile(COMMON_CONFIG.ubuntu_image_path):
logger.info("Downloading image")
image_dest_path = '/'.join(
- COMMON_CONFIG.ubuntu_image_path.split('/')[:-1])
+ COMMON_CONFIG.ubuntu_image_path.split('/')[:-1])
os_utils.download_url(
"http://artifacts.opnfv.org/sdnvpn/"
"ubuntu-16.04-server-cloudimg-amd64-disk1.img",
@@ -168,33 +186,33 @@ def main():
else:
logger.info("Using old image")
- glance_client = os_utils.get_glance_client()
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
(floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
subnet_ids, interfaces, bgpvpn_ids, flavor_ids) = ([] for i in range(9))
+ quagga_vm = None
+ fake_fip = None
try:
_, flavor_id = test_utils.create_custom_flavor()
flavor_ids.append(flavor_id)
sg_id = os_utils.create_security_group_full(
- neutron_client, TESTCASE_CONFIG.secgroup_name,
+ conn, TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
- test_utils.open_icmp(neutron_client, sg_id)
- test_utils.open_http_port(neutron_client, sg_id)
+ test_utils.open_icmp(conn, sg_id)
+ test_utils.open_http_port(conn, sg_id)
- test_utils.open_bgp_port(neutron_client, sg_id)
+ test_utils.open_bgp_port(conn, sg_id)
image_id = os_utils.create_glance_image(
- glance_client, TESTCASE_CONFIG.image_name,
+ conn, TESTCASE_CONFIG.image_name,
COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
container="bare", public='public')
image_ids.append(image_id)
net_1_id, subnet_1_id, router_1_id = test_utils.create_network(
- neutron_client,
+ conn,
TESTCASE_CONFIG.net_1_name,
TESTCASE_CONFIG.subnet_1_name,
TESTCASE_CONFIG.subnet_1_cidr,
@@ -202,7 +220,7 @@ def main():
quagga_net_id, subnet_quagga_id, \
router_quagga_id = test_utils.create_network(
- neutron_client,
+ conn,
TESTCASE_CONFIG.quagga_net_name,
TESTCASE_CONFIG.quagga_subnet_name,
TESTCASE_CONFIG.quagga_subnet_cidr,
@@ -223,7 +241,7 @@ def main():
logger.error("Incompatible installer type")
ubuntu_image_id = os_utils.create_glance_image(
- glance_client,
+ conn,
COMMON_CONFIG.ubuntu_image_name,
COMMON_CONFIG.ubuntu_image_path,
disk,
@@ -243,11 +261,12 @@ def main():
# cloud-init script.
# fake_fip is needed to bypass NAT
# see below for the reason why.
- fake_fip = os_utils.create_floating_ip(neutron_client)
+ fake_fip = os_utils.create_floating_ip(conn)
# pin quagga to some compute
floatingip_ids.append(fake_fip['fip_id'])
- compute_node = nova_client.hypervisors.list()[0]
- quagga_compute_node = "nova:" + compute_node.hypervisor_hostname
+ compute_node = conn.compute.hypervisors().next()
+ compute_node = conn.compute.get_hypervisor(compute_node)
+ quagga_compute_node = "nova:" + compute_node.name
# Map the hypervisor used above to a compute handle
# returned by releng's manager
for comp in computes:
@@ -255,16 +274,16 @@ def main():
compute = comp
break
quagga_bootstrap_script = quagga.gen_quagga_setup_script(
- controller_ext_ip,
+ odl_ip,
fake_fip['fip_addr'],
- ext_net_mask,
+ odl_netmask,
TESTCASE_CONFIG.external_network_ip_prefix,
TESTCASE_CONFIG.route_distinguishers,
TESTCASE_CONFIG.import_targets,
TESTCASE_CONFIG.export_targets)
quagga_vm = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.quagga_instance_name,
ubuntu_image_id,
quagga_net_id,
@@ -274,50 +293,46 @@ def main():
userdata=quagga_bootstrap_script,
compute_node=quagga_compute_node)
- instance_ids.append(quagga_vm)
+ instance_ids.append(quagga_vm.id)
- quagga_vm_port = test_utils.get_port(neutron_client,
+ quagga_vm_port = test_utils.get_port(conn,
quagga_vm.id)
- fip_added = os_utils.attach_floating_ip(neutron_client,
- quagga_vm_port['id'])
+ fip_added = os_utils.attach_floating_ip(conn,
+ quagga_vm_port.id)
msg = ("Assign a Floating IP to %s " %
TESTCASE_CONFIG.quagga_instance_name)
if fip_added:
results.add_success(msg)
- floatingip_ids.append(fip_added['floatingip']['id'])
+ floatingip_ids.append(fip_added.id)
else:
results.add_failure(msg)
test_utils.attach_instance_to_ext_br(quagga_vm, compute)
- try:
- testcase = "Bootstrap quagga inside an OpenStack instance"
- cloud_init_success = test_utils.wait_for_cloud_init(quagga_vm)
- if cloud_init_success:
- results.add_success(testcase)
- else:
- results.add_failure(testcase)
- results.add_to_summary(0, "=")
-
- results.add_to_summary(0, '-')
- results.add_to_summary(1, "Peer Quagga with OpenDaylight")
- results.add_to_summary(0, '-')
-
- neighbor = quagga.odl_add_neighbor(fake_fip['fip_addr'],
- controller_ext_ip,
- controller)
- peer = quagga.check_for_peering(controller)
-
- finally:
- test_utils.detach_instance_from_ext_br(quagga_vm, compute)
+ testcase = "Bootstrap quagga inside an OpenStack instance"
+ cloud_init_success = test_utils.wait_for_cloud_init(conn, quagga_vm)
+ if cloud_init_success:
+ results.add_success(testcase)
+ else:
+ results.add_failure(testcase)
+ results.add_to_summary(0, "=")
+
+ results.add_to_summary(0, '-')
+ results.add_to_summary(1, "Peer Quagga with OpenDaylight")
+ results.add_to_summary(0, '-')
+
+ neighbor = quagga.odl_add_neighbor(fake_fip['fip_addr'],
+ odl_ip,
+ odl_node)
+ peer = quagga.check_for_peering(odl_node)
if neighbor and peer:
results.add_success("Peering with quagga")
else:
results.add_failure("Peering with quagga")
- test_utils.add_quagga_external_gre_end_point(controllers,
+ test_utils.add_quagga_external_gre_end_point(odl_nodes,
fake_fip['fip_addr'])
test_utils.wait_before_subtest()
@@ -346,10 +361,10 @@ def main():
userdata_common = test_utils.generate_ping_userdata(
[TESTCASE_CONFIG.external_network_ip])
- compute_node = nova_client.hypervisors.list()[0]
- av_zone_1 = "nova:" + compute_node.hypervisor_hostname
+ compute_node = conn.compute.hypervisors().next()
+ av_zone_1 = "nova:" + compute_node.name
vm_bgpvpn = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_1_name,
image_id,
net_1_id,
@@ -358,10 +373,10 @@ def main():
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1,
userdata=userdata_common)
- instance_ids.append(vm_bgpvpn)
+ instance_ids.append(vm_bgpvpn.id)
# wait for VM to get IP
- instance_up = test_utils.wait_for_instances_up(vm_bgpvpn)
+ instance_up = test_utils.wait_for_instances_get_dhcp(vm_bgpvpn)
if not instance_up:
logger.error("One or more instances are down")
@@ -373,7 +388,7 @@ def main():
msg = ("External IP prefix %s is exchanged with ODL"
% TESTCASE_CONFIG.external_network_ip_prefix)
fib_added = test_utils.is_fib_entry_present_on_odl(
- controllers,
+ odl_nodes,
TESTCASE_CONFIG.external_network_ip_prefix,
TESTCASE_CONFIG.route_distinguishers)
if fib_added:
@@ -398,19 +413,22 @@ def main():
logger.error("exception occurred while executing testcase_3: %s", e)
raise
finally:
- test_utils.cleanup_nova(nova_client, instance_ids, flavor_ids)
- test_utils.cleanup_glance(glance_client, image_ids)
- test_utils.cleanup_neutron(neutron_client, floatingip_ids,
+ if quagga_vm is not None:
+ test_utils.detach_instance_from_ext_br(quagga_vm, compute)
+ test_utils.cleanup_nova(conn, instance_ids, flavor_ids)
+ test_utils.cleanup_glance(conn, image_ids)
+ test_utils.cleanup_neutron(conn, neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,
router_ids, network_ids)
- bgp_nbr_disconnect_cmd = ("bgp-nbr -i %s -a 200 del"
- % fake_fip['fip_addr'])
+ if fake_fip is not None:
+ bgp_nbr_disconnect_cmd = ("bgp-nbr -i %s -a 200 del"
+ % fake_fip['fip_addr'])
+ test_utils.run_odl_cmd(odl_node, bgp_nbr_disconnect_cmd)
bgp_server_stop_cmd = ("bgp-rtr -r %s -a 100 del"
- % controller_ext_ip)
+ % odl_ip)
odl_zrpc_disconnect_cmd = "bgp-connect -p 7644 -h 127.0.0.1 del"
- test_utils.run_odl_cmd(controller, bgp_nbr_disconnect_cmd)
- test_utils.run_odl_cmd(controller, bgp_server_stop_cmd)
- test_utils.run_odl_cmd(controller, odl_zrpc_disconnect_cmd)
+ test_utils.run_odl_cmd(odl_node, bgp_server_stop_cmd)
+ test_utils.run_odl_cmd(odl_node, odl_zrpc_disconnect_cmd)
return results.compile_summary()
diff --git a/sdnvpn/test/functest/testcase_4.py b/sdnvpn/test/functest/testcase_4.py
index 9b11cc3..650a88a 100644
--- a/sdnvpn/test/functest/testcase_4.py
+++ b/sdnvpn/test/functest/testcase_4.py
@@ -26,39 +26,38 @@ TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
def main():
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
- glance_client = os_utils.get_glance_client()
(floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))
try:
image_id = os_utils.create_glance_image(
- glance_client, TESTCASE_CONFIG.image_name,
+ conn, TESTCASE_CONFIG.image_name,
COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
container="bare", public='public')
image_ids.append(image_id)
network_1_id, subnet_1_id, router_1_id = test_utils.create_network(
- neutron_client,
+ conn,
TESTCASE_CONFIG.net_1_name,
TESTCASE_CONFIG.subnet_1_name,
TESTCASE_CONFIG.subnet_1_cidr,
TESTCASE_CONFIG.router_1_name)
network_2_id = test_utils.create_net(
- neutron_client,
+ conn,
TESTCASE_CONFIG.net_2_name)
subnet_2_id = test_utils.create_subnet(
- neutron_client,
+ conn,
TESTCASE_CONFIG.subnet_2_name,
TESTCASE_CONFIG.subnet_2_cidr,
network_2_id)
@@ -68,50 +67,50 @@ def main():
subnet_ids.extend([subnet_1_id, subnet_2_id])
sg_id = os_utils.create_security_group_full(
- neutron_client,
+ conn,
TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
- compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
+ compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
av_zone_1 = "nova:" + compute_nodes[0]
av_zone_2 = "nova:" + compute_nodes[1]
# boot INTANCES
vm_2 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_2_name,
image_id,
network_1_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1)
- vm_2_ip = test_utils.get_instance_ip(vm_2)
+ vm_2_ip = test_utils.get_instance_ip(conn, vm_2)
vm_3 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_3_name,
image_id,
network_1_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_2)
- vm_3_ip = test_utils.get_instance_ip(vm_3)
+ vm_3_ip = test_utils.get_instance_ip(conn, vm_3)
vm_5 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_5_name,
image_id,
network_2_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_2)
- vm_5_ip = test_utils.get_instance_ip(vm_5)
+ vm_5_ip = test_utils.get_instance_ip(conn, vm_5)
# We boot vm5 first because we need vm5_ip for vm4 userdata
u4 = test_utils.generate_ping_userdata([vm_5_ip])
vm_4 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_4_name,
image_id,
network_2_id,
@@ -119,7 +118,7 @@ def main():
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1,
userdata=u4)
- vm_4_ip = test_utils.get_instance_ip(vm_4)
+ vm_4_ip = test_utils.get_instance_ip(conn, vm_4)
# We boot VM1 at the end because we need to get the IPs
# first to generate the userdata
@@ -128,7 +127,7 @@ def main():
vm_4_ip,
vm_5_ip])
vm_1 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
@@ -258,9 +257,9 @@ def main():
logger.error("exception occurred while executing testcase_4: %s", e)
raise
finally:
- test_utils.cleanup_nova(nova_client, instance_ids)
- test_utils.cleanup_glance(glance_client, image_ids)
- test_utils.cleanup_neutron(neutron_client, floatingip_ids,
+ test_utils.cleanup_nova(conn, instance_ids)
+ test_utils.cleanup_glance(conn, image_ids)
+ test_utils.cleanup_neutron(conn, neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,
router_ids, network_ids)
diff --git a/sdnvpn/test/functest/testcase_4bis.py b/sdnvpn/test/functest/testcase_4bis.py
new file mode 100644
index 0000000..6245f7c
--- /dev/null
+++ b/sdnvpn/test/functest/testcase_4bis.py
@@ -0,0 +1,215 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2018 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+import logging
+import sys
+import pkg_resources
+
+from random import randint
+from sdnvpn.lib import config as sdnvpn_config
+from sdnvpn.lib import openstack_utils as os_utils
+from sdnvpn.lib import utils as test_utils
+from sdnvpn.lib.results import Results
+
+logger = logging.getLogger(__name__)
+
+COMMON_CONFIG = sdnvpn_config.CommonConfig()
+TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
+ 'sdnvpn.test.functest.testcase_4bis')
+
+
+def main():
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
+
+ results.add_to_summary(0, '=')
+ results.add_to_summary(2, 'STATUS', 'SUBTEST')
+ results.add_to_summary(0, '=')
+
+ conn = os_utils.get_os_connection()
+ # neutron client is needed as long as bgpvpn heat module
+ # is not yet installed by default in apex (APEX-618)
+ neutron_client = os_utils.get_neutron_client()
+
+ image_ids = []
+ bgpvpn_ids = []
+
+ try:
+ image_id = os_utils.create_glance_image(
+ conn, TESTCASE_CONFIG.image_name,
+ COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
+ container='bare', public='public')
+ image_ids = [image_id]
+
+ compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
+ az_1 = 'nova:' + compute_nodes[0]
+ az_2 = 'nova:' + compute_nodes[1]
+
+ file_path = pkg_resources.resource_filename(
+ 'sdnvpn', TESTCASE_CONFIG.hot_file_name)
+ templ = open(file_path, 'r').read()
+ logger.debug("Template is read: '%s'" % templ)
+ env = test_utils.get_heat_environment(TESTCASE_CONFIG, COMMON_CONFIG)
+ logger.debug("Environment is read: '%s'" % env)
+
+ env['name'] = TESTCASE_CONFIG.stack_name
+ env['template'] = templ
+ env['parameters']['image_n'] = TESTCASE_CONFIG.image_name
+ env['parameters']['av_zone_1'] = az_1
+ env['parameters']['av_zone_2'] = az_2
+
+ stack_id = os_utils.create_stack(conn, **env)
+ if stack_id is None:
+ logger.error('Stack create start failed')
+ raise SystemError('Stack create start failed')
+
+ test_utils.wait_stack_for_status(conn, stack_id, 'CREATE_COMPLETE')
+
+ router_1_output = os_utils.get_output(conn, stack_id, 'router_1_o')
+ router_1_id = router_1_output['output_value']
+ net_2_output = os_utils.get_output(conn, stack_id, 'net_2_o')
+ network_2_id = net_2_output['output_value']
+
+ vm_stack_output_keys = ['vm1_o', 'vm2_o', 'vm3_o', 'vm4_o', 'vm5_o']
+ vms = test_utils.get_vms_from_stack_outputs(conn,
+ stack_id,
+ vm_stack_output_keys)
+
+ logger.debug("Entering base test case with stack '%s'" % stack_id)
+
+ msg = ('Create VPN with eRT<>iRT')
+ results.record_action(msg)
+ vpn_name = 'sdnvpn-' + str(randint(100000, 999999))
+ kwargs = {
+ 'import_targets': TESTCASE_CONFIG.targets1,
+ 'export_targets': TESTCASE_CONFIG.targets2,
+ 'route_distinguishers': TESTCASE_CONFIG.route_distinguishers,
+ 'name': vpn_name
+ }
+ bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)
+ bgpvpn_id = bgpvpn['bgpvpn']['id']
+ logger.debug("VPN created details: %s" % bgpvpn)
+ bgpvpn_ids.append(bgpvpn_id)
+
+ msg = ("Associate router '%s' to the VPN." %
+ TESTCASE_CONFIG.heat_parameters['router_1_name'])
+ results.record_action(msg)
+ results.add_to_summary(0, '-')
+
+ test_utils.create_router_association(
+ neutron_client, bgpvpn_id, router_1_id)
+
+ # Remember: vms[X] is former vm_X+1
+
+ results.get_ping_status(vms[0], vms[1], expected='PASS', timeout=200)
+ results.get_ping_status(vms[0], vms[2], expected='PASS', timeout=30)
+ results.get_ping_status(vms[0], vms[3], expected='FAIL', timeout=30)
+
+ msg = ("Associate network '%s' to the VPN." %
+ TESTCASE_CONFIG.heat_parameters['net_2_name'])
+ results.add_to_summary(0, '-')
+ results.record_action(msg)
+ results.add_to_summary(0, '-')
+
+ test_utils.create_network_association(
+ neutron_client, bgpvpn_id, network_2_id)
+
+ test_utils.wait_for_bgp_router_assoc(
+ neutron_client, bgpvpn_id, router_1_id)
+ test_utils.wait_for_bgp_net_assocs(
+ neutron_client, bgpvpn_id, network_2_id)
+
+ logger.info('Waiting for the VMs to connect to each other using the'
+ ' updated network configuration')
+ test_utils.wait_before_subtest()
+
+ results.get_ping_status(vms[3], vms[4], expected='PASS', timeout=30)
+ # TODO enable again when isolation in VPN with iRT != eRT works
+ # results.get_ping_status(vms[0], vms[3], expected="FAIL", timeout=30)
+ # results.get_ping_status(vms[0], vms[4], expected="FAIL", timeout=30)
+
+ msg = ('Update VPN with eRT=iRT ...')
+ results.add_to_summary(0, "-")
+ results.record_action(msg)
+ results.add_to_summary(0, "-")
+
+ # use bgpvpn-create instead of update till NETVIRT-1067 bug is fixed
+ # kwargs = {"import_targets": TESTCASE_CONFIG.targets1,
+ # "export_targets": TESTCASE_CONFIG.targets1,
+ # "name": vpn_name}
+ # bgpvpn = test_utils.update_bgpvpn(neutron_client,
+ # bgpvpn_id, **kwargs)
+
+ test_utils.delete_bgpvpn(neutron_client, bgpvpn_id)
+ bgpvpn_ids.remove(bgpvpn_id)
+ kwargs = {
+ 'import_targets': TESTCASE_CONFIG.targets1,
+ 'export_targets': TESTCASE_CONFIG.targets1,
+ 'route_distinguishers': TESTCASE_CONFIG.route_distinguishers,
+ 'name': vpn_name
+ }
+
+ test_utils.wait_before_subtest()
+
+ bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)
+ bgpvpn_id = bgpvpn['bgpvpn']['id']
+ logger.debug("VPN re-created details: %s" % bgpvpn)
+ bgpvpn_ids.append(bgpvpn_id)
+
+ msg = ("Associate again network '%s' and router '%s' to the VPN."
+ % (TESTCASE_CONFIG.heat_parameters['net_2_name'],
+ TESTCASE_CONFIG.heat_parameters['router_1_name']))
+ results.add_to_summary(0, '-')
+ results.record_action(msg)
+ results.add_to_summary(0, '-')
+
+ test_utils.create_router_association(
+ neutron_client, bgpvpn_id, router_1_id)
+
+ test_utils.create_network_association(
+ neutron_client, bgpvpn_id, network_2_id)
+
+ test_utils.wait_for_bgp_router_assoc(
+ neutron_client, bgpvpn_id, router_1_id)
+ test_utils.wait_for_bgp_net_assoc(
+ neutron_client, bgpvpn_id, network_2_id)
+ # The above code has to be removed after re-enabling bgpvpn-update
+
+ logger.info('Waiting for the VMs to connect to each other using the'
+ ' updated network configuration')
+ test_utils.wait_before_subtest()
+
+ # TODO: uncomment the following once ODL netvirt fixes the following
+ # bug: https://jira.opendaylight.org/browse/NETVIRT-932
+ # results.get_ping_status(vms[0], vms[3], expected="PASS", timeout=30)
+ # results.get_ping_status(vms[0], vms[4], expected="PASS", timeout=30)
+
+ results.add_to_summary(0, '=')
+ logger.info("\n%s" % results.summary)
+
+ except Exception as e:
+ logger.error("exception occurred while executing testcase_4bis: %s", e)
+ raise
+ finally:
+ test_utils.cleanup_glance(conn, image_ids)
+ test_utils.cleanup_neutron(conn, neutron_client, [], bgpvpn_ids,
+ [], [], [], [])
+
+ try:
+ test_utils.delete_stack_and_wait(conn, stack_id)
+ except Exception as e:
+ logger.error(
+ "exception occurred while executing testcase_4bis: %s", e)
+
+ return results.compile_summary()
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/sdnvpn/test/functest/testcase_7.py b/sdnvpn/test/functest/testcase_7.py
index 1ad0538..e588b14 100644
--- a/sdnvpn/test/functest/testcase_7.py
+++ b/sdnvpn/test/functest/testcase_7.py
@@ -35,35 +35,34 @@ TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
def main():
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
- glance_client = os_utils.get_glance_client()
(floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))
try:
image_id = os_utils.create_glance_image(
- glance_client, TESTCASE_CONFIG.image_name,
+ conn, TESTCASE_CONFIG.image_name,
COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
container="bare", public='public')
image_ids.append(image_id)
network_1_id, subnet_1_id, router_1_id = test_utils.create_network(
- neutron_client,
+ conn,
TESTCASE_CONFIG.net_1_name,
TESTCASE_CONFIG.subnet_1_name,
TESTCASE_CONFIG.subnet_1_cidr,
TESTCASE_CONFIG.router_1_name)
network_2_id, subnet_2_id, router_2_id = test_utils.create_network(
- neutron_client,
+ conn,
TESTCASE_CONFIG.net_2_name,
TESTCASE_CONFIG.subnet_2_name,
TESTCASE_CONFIG.subnet_2_cidr,
@@ -76,23 +75,23 @@ def main():
subnet_ids.extend([subnet_1_id, subnet_2_id])
sg_id = os_utils.create_security_group_full(
- neutron_client, TESTCASE_CONFIG.secgroup_name,
+ conn, TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
- test_utils.open_icmp(neutron_client, sg_id)
- test_utils.open_http_port(neutron_client, sg_id)
+ test_utils.open_icmp(conn, sg_id)
+ test_utils.open_http_port(conn, sg_id)
vm_2 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_2_name,
image_id,
network_2_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name)
- vm_2_ip = test_utils.get_instance_ip(vm_2)
+ vm_2_ip = test_utils.get_instance_ip(conn, vm_2)
u1 = test_utils.generate_ping_userdata([vm_2_ip])
vm_1 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
@@ -151,26 +150,24 @@ def main():
results.record_action(msg)
results.add_to_summary(0, '-')
- vm2_port = test_utils.get_port(neutron_client,
- vm_2.id)
- fip_added = os_utils.attach_floating_ip(neutron_client,
- vm2_port['id'])
+ vm2_port = test_utils.get_port(conn, vm_2.id)
+ fip_added = os_utils.attach_floating_ip(conn, vm2_port.id)
if fip_added:
results.add_success(msg)
else:
results.add_failure(msg)
- results.ping_ip_test(fip_added['floatingip']['floating_ip_address'])
+ results.ping_ip_test(fip_added.floating_ip_address)
- floatingip_ids.append(fip_added['floatingip']['id'])
+ floatingip_ids.append(fip_added.id)
except Exception as e:
logger.error("exception occurred while executing testcase_7: %s", e)
raise
finally:
- test_utils.cleanup_nova(nova_client, instance_ids)
- test_utils.cleanup_glance(glance_client, image_ids)
- test_utils.cleanup_neutron(neutron_client, floatingip_ids,
+ test_utils.cleanup_nova(conn, instance_ids)
+ test_utils.cleanup_glance(conn, image_ids)
+ test_utils.cleanup_neutron(conn, neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,
router_ids, network_ids)
diff --git a/sdnvpn/test/functest/testcase_8.py b/sdnvpn/test/functest/testcase_8.py
index 6336f46..26d1f35 100644
--- a/sdnvpn/test/functest/testcase_8.py
+++ b/sdnvpn/test/functest/testcase_8.py
@@ -33,35 +33,34 @@ TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
def main():
- results = Results(COMMON_CONFIG.line_length)
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
- nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
- glance_client = os_utils.get_glance_client()
(floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))
try:
image_id = os_utils.create_glance_image(
- glance_client, TESTCASE_CONFIG.image_name,
+ conn, TESTCASE_CONFIG.image_name,
COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
container="bare", public='public')
image_ids.append(image_id)
network_1_id, subnet_1_id, router_1_id = test_utils.create_network(
- neutron_client,
+ conn,
TESTCASE_CONFIG.net_1_name,
TESTCASE_CONFIG.subnet_1_name,
TESTCASE_CONFIG.subnet_1_cidr,
TESTCASE_CONFIG.router_1_name)
network_2_id, subnet_2_id, router_1_id = test_utils.create_network(
- neutron_client,
+ conn,
TESTCASE_CONFIG.net_2_name,
TESTCASE_CONFIG.subnet_2_name,
TESTCASE_CONFIG.subnet_2_cidr,
@@ -74,29 +73,29 @@ def main():
subnet_ids.extend([subnet_1_id, subnet_2_id])
sg_id = os_utils.create_security_group_full(
- neutron_client, TESTCASE_CONFIG.secgroup_name,
+ conn, TESTCASE_CONFIG.secgroup_name,
TESTCASE_CONFIG.secgroup_descr)
- test_utils.open_icmp(neutron_client, sg_id)
- test_utils.open_http_port(neutron_client, sg_id)
+ test_utils.open_icmp(conn, sg_id)
+ test_utils.open_http_port(conn, sg_id)
- compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
+ compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
av_zone_1 = "nova:" + compute_nodes[0]
# spawning the VMs on the same compute because fib flow (21) entries
# are not created properly if vm1 and vm2 are attached to two
# different computes
vm_2 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_2_name,
image_id,
network_2_id,
sg_id,
secgroup_name=TESTCASE_CONFIG.secgroup_name,
compute_node=av_zone_1)
- vm_2_ip = test_utils.get_instance_ip(vm_2)
+ vm_2_ip = test_utils.get_instance_ip(conn, vm_2)
u1 = test_utils.generate_ping_userdata([vm_2_ip])
vm_1 = test_utils.create_instance(
- nova_client,
+ conn,
TESTCASE_CONFIG.instance_1_name,
image_id,
network_1_id,
@@ -154,31 +153,30 @@ def main():
msg = "Assign a Floating IP to %s" % vm_1.name
results.record_action(msg)
- vm1_port = test_utils.get_port(neutron_client, vm_1.id)
- fip_added = os_utils.attach_floating_ip(neutron_client,
- vm1_port['id'])
+ vm1_port = test_utils.get_port(conn, vm_1.id)
+ fip_added = os_utils.attach_floating_ip(conn, vm1_port.id)
if fip_added:
results.add_success(msg)
else:
results.add_failure(msg)
- fip = fip_added['floatingip']['floating_ip_address']
+ fip = fip_added.floating_ip_address
results.add_to_summary(0, "=")
results.record_action("Ping %s via Floating IP" % vm_1.name)
results.add_to_summary(0, "-")
results.ping_ip_test(fip)
- floatingip_ids.append(fip_added['floatingip']['id'])
+ floatingip_ids.append(fip_added.id)
except Exception as e:
logger.error("exception occurred while executing testcase_8: %s", e)
raise
finally:
- test_utils.cleanup_nova(nova_client, instance_ids)
- test_utils.cleanup_glance(glance_client, image_ids)
- test_utils.cleanup_neutron(neutron_client, floatingip_ids,
+ test_utils.cleanup_nova(conn, instance_ids)
+ test_utils.cleanup_glance(conn, image_ids)
+ test_utils.cleanup_neutron(conn, neutron_client, floatingip_ids,
bgpvpn_ids, interfaces, subnet_ids,
router_ids, network_ids)
diff --git a/sdnvpn/test/functest/testcase_8bis.py b/sdnvpn/test/functest/testcase_8bis.py
new file mode 100644
index 0000000..d850020
--- /dev/null
+++ b/sdnvpn/test/functest/testcase_8bis.py
@@ -0,0 +1,176 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2017 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Test whether router assoc can coexist with floating IP
+# - Create VM1 in net1 with a subnet which is connected to a router
+# which is connected with the gateway
+# - Create VM2 in net2 with a subnet without a router attached.
+# - Create bgpvpn with iRT=eRT
+# - Assoc the router of net1 with bgpvpn and assoc net 2 with the bgpvpn
+# - Try to ping from one VM to the other
+# - Assign a floating IP to the VM in the router assoc network
+# - Ping the floating IP
+
+import logging
+import sys
+import pkg_resources
+
+from sdnvpn.lib import config as sdnvpn_config
+from sdnvpn.lib import openstack_utils as os_utils
+from sdnvpn.lib import utils as test_utils
+from sdnvpn.lib.results import Results
+
+
+logger = logging.getLogger(__name__)
+
+COMMON_CONFIG = sdnvpn_config.CommonConfig()
+TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
+ 'sdnvpn.test.functest.testcase_8bis')
+
+
+def main():
+ conn = os_utils.get_os_connection()
+ results = Results(COMMON_CONFIG.line_length, conn)
+
+ results.add_to_summary(0, "=")
+ results.add_to_summary(2, "STATUS", "SUBTEST")
+ results.add_to_summary(0, "=")
+
+ # neutron client is needed as long as bgpvpn heat module
+ # is not yet installed by default in apex (APEX-618)
+ neutron_client = os_utils.get_neutron_client()
+
+ image_ids = []
+ bgpvpn_ids = []
+
+ try:
+ image_id = os_utils.create_glance_image(
+ conn, TESTCASE_CONFIG.image_name,
+ COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
+ container='bare', public='public')
+ image_ids = [image_id]
+
+ compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
+ az_1 = "nova:" + compute_nodes[0]
+ # spawning the VMs on the same compute because fib flow (21) entries
+ # are not created properly if vm1 and vm2 are attached to two
+ # different computes
+
+ file_path = pkg_resources.resource_filename(
+ 'sdnvpn', TESTCASE_CONFIG.hot_file_name)
+ templ = open(file_path, 'r').read()
+ logger.debug("Template is read: '%s'" % templ)
+ env = test_utils.get_heat_environment(TESTCASE_CONFIG, COMMON_CONFIG)
+ logger.debug("Environment is read: '%s'" % env)
+
+ env['name'] = TESTCASE_CONFIG.stack_name
+ env['parameters']['external_nw'] = os_utils.get_external_net(conn)
+ env['template'] = templ
+ env['parameters']['image_n'] = TESTCASE_CONFIG.image_name
+ env['parameters']['av_zone_1'] = az_1
+
+ stack_id = os_utils.create_stack(conn, **env)
+ if stack_id is None:
+ logger.error('Stack create start failed')
+ raise SystemError('Stack create start failed')
+
+ test_utils.wait_stack_for_status(conn, stack_id, 'CREATE_COMPLETE')
+
+ router_1_output = os_utils.get_output(conn, stack_id, 'router_1_o')
+ router_1_id = router_1_output['output_value']
+ net_2_output = os_utils.get_output(conn, stack_id, 'net_2_o')
+ network_2_id = net_2_output['output_value']
+
+ vm_stack_output_keys = ['vm1_o', 'vm2_o']
+ vms = test_utils.get_vms_from_stack_outputs(conn,
+ stack_id,
+ vm_stack_output_keys)
+
+ logger.debug("Entering base test case with stack '%s'" % stack_id)
+
+ # TODO: check if ODL fixed bug
+ # https://jira.opendaylight.org/browse/NETVIRT-932
+ results.record_action('Create VPN with eRT==iRT')
+ vpn_name = 'sdnvpn-8'
+ kwargs = {
+ 'import_targets': TESTCASE_CONFIG.targets,
+ 'export_targets': TESTCASE_CONFIG.targets,
+ 'route_distinguishers': TESTCASE_CONFIG.route_distinguishers,
+ 'name': vpn_name
+ }
+ bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)
+ bgpvpn_id = bgpvpn['bgpvpn']['id']
+ logger.debug("VPN created details: %s" % bgpvpn)
+ bgpvpn_ids.append(bgpvpn_id)
+
+ msg = ("Associate router '%s' and net '%s' to the VPN."
+ % (TESTCASE_CONFIG.heat_parameters['router_1_name'],
+ TESTCASE_CONFIG.heat_parameters['net_2_name']))
+ results.record_action(msg)
+ results.add_to_summary(0, "-")
+
+ test_utils.create_router_association(
+ neutron_client, bgpvpn_id, router_1_id)
+ test_utils.create_network_association(
+ neutron_client, bgpvpn_id, network_2_id)
+
+ test_utils.wait_for_bgp_router_assoc(
+ neutron_client, bgpvpn_id, router_1_id)
+ test_utils.wait_for_bgp_net_assoc(
+ neutron_client, bgpvpn_id, network_2_id)
+
+ results.get_ping_status(vms[0], vms[1], expected="PASS", timeout=200)
+ results.add_to_summary(0, "=")
+
+ msg = "Assign a Floating IP to %s - using stack update" % vms[0].name
+ results.record_action(msg)
+
+ file_path = pkg_resources.resource_filename(
+ 'sdnvpn', TESTCASE_CONFIG.hot_update_file_name)
+ templ_update = open(file_path, 'r').read()
+ logger.debug("Update template is read: '%s'" % templ_update)
+ templ = test_utils.merge_yaml(templ, templ_update)
+
+ env['name'] = TESTCASE_CONFIG.stack_name
+ env['parameters']['external_nw'] = os_utils.get_external_net(conn)
+ env['template'] = templ
+ env['parameters']['image_n'] = TESTCASE_CONFIG.image_name
+ env['parameters']['av_zone_1'] = az_1
+
+ os_utils.update_stack(conn, stack_id, **env)
+
+ test_utils.wait_stack_for_status(conn, stack_id, 'UPDATE_COMPLETE')
+
+ fip_1_output = os_utils.get_output(conn, stack_id, 'fip_1_o')
+ fip = fip_1_output['output_value']
+
+ results.add_to_summary(0, "=")
+ results.record_action("Ping %s via Floating IP" % vms[0].name)
+ results.add_to_summary(0, "-")
+ results.ping_ip_test(fip)
+
+ except Exception as e:
+ logger.error("exception occurred while executing testcase_8bis: %s", e)
+ raise
+ finally:
+ test_utils.cleanup_glance(conn, image_ids)
+ test_utils.cleanup_neutron(conn, neutron_client, [], bgpvpn_ids,
+ [], [], [], [])
+
+ try:
+ test_utils.delete_stack_and_wait(conn, stack_id)
+ except Exception as e:
+ logger.error(
+ "exception occurred while executing testcase_8bis: %s", e)
+
+ return results.compile_summary()
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/sdnvpn/test/functest/testcase_9.py b/sdnvpn/test/functest/testcase_9.py
index b77360d..c74ceb5 100644
--- a/sdnvpn/test/functest/testcase_9.py
+++ b/sdnvpn/test/functest/testcase_9.py
@@ -15,6 +15,7 @@
# - Verify that the OpenDaylight and gateway Quagga peer
import logging
import sys
+import os
from sdnvpn.lib import config as sdnvpn_config
from sdnvpn.lib import utils as test_utils
@@ -34,12 +35,21 @@ def main():
results.add_to_summary(0, "=")
openstack_nodes = test_utils.get_nodes()
-
+ installer_type = str(os.environ['INSTALLER_TYPE'].lower())
# node.is_odl() doesn't work in Apex
# https://jira.opnfv.org/browse/RELENG-192
- controllers = [node for node in openstack_nodes
- if "running" in
- node.run_cmd("sudo systemctl status opendaylight")]
+ fuel_cmd = "sudo systemctl status opendaylight"
+ apex_cmd = "sudo docker exec opendaylight_api " \
+ "/opt/opendaylight/bin/status"
+ health_cmd = "sudo docker ps -f name=opendaylight_api -f " \
+ "health=healthy -q"
+ if installer_type in ["fuel"]:
+ controllers = [node for node in openstack_nodes
+ if "running" in node.run_cmd(fuel_cmd)]
+ elif installer_type in ["apex"]:
+ controllers = [node for node in openstack_nodes
+ if node.run_cmd(health_cmd)
+ if "Running" in node.run_cmd(apex_cmd)]
msg = ("Verify that all OpenStack nodes OVS br-int have "
"fail_mode set to secure")
diff --git a/setup.cfg b/setup.cfg
index 583228d..ca4e03b 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -6,3 +6,7 @@ home-page = https://wiki.opnfv.org/display/sdnvpn/SDNVPN+project+main+page
packages = sdnvpn
scripts =
sdnvpn/test/functest/run_sdnvpn_tests.py
+
+[entry_points]
+xtesting.testcase =
+ bgpvpn = sdnvpn.test.functest.run_sdnvpn_tests:SdnvpnFunctest
diff --git a/test-requirements.txt b/test-requirements.txt
new file mode 100644
index 0000000..646bbae
--- /dev/null
+++ b/test-requirements.txt
@@ -0,0 +1,5 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+flake8 # MIT
+yamllint
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..7880718
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,53 @@
+[tox]
+minversion = 1.6
+envlist =
+ docs,
+ docs-linkcheck,
+ pep8,
+ yamllint
+skipsdist = true
+
+[testenv]
+usedevelop = False
+setenv=
+ HOME = {envtmpdir}
+ PYTHONPATH = {toxinidir}
+deps =
+ -chttps://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=master
+ -chttps://git.opnfv.org/functest/plain/upper-constraints.txt?h=master
+ -r{toxinidir}/test-requirements.txt
+ -r{toxinidir}/requirements.txt
+install_command = pip install {opts} {packages}
+
+[testenv:docs]
+basepython = python2.7
+deps = -r{toxinidir}/docs/requirements.txt
+commands =
+ sphinx-build -W -b html -n -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/html
+ echo "Generated docs available in {toxinidir}/docs/_build/html"
+whitelist_externals = echo
+
+[testenv:docs-linkcheck]
+basepython = python2.7
+deps = -r{toxinidir}/docs/requirements.txt
+commands = sphinx-build -W -b linkcheck -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/linkcheck
+
+[testenv:yamllint]
+basepython = python2.7
+files =
+ {toxinidir}/docs
+ {toxinidir}/sdnvpn/test/functest/
+commands =
+ yamllint -s {[testenv:yamllint]files}
+
+[testenv:pep8]
+basepython = python2.7
+commands = flake8 {toxinidir}
+
+[flake8]
+# E123, E125 skipped as they are invalid PEP-8.
+
+show-source = True
+ignore = E123,E125
+builtins = _
+exclude = build,dist,doc,legacy,.eggs,.git,.tox,.venv,testapi_venv,venv