path: root/lib/auto/testcase/resiliency/AutoResilItfCloud.py
author    Gerard Damm <gerard.damm@wipro.com>  2018-06-21 21:10:11 -0500
committer Gerard Damm <gerard.damm@wipro.com>  2018-06-21 21:10:11 -0500
commit    dffc2bee5650b10831f3792b162b6ea73a4624ae (patch)
tree      e528528510df9375327887eff956e2afcda91054 /lib/auto/testcase/resiliency/AutoResilItfCloud.py
parent    27fa5fa463e835b424636a567165b7740c9ebfb6 (diff)
checked code logic for UC2-vim001 recovery time measurement
JIRA: AUTO-38

Leveraging the new OpenStack SDK release 0.14, tested the existing code for UC2/vif-001 (simulated VM failure) to verify that the generic code for recovery time measurement works. Entered test code in the placeholders for this test case: challenge start (suspend a VM), challenge stop (resume the VM; this was done externally, from Horizon, to simulate an ONAP-controlled recovery), and test code (periodic status check, stopping when the VM is active again). Time was measured correctly, and the output files (.csv) were created successfully. This allows JIRA ticket AUTO-38 to be closed.

Next steps: interface with ONAP to obtain VNF information (especially the IDs of the corresponding OpenStack VMs) and perform the same measurement, which will then be an actual recovery time, provided DCAE data collection, Policy and CLAMP have the proper configuration.

Change-Id: I7320f7570c119d2b59d1ed6ca787ba975ad04a44
Signed-off-by: Gerard Damm <gerard.damm@wipro.com>
Diffstat (limited to 'lib/auto/testcase/resiliency/AutoResilItfCloud.py')
-rw-r--r--  lib/auto/testcase/resiliency/AutoResilItfCloud.py  30
1 file changed, 6 insertions(+), 24 deletions(-)
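For context, the generic recovery time measurement exercised by this commit reduces to: suspend a VM (challenge start), wait for an external recovery (challenge stop), poll the VM status until it is ACTIVE again, and write the elapsed time to a .csv file. The following is a minimal illustrative sketch, not the project code: it assumes openstacksdk 0.14+, a clouds.yaml entry named 'unh-hpe-openstack-fraser' as used in the diff below, and a placeholder VM ID.

import csv
import time
import openstack

def measure_recovery_time(vm_id, csv_path, poll_interval=2):
    # Connect via a clouds.yaml entry (same method as in the diff below)
    conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')

    # Challenge start: suspend the VM to simulate a failure.
    # suspend_server() is asynchronous and returns before the suspension completes.
    conn.compute.suspend_server(vm_id)
    while conn.compute.get_server(vm_id).status != 'SUSPENDED':
        time.sleep(poll_interval)   # server data is not refreshed live; re-fetch it

    # Challenge stop is assumed to happen externally (e.g. resume from Horizon,
    # or an ONAP-driven recovery); the test code only measures how long it takes
    # for the VM to report ACTIVE again.
    start = time.time()
    while conn.compute.get_server(vm_id).status != 'ACTIVE':
        time.sleep(poll_interval)
    recovery_time = time.time() - start

    # Persist the measurement as a CSV row, as the test framework does with its output files
    with open(csv_path, 'a', newline='') as f:
        csv.writer(f).writerow([vm_id, recovery_time])
    return recovery_time

# Example call with the placeholder VM ID from the diff:
# measure_recovery_time('5d07da11-0e85-4256-9894-482dcee4a5f0', 'recovery_time.csv')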
diff --git a/lib/auto/testcase/resiliency/AutoResilItfCloud.py b/lib/auto/testcase/resiliency/AutoResilItfCloud.py
index 302a662..7feb518 100644
--- a/lib/auto/testcase/resiliency/AutoResilItfCloud.py
+++ b/lib/auto/testcase/resiliency/AutoResilItfCloud.py
@@ -159,7 +159,8 @@ def gdtest_openstack():
# Method 1 (preferred) : assume there is a clouds.yaml file in PATH, starting path search with local directory
#conn = openstack.connect(cloud='armopenstack', region_name='RegionOne')
#conn = openstack.connect(cloud='hpe16openstackEuphrates', region_name='RegionOne')
- conn = openstack.connect(cloud='hpe16openstackFraser', region_name='RegionOne')
+ #conn = openstack.connect(cloud='hpe16openstackFraser', region_name='RegionOne')
+ conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')
# if getting error: AttributeError: module 'openstack' has no attribute 'connect', check that openstack is installed for this python version
@@ -208,8 +209,8 @@ def gdtest_openstack():
openstack_list_projects(conn)
openstack_list_domains(conn)
- # VM: hpe16-Auto-UC2-gdtest-compute1
- gds_ID = '715c677a-7914-4ca8-8c6d-75bf29eeb940'
+ # VM test: create a test VM in the OpenStack instance, enter its ID here
+ gds_ID = '5d07da11-0e85-4256-9894-482dcee4a5f0'
gds = conn.compute.get_server(gds_ID)
print('\ngds.name=',gds.name)
print('gds.status=',gds.status)
@@ -229,27 +230,8 @@ def gdtest_openstack():
- #VM: test3
- gds_ID = 'd3ceffc3-5967-4f18-b8b5-b1b2bd7ab76d'
- gds = conn.compute.get_server(gds_ID)
- print('\ngds.name=',gds.name)
- print('gds.status=',gds.status)
- print('suspending...')
- conn.compute.suspend_server(gds_ID) # NOT synchronous: returns before suspension action is completed
- wait_seconds = 10
- print(' waiting',wait_seconds,'seconds...')
- time.sleep(wait_seconds)
- gds = conn.compute.get_server(gds_ID) # need to refresh data; not maintained live
- print('gds.status=',gds.status)
- print('resuming...')
- conn.compute.resume_server(gds_ID)
- print(' waiting',wait_seconds,'seconds...')
- time.sleep(wait_seconds)
- gds = conn.compute.get_server(gds_ID) # need to refresh data; not maintained live
- print('gds.status=',gds.status)
-
- #Volume: hpe16-Auto-UC2-gdtest-volume1
- gdv_ID = '5a6c1dbd-5097-4a9b-8f79-6f03cde18bf6'
+ #Volume test: volume attached to test VM; get its ID and enter it here
+ gdv_ID = 'd0206ff2-507c-444a-9871-b5b7ea704994'
gdv = conn.block_storage.get_volume(gdv_ID)
# no API for stopping/restarting a volume... only delete. ONAP would have to completely migrate a VNF depending on this volume
print('\ngdv.name=',gdv.name)
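A note on the connection used throughout this diff (Method 1 above): openstack.connect(cloud=...) resolves a named entry in a clouds.yaml file found on the search path, starting with the local directory. Below is a hedged sketch of such an entry and a quick connectivity check; the cloud name matches the diff, but the auth values are placeholders that depend on the lab.

# Hypothetical clouds.yaml entry resolved by openstack.connect(cloud='unh-hpe-openstack-fraser'):
#
# clouds:
#   unh-hpe-openstack-fraser:
#     region_name: RegionOne
#     auth:
#       auth_url: http://<keystone-host>:5000/v3
#       username: <user>
#       password: <password>
#       project_name: <project>
#       user_domain_name: Default
#       project_domain_name: Default

import openstack

conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')
# Sanity check: list the servers visible to this project
for server in conn.compute.servers():
    print(server.name, server.status)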