path: root/tools/os_deploy_tgen/osclients/heat.py
author     opensource-tnbt <sridhar.rao@spirent.com>  2020-11-11 22:55:02 +0530
committer  opensource-tnbt <sridhar.rao@spirent.com>  2020-11-25 12:27:15 +0530
commit     9ec3918b56f1e8862fe140455928cdcd87a2554b (patch)
tree       8823eb095639dce15a411f645ce32149c870dfd6 /tools/os_deploy_tgen/osclients/heat.py
parent     605102bb6a8a3b48f0c66d817614eec0ef42e017 (diff)
Openstack: Using VSPERF to Test on Openstack.
This patch adds support for running VSPERF tests with Openstack. It adds the following:
1. Provide the --openstack parameter.
2. New configuration file for Openstack.
3. Deploy the traffic generator based on the configuration provided.
4. Run the tests after the traffic generators are deployed on Openstack.
Update-1: Minor bug fixes and documentation added.
Update-2: Add user-config to heat.
Update-3: Update Python requirements.
Update-4: Add dogpile.
Update-5: Update description of the HOT files.
Signed-off-by: Sridhar K. N. Rao <sridhar.rao@spirent.com>
Change-Id: Iebec356eb893e0e6726cac6a10537b99e41f67f4
Diffstat (limited to 'tools/os_deploy_tgen/osclients/heat.py')
-rwxr-xr-x    tools/os_deploy_tgen/osclients/heat.py    156
1 file changed, 156 insertions(+), 0 deletions(-)
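
For orientation, the sketch below shows how the Heat helpers added by this patch might be driven to bring up a traffic-generator stack from a HOT template and read back its outputs. It is a minimal, hypothetical example: the authentication values, the template file name, the stack name, the parameter names and the import path are placeholders and are not taken from this patch.

    # Minimal sketch (not part of the patch); credentials, template path,
    # stack name, parameters and import path are hypothetical placeholders.
    from keystoneauth1.identity import v3
    from keystoneauth1 import session
    from heatclient import client as heatclient

    from os_deploy_tgen.osclients import heat  # import path assumed

    auth = v3.Password(auth_url='http://controller:5000/v3',
                       username='admin', password='secret',
                       project_name='admin',
                       user_domain_name='Default',
                       project_domain_name='Default')
    sess = session.Session(auth=auth)
    heat_client = heatclient.Client('1', session=sess)

    with open('tgen.yaml') as hot_file:        # hypothetical HOT template
        template = hot_file.read()

    stack_id = heat.create_stack(heat_client, 'vsperf_tgen', template,
                                 parameters={'flavor': 'm1.large'})
    outputs = heat.get_stack_outputs(heat_client, stack_id)
    print(outputs)
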
diff --git a/tools/os_deploy_tgen/osclients/heat.py b/tools/os_deploy_tgen/osclients/heat.py
new file mode 100755
index 00000000..8681731b
--- /dev/null
+++ b/tools/os_deploy_tgen/osclients/heat.py
@@ -0,0 +1,156 @@
+# Copyright 2020 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Heat Client
+"""
+
+#import sys
+import time
+
+from heatclient import exc
+from oslo_log import log as logging
+from timeout_decorator import timeout
+
+LOG = logging.getLogger(__name__)
+
+
+def create_stack(heat_client, stack_name, template, parameters,
+ environment=None):
+ """
+ Create Stack
+ """
+ stack_params = {
+ 'stack_name': stack_name,
+ 'template': template,
+ 'parameters': parameters,
+ 'environment': environment,
+ }
+
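+    # stacks.create() returns a dict whose 'stack' entry describes the new stack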
+ stack = heat_client.stacks.create(**stack_params)['stack']
+ LOG.info('New stack: %s', stack)
+
+ wait_stack_completion(heat_client, stack['id'])
+
+ return stack['id']
+
+
+def get_stack_status(heat_client, stack_id):
+ """
+ Get Stack Status
+ """
+ # stack.get operation may take long time and run out of time. The reason
+ # is that it resolves all outputs which is done serially. On the other hand
+ # stack status can be retrieved from the list operation. Internally listing
+ # supports paging and every request should not take too long.
+ for stack in heat_client.stacks.list():
+ if stack.id == stack_id:
+ return stack.status, stack.stack_status_reason
+    # the for-loop's else clause runs only when no stack with the given id exists
+    else:
+        raise exc.HTTPNotFound(message='Stack %s is not found' % stack_id)
+
+
+def get_id_with_name(heat_client, stack_name):
+ """
+ Get Stack ID by name
+ """
+ # This method isn't really necessary since the Heat client accepts
+ # stack_id and stack_name interchangeably. This is provided more as a
+ # safety net to use ids which are guaranteed to be unique and provides
+ # the benefit of keeping the Shaker code consistent and more easily
+ # traceable.
+ stack = heat_client.stacks.get(stack_name)
+ return stack.id
+
+
+def wait_stack_completion(heat_client, stack_id):
+ """
+ Wait for Stack completion
+ """
+ reason = None
+ status = None
+
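+    # poll every 5 seconds while the stack reports IN_PROGRESS (or no status yet)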
+ while True:
+ status, reason = get_stack_status(heat_client, stack_id)
+ LOG.debug('Stack status: %s', status)
+ if status not in ['IN_PROGRESS', '']:
+ break
+
+ time.sleep(5)
+
+ if status != 'COMPLETE':
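+        # before raising, log every resource that failed to reach CREATE_COMPLETE
+        # and reports a failure reason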
+ resources = heat_client.resources.list(stack_id)
+ for res in resources:
+ if (res.resource_status != 'CREATE_COMPLETE' and
+ res.resource_status_reason):
+ LOG.error('Heat stack resource %(res)s of type %(type)s '
+ 'failed with %(reason)s',
+ dict(res=res.logical_resource_id,
+ type=res.resource_type,
+ reason=res.resource_status_reason))
+
+ raise exc.StackFailure(stack_id, status, reason)
+
+
+# set the timeout for this method so we don't get stuck polling indefinitely
+# waiting for a delete
+@timeout(600)
+def wait_stack_deletion(heat_client, stack_id):
+ """
+ Wait for stack deletion
+ """
+ try:
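+        # stacks.delete() only schedules the deletion; keep polling until Heat
+        # reports the stack as gone (HTTPNotFound) or it enters FAILED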
+ heat_client.stacks.delete(stack_id)
+ while True:
+ status, reason = get_stack_status(heat_client, stack_id)
+ LOG.debug('Stack status: %s Stack reason: %s', status, reason)
+ if status == 'FAILED':
+ raise exc.StackFailure('Failed to delete stack %s' % stack_id)
+
+ time.sleep(5)
+
+ except TimeoutError:
+        LOG.error('Timed out waiting for deletion of stack %s', stack_id)
+
+ except exc.HTTPNotFound:
+ # once the stack is gone we can assume it was successfully deleted
+ # clear the exception so it doesn't confuse the logs
+ #if sys.version_info < (3, 0):
+ # sys.exc_clear()
+ LOG.info('Stack %s was successfully deleted', stack_id)
+
+
+def get_stack_outputs(heat_client, stack_id):
+ """
+ Get Stack Output
+ """
+ # try to use optimized way to retrieve outputs, fallback otherwise
+ if hasattr(heat_client.stacks, 'output_list'):
+ try:
+ output_list = heat_client.stacks.output_list(stack_id)['outputs']
+
+ result = {}
+ for output in output_list:
+ output_key = output['output_key']
+ value = heat_client.stacks.output_show(stack_id, output_key)
+ result[output_key] = value['output']['output_value']
+
+ return result
+        except Exception as err:
+ LOG.info('Cannot get output list, fallback to old way: %s', err)
+
+ outputs_list = heat_client.stacks.get(stack_id).to_dict()['outputs']
+ return dict((item['output_key'], item['output_value'])
+ for item in outputs_list)
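
Continuing the same hypothetical sketch from above, the teardown path using the helpers in this file: look up the stack by name, check its status, and wait for the asynchronous delete to finish (wait_stack_deletion() gives up after its 600 second timeout). The stack name remains a placeholder.

    # Continuation of the hypothetical sketch above (same placeholder names).
    stack_id = heat.get_id_with_name(heat_client, 'vsperf_tgen')
    status, reason = heat.get_stack_status(heat_client, stack_id)
    print('stack %s is %s (%s)' % (stack_id, status, reason))
    heat.wait_stack_deletion(heat_client, stack_id)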