path: root/deploy/cloud
author      Alexandru Avadanii <Alexandru.Avadanii@enea.com>    2017-09-05 13:25:08 +0000
committer   Gerrit Code Review <gerrit@opnfv.org>               2017-09-05 13:25:08 +0000
commit      a1a413ad65c31ebf5dc42924f7ed04ab02a04872 (patch)
tree        bfe49085fe03cc6578ca9cd00ea82802bb57d1be /deploy/cloud
parent      14d7bf43d3790a0a5fb69c9eff0e93b9fd63c5ba (diff)
parent      1b89628e4571a65245a743e4a85d38438a119b3d (diff)
Merge "build, deploy: Remove obsolete Fuel@Openstack code"
Diffstat (limited to 'deploy/cloud')
-rw-r--r--   deploy/cloud/configure_environment.py    73
-rw-r--r--   deploy/cloud/configure_network.py         69
-rw-r--r--   deploy/cloud/configure_nodes.py          194
-rw-r--r--   deploy/cloud/configure_settings.py        75
-rw-r--r--   deploy/cloud/deploy.py                   109
-rw-r--r--   deploy/cloud/deployment.py               230
6 files changed, 0 insertions, 750 deletions
diff --git a/deploy/cloud/configure_environment.py b/deploy/cloud/configure_environment.py
deleted file mode 100644
index 0fbf225c6..000000000
--- a/deploy/cloud/configure_environment.py
+++ /dev/null
@@ -1,73 +0,0 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
-
-from configure_settings import ConfigureSettings
-from configure_network import ConfigureNetwork
-from configure_nodes import ConfigureNodes
-
-from common import (
- E,
- exec_cmd,
- parse,
- err,
- log,
- delete,
- create_dir_if_not_exists,
-)
-
-
-class ConfigureEnvironment(object):
-
- def __init__(self, dea, yaml_config_dir, release_id, node_id_roles_dict):
- self.env_id = None
- self.dea = dea
- self.yaml_config_dir = yaml_config_dir
- self.release_id = release_id
- self.node_id_roles_dict = node_id_roles_dict
- self.required_networks = []
-
- def env_exists(self, env_name):
- env_list = parse(exec_cmd('fuel env --list'))
- for env in env_list:
- if env[E['name']] == env_name and env[E['status']] == 'new':
- self.env_id = env[E['id']]
- return True
- return False
-
- def configure_environment(self):
- log('Configure environment')
- delete(self.yaml_config_dir)
- create_dir_if_not_exists(self.yaml_config_dir)
- env_name = self.dea.get_env_name()
- env_net_segment_type = self.dea.get_env_net_segment_type()
- log('Creating environment %s release %s net-segment-type %s'
- % (env_name, self.release_id, env_net_segment_type))
- exec_cmd('fuel env create --name "%s" --release %s --net-segment-type %s'
- % (env_name, self.release_id, env_net_segment_type))
-
- if not self.env_exists(env_name):
- err('Failed to create environment %s' % env_name)
- self.config_settings()
- self.config_network()
- self.config_nodes()
-
- def config_settings(self):
- settings = ConfigureSettings(self.yaml_config_dir, self.env_id,
- self.dea)
- settings.config_settings()
-
- def config_network(self):
- network = ConfigureNetwork(self.yaml_config_dir, self.env_id, self.dea)
- network.config_network()
-
- def config_nodes(self):
- nodes = ConfigureNodes(self.yaml_config_dir, self.env_id,
- self.node_id_roles_dict, self.dea)
- nodes.config_nodes()
diff --git a/deploy/cloud/configure_network.py b/deploy/cloud/configure_network.py
deleted file mode 100644
index b3ff9e9e0..000000000
--- a/deploy/cloud/configure_network.py
+++ /dev/null
@@ -1,69 +0,0 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
-
-import yaml
-import io
-
-from common import (
- exec_cmd,
- check_file_exists,
- log,
- backup,
-)
-
-
-class ConfigureNetwork(object):
-
- def __init__(self, yaml_config_dir, env_id, dea):
- self.yaml_config_dir = yaml_config_dir
- self.env_id = env_id
- self.dea = dea
- self.required_networks = []
-
- def download_network_config(self):
- log('Download network config for environment %s' % self.env_id)
- exec_cmd('fuel network --env %s --download --dir %s'
- % (self.env_id, self.yaml_config_dir))
-
- def upload_network_config(self):
- log('Upload network config for environment %s' % self.env_id)
- exec_cmd('fuel network --env %s --upload --dir %s'
- % (self.env_id, self.yaml_config_dir))
-
- def config_network(self):
- log('Configure network')
- self.download_network_config()
- self.modify_network_config()
- self.upload_network_config()
-
- def modify_network_config(self):
- log('Modify network config for environment %s' % self.env_id)
- network_yaml = ('%s/network_%s.yaml'
- % (self.yaml_config_dir, self.env_id))
- check_file_exists(network_yaml)
- backup(network_yaml)
-
- network_config = self.dea.get_property('network')
-
- with io.open(network_yaml) as stream:
- network = yaml.load(stream)
-
- net_names = self.dea.get_network_names()
- net_id = {}
- for net in network['networks']:
- if net['name'] in net_names:
- net_id[net['name']] = {'id': net['id'],
- 'group_id': net['group_id']}
-
- for network in network_config['networks']:
- network.update(net_id[network['name']])
-
- with io.open(network_yaml, 'w') as stream:
- yaml.dump(network_config, stream, default_flow_style=False)
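
Editor's note: the heart of the deleted modify_network_config() is a name-to-id remapping. Fuel assigns the network ids and group ids, so they are read from the downloaded network_<env>.yaml and injected into the DEA-defined network settings before upload. A minimal, self-contained sketch with hypothetical data (not the original DeploymentEnvironmentAdapter):

    import yaml

    # Shape of the YAML fetched by `fuel network --download` (ids set by Fuel).
    downloaded = yaml.safe_load("""
    networks:
      - {name: public, id: 2, group_id: 1}
      - {name: management, id: 3, group_id: 1}
    """)

    # Network settings taken from the DEA file; they carry no Fuel ids yet.
    dea_config = {'networks': [{'name': 'public', 'cidr': '172.16.0.0/24'},
                               {'name': 'management', 'cidr': '192.168.0.0/24'}]}

    # Map Fuel-assigned ids onto the DEA networks, as modify_network_config() did.
    net_id = {n['name']: {'id': n['id'], 'group_id': n['group_id']}
              for n in downloaded['networks']}
    for net in dea_config['networks']:
        net.update(net_id[net['name']])

    print(yaml.dump(dea_config, default_flow_style=False))
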
diff --git a/deploy/cloud/configure_nodes.py b/deploy/cloud/configure_nodes.py
deleted file mode 100644
index a50973af6..000000000
--- a/deploy/cloud/configure_nodes.py
+++ /dev/null
@@ -1,194 +0,0 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
-import copy
-import io
-
-import six
-import yaml
-
-from common import (
- exec_cmd,
- check_file_exists,
- log,
- backup,
-)
-
-
-class ConfigureNodes(object):
-
- def __init__(self, yaml_config_dir, env_id, node_id_roles_dict, dea):
- self.yaml_config_dir = yaml_config_dir
- self.env_id = env_id
- self.node_id_roles_dict = node_id_roles_dict
- self.dea = dea
-
- def config_nodes(self):
- log('Configure nodes')
-
- # Assign nodes to environment with given roles
- for node_id, roles_blade in self.node_id_roles_dict.iteritems():
- exec_cmd('fuel node set --node-id %s --role %s --env %s'
- % (node_id, roles_blade[0], self.env_id))
-
- for node_id, roles_blade in self.node_id_roles_dict.iteritems():
- # Modify interfaces configuration
- self.download_interface_config(node_id)
- self.modify_node_interface(node_id, roles_blade)
- self.upload_interface_config(node_id)
- # Modify node attributes
- self.download_attributes(node_id)
- self.modify_node_attributes(node_id, roles_blade)
- self.upload_attributes(node_id)
-
- # Currently not used, we use default deployment facts
- # which are generated by fuel based on type segmentation
- # and network to nic assignment
- #
- # Download our modified deployment configuration, which includes our
- # changes to network topology etc.
- #self.download_deployment_config()
- #for node_id, roles_blade in self.node_id_roles_dict.iteritems():
- # self.modify_node_network_schemes(node_id, roles_blade)
- #self.upload_deployment_config()
-
- def modify_node_network_schemes(self, node_id, roles_blade):
- log('Modify network transformations for node %s' % node_id)
- type = self.dea.get_node_property(roles_blade[1], 'transformations')
- transformations = self.dea.get_property(type)
- deployment_dir = '%s/deployment_%s' % (
- self.yaml_config_dir, self.env_id)
- backup(deployment_dir)
- node_file = ('%s/%s.yaml' % (deployment_dir, node_id))
- with io.open(node_file) as stream:
- node = yaml.load(stream)
-
- node['network_scheme'].update(transformations)
-
- with io.open(node_file, 'w') as stream:
- yaml.dump(node, stream, default_flow_style=False)
-
- def download_deployment_config(self):
- log('Download deployment config for environment %s' % self.env_id)
- exec_cmd('fuel deployment --env %s --default --dir %s'
- % (self.env_id, self.yaml_config_dir))
-
- def upload_deployment_config(self):
- log('Upload deployment config for environment %s' % self.env_id)
- exec_cmd('fuel deployment --env %s --upload --dir %s'
- % (self.env_id, self.yaml_config_dir))
-
- def download_interface_config(self, node_id):
- log('Download interface config for node %s' % node_id)
- exec_cmd('fuel node --env %s --node %s --network --download '
- '--dir %s' % (self.env_id, node_id, self.yaml_config_dir))
-
- def upload_interface_config(self, node_id):
- log('Upload interface config for node %s' % node_id)
- exec_cmd('fuel node --env %s --node %s --network --upload '
- '--dir %s' % (self.env_id, node_id, self.yaml_config_dir))
-
- def download_attributes(self, node_id):
- log('Download attributes for node %s' % node_id)
- exec_cmd('fuel node --env %s --node %s --attributes --download '
- '--dir %s' % (self.env_id, node_id, self.yaml_config_dir))
-
- def upload_attributes(self, node_id):
- log('Upload attributes for node %s' % node_id)
- exec_cmd('fuel node --env %s --node %s --attributes --upload '
- '--dir %s' % (self.env_id, node_id, self.yaml_config_dir))
-
- def modify_node_attributes(self, node_id, roles_blade):
- log('Modify attributes for node {0}'.format(node_id))
- dea_key = self.dea.get_node_property(roles_blade[1], 'attributes')
- if not dea_key:
- # Node attributes are not overridden. Nothing to do.
- return
- new_attributes = self.dea.get_property(dea_key)
- attributes_yaml = ('%s/node_%s/attributes.yaml'
- % (self.yaml_config_dir, node_id))
- check_file_exists(attributes_yaml)
- backup('%s/node_%s' % (self.yaml_config_dir, node_id))
-
- with open(attributes_yaml) as stream:
- attributes = yaml.load(stream)
- result_attributes = self._merge_dicts(attributes, new_attributes)
-
- with open(attributes_yaml, 'w') as stream:
- yaml.dump(result_attributes, stream, default_flow_style=False)
-
- # interface configuration can
- # looks like this:
- #
- # interfaces_dpdk:
- # ens3:
- # - fuelweb_admin
- # ens4:
- # - storage
- # - management
- # ens5:
- # - interface_properties:
- # dpdk:
- # enabled:
- # value: true
- # - private
- # ens6:
- # - public
- def modify_node_interface(self, node_id, roles_blade):
- log('Modify interface config for node %s' % node_id)
- interface_yaml = ('%s/node_%s/interfaces.yaml'
- % (self.yaml_config_dir, node_id))
- check_file_exists(interface_yaml)
- backup('%s/node_%s' % (self.yaml_config_dir, node_id))
-
- with io.open(interface_yaml) as stream:
- interfaces = yaml.load(stream)
-
- net_name_id = {}
- for interface in interfaces:
- for network in interface['assigned_networks']:
- net_name_id[network['name']] = network['id']
-
- type = self.dea.get_node_property(roles_blade[1], 'interfaces')
- interface_config = self.dea.get_property(type)
-
- for interface in interfaces:
- interface['assigned_networks'] = []
- if interface['name'] in interface_config:
- for prop in interface_config[interface['name']]:
- net = {}
- # net name
- if isinstance(prop, six.string_types):
- net['id'] = net_name_id[prop]
- net['name'] = prop
- interface['assigned_networks'].append(net)
- # network properties
- elif isinstance(prop, dict):
- if 'interface_properties' not in prop:
- log('Interface configuration contains unknown dict: %s' % prop)
- continue
- interface['attributes'] = self._merge_dicts(
- interface.get('attributes', {}),
- prop.get('interface_properties', {}))
-
- with io.open(interface_yaml, 'w') as stream:
- yaml.dump(interfaces, stream, default_flow_style=False)
-
- def _merge_dicts(self, dict1, dict2):
- """Recursively merge dictionaries."""
- result = copy.deepcopy(dict1)
- for k, v in six.iteritems(dict2):
- if isinstance(result.get(k), list) and isinstance(v, list):
- result[k].extend(v)
- continue
- if isinstance(result.get(k), dict) and isinstance(v, dict):
- result[k] = self._merge_dicts(result[k], v)
- continue
- result[k] = copy.deepcopy(v)
- return result
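
Editor's note: the deleted _merge_dicts() helper is what let DEA-supplied attribute and interface overrides be layered onto the YAML downloaded from Fuel. A standalone sketch of the same behaviour (Python 3, hypothetical data) for quick experimentation:

    import copy

    def merge_dicts(dict1, dict2):
        """Recursively merge dict2 into a deep copy of dict1; lists are extended."""
        result = copy.deepcopy(dict1)
        for k, v in dict2.items():
            if isinstance(result.get(k), list) and isinstance(v, list):
                result[k].extend(v)
            elif isinstance(result.get(k), dict) and isinstance(v, dict):
                result[k] = merge_dicts(result[k], v)
            else:
                result[k] = copy.deepcopy(v)
        return result

    # DPDK interface_properties from the DEA merged into existing attributes.
    base = {'attributes': {'mtu': {'value': 1500}}, 'assigned': ['storage']}
    override = {'attributes': {'dpdk': {'enabled': {'value': True}}},
                'assigned': ['private']}
    print(merge_dicts(base, override))
    # -> {'attributes': {'mtu': {'value': 1500},
    #                    'dpdk': {'enabled': {'value': True}}},
    #     'assigned': ['storage', 'private']}
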
diff --git a/deploy/cloud/configure_settings.py b/deploy/cloud/configure_settings.py
deleted file mode 100644
index b60a60fd1..000000000
--- a/deploy/cloud/configure_settings.py
+++ /dev/null
@@ -1,75 +0,0 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
-
-import yaml
-import io
-
-from common import (
- exec_cmd,
- check_file_exists,
- log,
- backup,
-)
-
-
-class ConfigureSettings(object):
-
- def __init__(self, yaml_config_dir, env_id, dea):
- self.yaml_config_dir = yaml_config_dir
- self.env_id = env_id
- self.dea = dea
-
- def download_settings(self):
- log('Download settings for environment %s' % self.env_id)
- exec_cmd('fuel settings --env %s --download --dir %s'
- % (self.env_id, self.yaml_config_dir))
-
- def upload_settings(self):
- log('Upload settings for environment %s' % self.env_id)
- exec_cmd('fuel settings --env %s --upload --dir %s'
- % (self.env_id, self.yaml_config_dir))
-
- def config_settings(self):
- log('Configure settings')
- self.download_settings()
- self.modify_settings()
- self.upload_settings()
-
- def modify_settings(self):
- log('Modify settings for environment %s' % self.env_id)
- settings_yaml = ('%s/settings_%s.yaml'
- % (self.yaml_config_dir, self.env_id))
- check_file_exists(settings_yaml)
-
- with io.open(settings_yaml, 'r') as stream:
- orig_dea = yaml.load(stream)
-
- backup(settings_yaml)
- settings = self.dea.get_property('settings')
- # Copy fuel defined plugin_id's to user defined settings
- # From Fuel 8.0 chosen_id was added because it is now
- # possible to install many version of the same plugin
- # but we will install only one version
- for plugin in orig_dea['editable']:
- if 'metadata' in orig_dea['editable'][plugin]:
- if 'plugin_id' in orig_dea['editable'][plugin]['metadata']:
- if not plugin in settings['editable']:
- settings['editable'][plugin] = orig_dea['editable'][plugin]
- else:
- settings['editable'][plugin]["metadata"]["plugin_id"] = orig_dea['editable'][plugin]["metadata"]["plugin_id"]
- elif 'chosen_id' in orig_dea['editable'][plugin]['metadata']:
- if not plugin in settings['editable']:
- settings['editable'][plugin] = orig_dea['editable'][plugin]
- else:
- settings['editable'][plugin]['metadata']['chosen_id'] = orig_dea['editable'][plugin]['metadata']['chosen_id']
- settings['editable'][plugin]['metadata']['versions'][0]['metadata']['plugin_id'] = orig_dea['editable'][plugin]['metadata']['versions'][0]['metadata']['plugin_id']
-
- with io.open(settings_yaml, 'w') as stream:
- yaml.dump(settings, stream, default_flow_style=False)
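
Editor's note: the nested conditionals in the deleted modify_settings() implement one idea: plugin_id (and, from Fuel 8.0 on, chosen_id) are assigned by Fuel, so they must be copied from the downloaded settings into the user-defined DEA settings before uploading. A behaviour-equivalent sketch, easier to read than the original one-liners (hypothetical helper, not part of the deleted file):

    def copy_plugin_ids(orig, settings):
        """Carry Fuel-assigned plugin_id/chosen_id from `orig` into `settings`."""
        for plugin, data in orig['editable'].items():
            meta = data.get('metadata', {})
            if 'plugin_id' not in meta and 'chosen_id' not in meta:
                continue
            if plugin not in settings['editable']:
                # Plugin not overridden by the user: take Fuel's section as-is.
                settings['editable'][plugin] = data
            elif 'plugin_id' in meta:
                settings['editable'][plugin]['metadata']['plugin_id'] = meta['plugin_id']
            else:  # chosen_id (Fuel >= 8.0, several versions of one plugin possible)
                tgt = settings['editable'][plugin]['metadata']
                tgt['chosen_id'] = meta['chosen_id']
                tgt['versions'][0]['metadata']['plugin_id'] = \
                    meta['versions'][0]['metadata']['plugin_id']
        return settings
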
diff --git a/deploy/cloud/deploy.py b/deploy/cloud/deploy.py
deleted file mode 100644
index dac2fe8dd..000000000
--- a/deploy/cloud/deploy.py
+++ /dev/null
@@ -1,109 +0,0 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
-
-from dea import DeploymentEnvironmentAdapter
-from configure_environment import ConfigureEnvironment
-from deployment import Deployment
-
-from common import (
- R,
- exec_cmd,
- parse,
- check_file_exists,
- commafy,
- ArgParser,
- log,
-)
-
-YAML_CONF_DIR = '/var/lib/opnfv'
-
-
-class Deploy(object):
-
- def __init__(self, dea_file, no_health_check, deploy_timeout,
- no_deploy_environment):
- self.dea = DeploymentEnvironmentAdapter(dea_file)
- self.no_health_check = no_health_check
- self.deploy_timeout = deploy_timeout
- self.no_deploy_environment = no_deploy_environment
- self.macs_per_blade = {}
- self.blades = self.dea.get_node_ids()
- self.blade_node_dict = self.dea.get_blade_node_map()
- self.node_roles_dict = {}
- self.env_id = None
- self.wanted_release = self.dea.get_property('wanted_release')
-
- def assign_roles_to_cluster_node_ids(self):
- self.node_roles_dict = {}
- for blade, node in self.blade_node_dict.iteritems():
- if self.dea.get_node_roles(blade):
- roles = commafy(self.dea.get_node_roles(blade))
- self.node_roles_dict[node] = (roles, blade)
-
- def configure_environment(self):
- release_list = parse(exec_cmd('fuel release -l'))
- for release in release_list:
- if release[R['name']] == self.wanted_release:
- break
- config_env = ConfigureEnvironment(self.dea, YAML_CONF_DIR,
- release[R['id']],
- self.node_roles_dict)
- config_env.configure_environment()
- self.env_id = config_env.env_id
-
- def deploy_cloud(self):
- dep = Deployment(self.dea, YAML_CONF_DIR, self.env_id,
- self.node_roles_dict, self.no_health_check,
- self.deploy_timeout)
- if not self.no_deploy_environment:
- dep.deploy()
- else:
- log('Configuration is done. Deployment is not launched.')
-
- def deploy(self):
-
- self.assign_roles_to_cluster_node_ids()
-
- self.configure_environment()
-
- self.deploy_cloud()
-
-
-def parse_arguments():
- parser = ArgParser(prog='python %s' % __file__)
- parser.add_argument('-nh', dest='no_health_check', action='store_true',
- default=False,
- help='Don\'t run health check after deployment')
- parser.add_argument('-dt', dest='deploy_timeout', action='store',
- default=240, help='Deployment timeout (in minutes) '
- '[default: 240]')
- parser.add_argument('-nde', dest='no_deploy_environment',
- action='store_true', default=False,
- help=('Do not launch environment deployment'))
- parser.add_argument('dea_file', action='store',
- help='Deployment Environment Adapter: dea.yaml')
-
- args = parser.parse_args()
- check_file_exists(args.dea_file)
-
- kwargs = {'dea_file': args.dea_file,
- 'no_health_check': args.no_health_check,
- 'deploy_timeout': args.deploy_timeout,
- 'no_deploy_environment': args.no_deploy_environment}
- return kwargs
-
-
-def main():
- kwargs = parse_arguments()
- deploy = Deploy(**kwargs)
- deploy.deploy()
-
-if __name__ == '__main__':
- main()
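
Editor's note: based on the argparse definitions above, the removed entry point was driven as `python deploy.py [-nh] [-dt <minutes>] [-nde] dea.yaml`. The only non-trivial preprocessing before environment configuration is the blade-to-role mapping; a minimal sketch with hypothetical data (assuming the original commafy() helper simply comma-joins the role list):

    # blade -> Fuel node id and blade -> roles, both coming from the DEA file.
    blade_node = {1: 7, 2: 8, 3: 9}
    blade_roles = {1: ['controller', 'mongo'], 2: ['compute'], 3: []}

    # Mirrors assign_roles_to_cluster_node_ids(): blades without roles are
    # skipped and roles are comma-joined for `fuel node set --role <r1,r2>`.
    node_roles_dict = {node: (','.join(blade_roles[blade]), blade)
                       for blade, node in blade_node.items() if blade_roles[blade]}
    print(node_roles_dict)   # {7: ('controller,mongo', 1), 8: ('compute', 2)}
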
diff --git a/deploy/cloud/deployment.py b/deploy/cloud/deployment.py
deleted file mode 100644
index 4329a4cec..000000000
--- a/deploy/cloud/deployment.py
+++ /dev/null
@@ -1,230 +0,0 @@
-###############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# szilard.cserey@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-###############################################################################
-
-import time
-import re
-import json
-
-from common import (
- N,
- exec_cmd,
- parse,
- err,
- log,
-)
-
-SEARCH_TEXT = '(err)'
-LOG_FILE = '/var/log/puppet.log'
-GREP_LINES_OF_LEADING_CONTEXT = 100
-GREP_LINES_OF_TRAILING_CONTEXT = 100
-LIST_OF_CHAR_TO_BE_ESCAPED = ['[', ']', '"']
-ERROR_MSGS = ['Critical nodes are not available for deployment',
- 'offline. Remove them from environment and try again.',
- 'Task[move_to_bootstrap/',
- 'Failed tasks: Task[connectivity-checker/']
-
-
-class DeployNotStart(Exception):
- """Unable to start deployment"""
-
-
-class NodesGoOffline(Exception):
- """Nodes goes offline during deployment"""
-
-
-class Deployment(object):
-
- def __init__(self, dea, yaml_config_dir, env_id, node_id_roles_dict,
- no_health_check, deploy_timeout):
- self.dea = dea
- self.yaml_config_dir = yaml_config_dir
- self.env_id = env_id
- self.node_id_roles_dict = node_id_roles_dict
- self.no_health_check = no_health_check
- self.deploy_timeout = deploy_timeout
- self.pattern = re.compile(
- '\d\d\d\d-\d\d-\d\d\s\d\d:\d\d:\d\d')
-
- def collect_error_logs(self):
- for node_id, roles_blade in self.node_id_roles_dict.iteritems():
- log_list = []
- cmd = ('ssh -q node-%s grep \'"%s"\' %s'
- % (node_id, SEARCH_TEXT, LOG_FILE))
- results, _ = exec_cmd(cmd, False)
- for result in results.splitlines():
- log_msg = ''
-
- sub_cmd = '"%s" %s' % (result, LOG_FILE)
- for c in LIST_OF_CHAR_TO_BE_ESCAPED:
- sub_cmd = sub_cmd.replace(c, '\%s' % c)
- grep_cmd = ('grep -B%s %s'
- % (GREP_LINES_OF_LEADING_CONTEXT, sub_cmd))
- cmd = ('ssh -q node-%s "%s"' % (node_id, grep_cmd))
-
- details, _ = exec_cmd(cmd, False)
- details_list = details.splitlines()
-
- found_prev_log = False
- for i in range(len(details_list) - 2, -1, -1):
- if self.pattern.match(details_list[i]):
- found_prev_log = True
- break
- if found_prev_log:
- log_msg += '\n'.join(details_list[i:-1]) + '\n'
-
- grep_cmd = ('grep -A%s %s'
- % (GREP_LINES_OF_TRAILING_CONTEXT, sub_cmd))
- cmd = ('ssh -q node-%s "%s"' % (node_id, grep_cmd))
-
- details, _ = exec_cmd(cmd, False)
- details_list = details.splitlines()
-
- found_next_log = False
- for i in range(1, len(details_list)):
- if self.pattern.match(details_list[i]):
- found_next_log = True
- break
- if found_next_log:
- log_msg += '\n'.join(details_list[:i])
- else:
- log_msg += details
-
- if log_msg:
- log_list.append(log_msg)
-
- if log_list:
- role = ('controller' if 'controller' in roles_blade[0]
- else 'compute host')
- log('_' * 40 + 'Errors in node-%s %s' % (node_id, role)
- + '_' * 40)
- for log_msg in log_list:
- print(log_msg + '\n')
-
- def run_deploy(self):
- SLEEP_TIME = 60
- abort_after = 60 * int(self.deploy_timeout)
- start = time.time()
-
- log('Starting deployment of environment %s' % self.env_id)
- deploy_id = None
- ready = False
- timeout = False
-
- attempts = 5
- while attempts > 0:
- try:
- if time.time() > start + abort_after:
- timeout = True
- break
- if not deploy_id:
- deploy_id = self._start_deploy_task()
- sts, prg, msg = self._deployment_status(deploy_id)
- if sts == 'error':
- log('Error during deployment: {}'.format(msg))
- break
- if sts == 'running':
- log('Environment deployment progress: {}%'.format(prg))
- elif sts == 'ready':
- ready = True
- break
- time.sleep(SLEEP_TIME)
- except (DeployNotStart, NodesGoOffline) as e:
- log(e)
- attempts -= 1
- deploy_id = None
- time.sleep(SLEEP_TIME * attempts)
-
- if timeout:
- err('Deployment timed out, environment %s is not operational, '
- 'snapshot will not be performed'
- % self.env_id)
- if ready:
- log('Environment %s successfully deployed'
- % self.env_id)
- else:
- self.collect_error_logs()
- err('Deployment failed, environment %s is not operational'
- % self.env_id, self.collect_logs)
-
- def _start_deploy_task(self):
- out, _ = exec_cmd('fuel2 env deploy {}'.format(self.env_id), False)
- id = self._deployment_task_id(out)
- return id
-
- def _deployment_task_id(self, response):
- response = str(response)
- if response.startswith('Deployment task with id'):
- for s in response.split():
- if s.isdigit():
- return int(s)
- raise DeployNotStart('Unable to start deployment: {}'.format(response))
-
- def _deployment_status(self, id):
- task = self._task_fields(id)
- if task['status'] == 'error':
- if any(msg in task['message'] for msg in ERROR_MSGS):
- raise NodesGoOffline(task['message'])
- return task['status'], task['progress'], task['message']
-
- def _task_fields(self, id):
- try:
- out, _ = exec_cmd('fuel2 task show {} -f json'.format(id), False)
- task_info = json.loads(out)
- properties = {}
- # for 9.0 this can be list of dicts or dict
- # see https://bugs.launchpad.net/fuel/+bug/1625518
- if isinstance(task_info, list):
- for d in task_info:
- properties.update({d['Field']: d['Value']})
- else:
- return task_info
- return properties
- except ValueError as e:
- err('Unable to fetch task info: {}'.format(e))
-
- def collect_logs(self):
- log('Cleaning out any previous deployment logs')
- exec_cmd('rm -f /var/log/remote/fuel-snapshot-*', False)
- exec_cmd('rm -f /root/deploy-*', False)
- log('Generating Fuel deploy snap-shot')
- if exec_cmd('fuel snapshot < /dev/null &> snapshot.log', False)[1] != 0:
- log('Could not create a Fuel snapshot')
- else:
- exec_cmd('mv /root/fuel-snapshot* /var/log/remote/', False)
-
- log('Collecting all Fuel Snapshot & deploy log files')
- r, _ = exec_cmd('tar -czhf /root/deploy-%s.log.tar.gz /var/log/remote' % time.strftime("%Y%m%d-%H%M%S"), False)
- log(r)
-
- def verify_node_status(self):
- node_list = parse(exec_cmd('fuel --env %s node' % self.env_id))
- failed_nodes = []
- for node in node_list:
- if node[N['status']] != 'ready':
- failed_nodes.append((node[N['id']], node[N['status']]))
-
- if failed_nodes:
- summary = ''
- for node, status in failed_nodes:
- summary += '[node %s, status %s]\n' % (node, status)
- err('Deployment failed: %s' % summary, self.collect_logs)
-
- def health_check(self):
- log('Now running sanity and smoke health checks')
- r = exec_cmd('fuel health --env %s --check sanity,smoke --force' % self.env_id)
- log(r)
- if 'failure' in r:
- err('Healthcheck failed!', self.collect_logs)
-
- def deploy(self):
- self.run_deploy()
- self.verify_node_status()
- if not self.no_health_check:
- self.health_check()
- self.collect_logs()
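
Editor's note: the deleted run_deploy() starts the deployment with `fuel2 env deploy`, extracts the task id from the textual response, then polls `fuel2 task show` until the task is ready, errored, or timed out, retrying up to five times when nodes temporarily go offline. A small sketch of the id extraction, with an illustrative (not captured) CLI response:

    def parse_task_id(response):
        """Return the numeric id from a 'Deployment task with id N ...' reply."""
        response = str(response)
        if response.startswith('Deployment task with id'):
            for token in response.split():
                if token.isdigit():
                    return int(token)
        raise RuntimeError('Unable to start deployment: {}'.format(response))

    print(parse_task_id('Deployment task with id 42 for the environment 1 has been started.'))
    # -> 42
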