Diffstat (limited to 'deploy')
-rw-r--r--   deploy/README                          20
-rw-r--r--   deploy/cloud/deployment.py             52
-rw-r--r--   deploy/common.py                       38
-rwxr-xr-x   deploy/deploy.py                       15
-rw-r--r--   deploy/deploy_env.py                   14
-rw-r--r--   deploy/dha_adapters/ipmi_adapter.py    23
-rw-r--r--   deploy/dha_adapters/zte_adapter.py      7
-rw-r--r--   deploy/environments/virtual_fuel.py    21
-rw-r--r--   deploy/reap.py                          7
-rw-r--r--   deploy/ssh_client.py                    4
10 files changed, 141 insertions, 60 deletions
diff --git a/deploy/README b/deploy/README
index fd1548344..40f95ef92 100644
--- a/deploy/README
+++ b/deploy/README
@@ -14,7 +14,7 @@ the following dependencies and python modules are required to be installed:
- for Ubuntu:
sudo apt-get install -y libvirt-bin qemu-kvm python-pip fuseiso mkisofs genisoimage
-sudo apt-get install -y python-dev libz-dev libxml2-dev libxslt-dev
+sudo apt-get install -y python-dev libz-dev libxml2-dev libxslt-dev libyaml-dev
sudo pip install pyyaml netaddr paramiko lxml scp pycrypto ecdsa
During libvirt install the user is added to the libvirtd group, so you have to
@@ -84,41 +84,41 @@ optional arguments:
-np Do not install Fuel Plugins
-dt DEPLOY_TIMEOUT Deployment timeout (in minutes) [default: 240]
-nde Do not launch environment deployment
-
+ -log [LOG_FILE] Deployment log path and file name
* EXAMPLES:
- Install Fuel Master and deploy OPNFV Cloud from scratch on Hardware Environment:
- sudo python deploy.py -iso ~/ISO/opnfv.iso -dea ~/CONF/hardware/dea.yaml -dha ~/CONF/hardware/dha.yaml -s /mnt/images -b pxebr
+ sudo python deploy.py -iso ~/ISO/opnfv.iso -dea ~/CONF/hardware/dea.yaml -dha ~/CONF/hardware/dha.yaml -s /mnt/images -b pxebr -log ~/Deployment-888.log.tar.gz
- Install Fuel Master and deploy OPNFV Cloud from scratch on Virtual Environment:
- sudo python deploy.py -iso ~/ISO/opnfv.iso -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -s /mnt/images
+ sudo python deploy.py -iso ~/ISO/opnfv.iso -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -s /mnt/images -log ~/Deployment-888.log.tar.gz
- Deploy OPNFV Cloud on an already active Environment where Fuel Master VM is running so no need to install Fuel again:
- sudo python deploy.py -nf -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml
+ sudo python deploy.py -nf -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -log ~/Deployment-888.log.tar.gz
=> with plugin installation
- sudo python deploy.py -nf -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml
+ sudo python deploy.py -nf -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -log ~/Deployment-888.log.tar.gz
=> with cleanup after deployment is finished
- sudo python deploy.py -nf -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -c
+ sudo python deploy.py -nf -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -c -log ~/Deployment-888.log.tar.gz
=> no healthcheck after deployment is completed
- sudo python deploy.py -nf -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -nh
+ sudo python deploy.py -nf -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -nh -log ~/Deployment-888.log.tar.gz
- Install Fuel Master only (and Node VMs when using virtual environment):
=> for virtual environment:
- sudo python deploy.py -iso ~/ISO/opnfv.iso -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -s /mnt/images
+ sudo python deploy.py -iso ~/ISO/opnfv.iso -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -s /mnt/images -log ~/Deployment-888.log.tar.gz
=> for hardware environment:
- sudo python deploy.py -iso ~/ISO/opnfv.iso -dea ~/CONF/hardware/dea.yaml -dha ~/CONF/hardware/dha.yaml -s /mnt/images -b pxebr
+ sudo python deploy.py -iso ~/ISO/opnfv.iso -dea ~/CONF/hardware/dea.yaml -dha ~/CONF/hardware/dha.yaml -s /mnt/images -b pxebr -log ~/Deployment-888.log.tar.gz
- Cleanup a running OPNFV environment:
diff --git a/deploy/cloud/deployment.py b/deploy/cloud/deployment.py
index 0127d2a52..f8e1617f8 100644
--- a/deploy/cloud/deployment.py
+++ b/deploy/cloud/deployment.py
@@ -7,7 +7,6 @@
# http://www.apache.org/licenses/LICENSE-2.0
###############################################################################
-
import time
import re
@@ -16,6 +15,8 @@ from common import (
E,
exec_cmd,
run_proc,
+ run_proc_wait_terminated,
+ run_proc_kill,
parse,
err,
log,
@@ -30,6 +31,7 @@ LIST_OF_CHAR_TO_BE_ESCAPED = ['[', ']', '"']
class Deployment(object):
+
def __init__(self, dea, yaml_config_dir, env_id, node_id_roles_dict,
no_health_check, deploy_timeout):
self.dea = dea
@@ -41,6 +43,7 @@ class Deployment(object):
self.pattern = re.compile(
'\d\d\d\d-\d\d-\d\d\s\d\d:\d\d:\d\d')
+
def collect_error_logs(self):
for node_id, roles_blade in self.node_id_roles_dict.iteritems():
log_list = []
@@ -96,13 +99,14 @@ class Deployment(object):
for log_msg in log_list:
print(log_msg + '\n')
+
def run_deploy(self):
SLEEP_TIME = 60
LOG_FILE = 'cloud.log'
log('Starting deployment of environment %s' % self.env_id)
- p = run_proc('fuel --env %s deploy-changes | strings > %s'
- % (self.env_id, LOG_FILE))
+ deploy_proc = run_proc('fuel --env %s deploy-changes | strings > %s'
+ % (self.env_id, LOG_FILE))
ready = False
for i in range(int(self.deploy_timeout)):
@@ -120,19 +124,37 @@ class Deployment(object):
else:
time.sleep(SLEEP_TIME)
- p.poll()
- if p.returncode == None:
- log('The process deploying the changes has not yet finished.')
- log('''The file %s won't be deleted''' % LOG_FILE)
- else:
- delete(LOG_FILE)
+ if (env[0][E['status']] <> 'operational'
+ and env[0][E['status']] <> 'error'
+ and env[0][E['status']] <> 'stopped'):
+ err('Deployment timed out, environment %s is not operational, snapshot will not be performed'
+ % self.env_id, self.collect_logs)
+
+ run_proc_wait_terminated(deploy_proc)
+ delete(LOG_FILE)
if ready:
log('Environment %s successfully deployed' % self.env_id)
else:
self.collect_error_logs()
err('Deployment failed, environment %s is not operational'
- % self.env_id)
+ % self.env_id, self.collect_logs)
+
+
+ def collect_logs(self):
+ log('Cleaning out any previous deployment logs')
+ exec_cmd('rm -f /var/log/remote/fuel-snapshot-*', False)
+ exec_cmd('rm -f /root/deploy-*', False)
+ log('Generating Fuel deploy snap-shot')
+ if exec_cmd('fuel snapshot < /dev/null &> snapshot.log', False)[1] <> 0:
+ log('Could not create a Fuel snapshot')
+ else:
+ exec_cmd('mv /root/fuel-snapshot* /var/log/remote/', False)
+
+ log('Collecting all Fuel Snapshot & deploy log files')
+ r, _ = exec_cmd('tar -czhf /root/deploy-%s.log.tar.gz /var/log/remote' % time.strftime("%Y%m%d-%H%M%S"), False)
+ log(r)
+
def verify_node_status(self):
node_list = parse(exec_cmd('fuel node list'))
@@ -145,18 +167,20 @@ class Deployment(object):
summary = ''
for node, status in failed_nodes:
summary += '[node %s, status %s]\n' % (node, status)
- err('Deployment failed: %s' % summary)
+ err('Deployment failed: %s' % summary, self.collect_logs)
+
def health_check(self):
log('Now running sanity and smoke health checks')
- r = exec_cmd('fuel health --env %s --check sanity,smoke --force'
- % self.env_id)
+ r = exec_cmd('fuel health --env %s --check sanity,smoke --force' % self.env_id)
log(r)
if 'failure' in r:
- err('Healthcheck failed!')
+ err('Healthcheck failed!', self.collect_logs)
+
def deploy(self):
self.run_deploy()
self.verify_node_status()
if not self.no_health_check:
self.health_check()
+ self.collect_logs()
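
The run_deploy() change hands the background "fuel deploy-changes" pipeline to the
new run_proc_wait_terminated() helper instead of the old poll()/returncode
bookkeeping, so the child process is always reaped and the log file can be deleted
unconditionally. The helper is essentially Popen.communicate(); a stand-alone
sketch of the pattern (the shell pipeline below is a placeholder, not the Fuel
command):

    import subprocess

    # Equivalent of run_proc(): start a shell pipeline without blocking the caller.
    proc = subprocess.Popen('echo deploying | tr a-z A-Z > example.log',
                            shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)

    # ... poll the environment status here until it is operational/error/stopped
    # or the deployment timeout expires ...

    # Equivalent of run_proc_wait_terminated(): block until the child exits and
    # collect its output and return code so no zombie process is left behind.
    response = proc.communicate()[0].strip()
    print('pipeline exited with return code %d' % proc.returncode)
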
diff --git a/deploy/common.py b/deploy/common.py
index 3cd3e0e6e..9654b3771 100644
--- a/deploy/common.py
+++ b/deploy/common.py
@@ -1,6 +1,7 @@
###############################################################################
# Copyright (c) 2015 Ericsson AB and others.
# szilard.cserey@ericsson.com
+# peter.barabas@ericsson.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
@@ -38,7 +39,21 @@ out_handler.setFormatter(formatter)
LOG.addHandler(out_handler)
os.chmod(LOGFILE, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
-def exec_cmd(cmd, check=True, attempts=1, delay=5, verbose=False):
+
+def mask_arguments(cmd, mask_args, mask_str):
+ cmd_line = cmd.split()
+ for pos in mask_args:
+ # Don't mask the actual command; also check if we don't reference
+ # beyond bounds
+ if pos == 0 or pos >= len(cmd_line):
+ continue
+ cmd_line[pos] = mask_str
+ return ' '.join(cmd_line)
+
+
+def exec_cmd(cmd, check=True, attempts=1, delay=5, verbose=False, mask_args=[], mask_str='*****'):
+ masked_cmd = mask_arguments(cmd, mask_args, mask_str)
+
# a negative value means forever
while attempts != 0:
attempts = attempts - 1
@@ -52,18 +67,18 @@ def exec_cmd(cmd, check=True, attempts=1, delay=5, verbose=False):
break
time.sleep(delay)
if verbose:
- log('%d attempts left: %s' % (attempts, cmd))
+ log('%d attempts left: %s' % (attempts, masked_cmd))
response = response.strip()
if check:
if return_code > 0:
stderr = stderr.strip()
- print "Failed command: " + str(cmd)
+ print "Failed command: " + str(masked_cmd)
print "Command returned response: " + str(stderr)
print "Command return code: " + str(return_code)
raise Exception(stderr)
else:
- print "Command: " + str(cmd)
+ print "Command: " + str(masked_cmd)
print str(response)
return response
return response, return_code
@@ -77,6 +92,17 @@ def run_proc(cmd):
return process
+def run_proc_wait_terminated(process):
+ response = process.communicate()[0].strip()
+ return_code = process.returncode
+ return response, return_code
+
+
+def run_proc_kill(process):
+ response = process.kill()
+ return response
+
+
def parse(printout):
parsed_list = []
lines = printout.splitlines()
@@ -99,8 +125,10 @@ def clean(lines):
return parsed if len(parsed_list) == 1 else parsed_list
-def err(message):
+def err(message, fun = None, *args):
LOG.error('%s\n' % message)
+ if fun:
+ fun(*args)
sys.exit(1)
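
Two of the common.py additions are easiest to see in isolation: mask_arguments()
replaces selected positional tokens of a command line before it is logged or
printed, and err() now takes an optional callback (collect_logs in deployment.py)
that runs just before sys.exit(1). A self-contained example of the masking logic,
using a made-up command line:

    def mask_arguments(cmd, mask_args, mask_str):
        # Same rules as the patch: never mask token 0 (the command itself)
        # and skip positions that fall outside the argument list.
        cmd_line = cmd.split()
        for pos in mask_args:
            if pos == 0 or pos >= len(cmd_line):
                continue
            cmd_line[pos] = mask_str
        return ' '.join(cmd_line)

    print(mask_arguments('ipmitool -H 10.0.0.1 -U admin -P secret power status',
                         mask_args=[4, 6], mask_str='*****'))
    # -> ipmitool -H 10.0.0.1 -U ***** -P ***** power status

exec_cmd() builds the masked string once and uses it in every log, print and
retry message, so credentials passed on the command line are not echoed in the
deploy output.
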
diff --git a/deploy/deploy.py b/deploy/deploy.py
index 179ee7bcb..8064af993 100755
--- a/deploy/deploy.py
+++ b/deploy/deploy.py
@@ -30,6 +30,7 @@ from common import (
err,
warn,
check_file_exists,
+ check_dir_exists,
create_dir_if_not_exists,
delete,
check_if_root,
@@ -61,7 +62,7 @@ class AutoDeploy(object):
def __init__(self, no_fuel, fuel_only, no_health_check, cleanup_only,
cleanup, storage_dir, pxe_bridge, iso_file, dea_file,
dha_file, fuel_plugins_dir, fuel_plugins_conf_dir,
- no_plugins, deploy_timeout, no_deploy_environment):
+ no_plugins, deploy_timeout, no_deploy_environment, deploy_log):
self.no_fuel = no_fuel
self.fuel_only = fuel_only
self.no_health_check = no_health_check
@@ -77,6 +78,7 @@ class AutoDeploy(object):
self.no_plugins = no_plugins
self.deploy_timeout = deploy_timeout
self.no_deploy_environment = no_deploy_environment
+ self.deploy_log = deploy_log
self.dea = (DeploymentEnvironmentAdapter(dea_file)
if not cleanup_only else None)
self.dha = DeploymentHardwareAdapter(dha_file)
@@ -202,7 +204,7 @@ class AutoDeploy(object):
self.fuel_username, self.fuel_password,
self.dea_file, self.fuel_plugins_conf_dir,
WORK_DIR, self.no_health_check, self.deploy_timeout,
- self.no_deploy_environment)
+ self.no_deploy_environment, self.deploy_log)
return dep.deploy()
def setup_execution_environment(self):
@@ -332,12 +334,17 @@ def parse_arguments():
parser.add_argument('-nde', dest='no_deploy_environment',
action='store_true', default=False,
help=('Do not launch environment deployment'))
+ parser.add_argument('-log', dest='deploy_log',
+ action='store', default='../ci/.',
+ help=('Path and name of the deployment log archive'))
args = parser.parse_args()
log(args)
check_file_exists(args.dha_file)
+ check_dir_exists(os.path.dirname(args.deploy_log))
+
if not args.cleanup_only:
check_file_exists(args.dea_file)
check_fuel_plugins_dir(args.fuel_plugins_dir)
@@ -350,6 +357,7 @@ def parse_arguments():
create_dir_if_not_exists(args.storage_dir)
check_bridge(args.pxe_bridge, args.dha_file)
+
kwargs = {'no_fuel': args.no_fuel, 'fuel_only': args.fuel_only,
'no_health_check': args.no_health_check,
'cleanup_only': args.cleanup_only, 'cleanup': args.cleanup,
@@ -360,7 +368,8 @@ def parse_arguments():
'fuel_plugins_conf_dir': args.fuel_plugins_conf_dir,
'no_plugins': args.no_plugins,
'deploy_timeout': args.deploy_timeout,
- 'no_deploy_environment': args.no_deploy_environment}
+ 'no_deploy_environment': args.no_deploy_environment,
+ 'deploy_log': args.deploy_log}
return kwargs
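
A small detail of the new -log option: parse_arguments() validates only the
directory component of the given path up front; the archive file itself is
created at the end of the run. This is why the default value '../ci/.' passes the
check as long as ../ci exists. Illustrative only:

    import os

    print(os.path.dirname('../ci/.'))                          # -> ../ci
    print(os.path.dirname('/tmp/Deployment-888.log.tar.gz'))   # -> /tmp
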
diff --git a/deploy/deploy_env.py b/deploy/deploy_env.py
index 5eeaf11e0..93dc3959b 100644
--- a/deploy/deploy_env.py
+++ b/deploy/deploy_env.py
@@ -20,6 +20,7 @@ from ssh_client import SSHClient
from common import (
err,
log,
+ exec_cmd,
parse,
N,
E,
@@ -35,7 +36,7 @@ class CloudDeploy(object):
def __init__(self, dea, dha, fuel_ip, fuel_username, fuel_password,
dea_file, fuel_plugins_conf_dir, work_dir, no_health_check,
- deploy_timeout, no_deploy_environment):
+ deploy_timeout, no_deploy_environment, deploy_log):
self.dea = dea
self.dha = dha
self.fuel_ip = fuel_ip
@@ -51,6 +52,7 @@ class CloudDeploy(object):
self.no_health_check = no_health_check
self.deploy_timeout = deploy_timeout
self.no_deploy_environment = no_deploy_environment
+ self.deploy_log = deploy_log
self.file_dir = os.path.dirname(os.path.realpath(__file__))
self.ssh = SSHClient(self.fuel_ip, self.fuel_username,
self.fuel_password)
@@ -256,6 +258,10 @@ class CloudDeploy(object):
self.set_boot_order(['pxe', 'disk'])
self.power_on_nodes()
+ def get_put_deploy_log(self):
+ with self.ssh as s:
+ s.scp_get("deploy-*", local=self.deploy_log)
+
def deploy(self):
self.set_boot_order_nodes()
@@ -272,4 +278,8 @@ class CloudDeploy(object):
delete(self.updated_dea_file)
- return self.run_cloud_deploy(CLOUD_DEPLOY_FILE)
+ rc = self.run_cloud_deploy(CLOUD_DEPLOY_FILE)
+
+ self.get_put_deploy_log()
+
+ return rc
diff --git a/deploy/dha_adapters/ipmi_adapter.py b/deploy/dha_adapters/ipmi_adapter.py
index 6ce4012f4..7cc930554 100644
--- a/deploy/dha_adapters/ipmi_adapter.py
+++ b/deploy/dha_adapters/ipmi_adapter.py
@@ -49,7 +49,8 @@ class IpmiAdapter(HardwareAdapter):
def node_get_state(self, node_id):
state = exec_cmd('%s chassis power status' % self.ipmi_cmd(node_id),
attempts=self.attempts, delay=self.delay,
- verbose=True)
+ verbose=True,
+ mask_args=[8,10])
return state
def _node_power_cmd(self, node_id, cmd):
@@ -59,10 +60,12 @@ class IpmiAdapter(HardwareAdapter):
pow_cmd = '%s chassis power %s' % (self.ipmi_cmd(node_id), cmd)
exec_cmd(pow_cmd, attempts=self.attempts, delay=self.delay,
- verbose=True)
+ verbose=True,
+ mask_args=[8,10])
attempts = self.attempts
while attempts:
+ time.sleep(self.delay)
state = self.node_get_state(node_id)
attempts -= 1
if state == expected:
@@ -70,7 +73,7 @@ class IpmiAdapter(HardwareAdapter):
elif attempts != 0:
# reinforce our will, but allow the command to fail,
# we know our message got across once already...
- exec_cmd(pow_cmd, check=False)
+ exec_cmd(pow_cmd, check=False, mask_args=[8,10])
err('Could not set chassis %s for node %s' % (cmd, node_id))
@@ -85,7 +88,9 @@ class IpmiAdapter(HardwareAdapter):
def node_reset(self, node_id):
log('RESET Node %s' % node_id)
cmd = '%s chassis power reset' % self.ipmi_cmd(node_id)
- exec_cmd(cmd, attempts=self.attempts, delay=self.delay, verbose=True)
+ exec_cmd(cmd, attempts=self.attempts, delay=self.delay,
+ verbose=True,
+ mask_args=[8,10])
def node_set_boot_order(self, node_id, boot_order_list):
log('Set boot order %s on Node %s' % (boot_order_list, node_id))
@@ -95,11 +100,15 @@ class IpmiAdapter(HardwareAdapter):
if dev == 'pxe':
exec_cmd('%s chassis bootdev pxe options=persistent'
% cmd_prefix, attempts=self.attempts, delay=self.delay,
- verbose=True)
+ verbose=True,
+ mask_args=[8,10])
elif dev == 'iso':
exec_cmd('%s chassis bootdev cdrom' % cmd_prefix,
- attempts=self.attempts, delay=self.delay, verbose=True)
+ attempts=self.attempts, delay=self.delay,
+ verbose=True,
+ mask_args=[8,10])
elif dev == 'disk':
exec_cmd('%s chassis bootdev disk options=persistent'
% cmd_prefix, attempts=self.attempts, delay=self.delay,
- verbose=True)
+ verbose=True,
+ mask_args=[8,10])
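
The mask_args=[8,10] values refer to whitespace-separated token positions in the
full ipmitool command line returned by ipmi_cmd(), which is not shown in this
diff. Assuming an invocation of the typical form
'ipmitool -I lanplus -H <ip> -L <level> -U <user> -P <password>', tokens 8 and 10
are the username and password, which is what gets replaced by '*****' in the
verbose exec_cmd() output:

    cmd = ('ipmitool -I lanplus -H 192.0.2.10 -L OPERATOR '
           '-U admin -P s3cret chassis power status')
    for i, token in enumerate(cmd.split()):
        print('%2d  %s' % (i, token))
    # In this layout token 8 is 'admin' and token 10 is 's3cret', so
    # exec_cmd(..., mask_args=[8, 10]) logs them as '*****'.
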
diff --git a/deploy/dha_adapters/zte_adapter.py b/deploy/dha_adapters/zte_adapter.py
index f6279fbf0..1e610ca41 100644
--- a/deploy/dha_adapters/zte_adapter.py
+++ b/deploy/dha_adapters/zte_adapter.py
@@ -27,14 +27,15 @@ class ZteAdapter(IpmiAdapter):
WAIT_LOOP = 600
log('RESET Node %s' % node_id)
cmd_prefix = self.ipmi_cmd(node_id)
- state = exec_cmd('%s chassis power status' % cmd_prefix)
+ state = exec_cmd('%s chassis power status' % cmd_prefix, mask_args=[8,10])
if state == 'Chassis Power is on':
was_shut_off = False
done = False
- exec_cmd('%s chassis power cycle' % cmd_prefix)
+ exec_cmd('%s chassis power cycle' % cmd_prefix, mask_args=[8,10])
for i in range(WAIT_LOOP):
state, _ = exec_cmd('%s chassis power status' % cmd_prefix,
- False)
+ check=False,
+ mask_args=[8,10])
if state == 'Chassis Power is off':
was_shut_off = True
elif state == 'Chassis Power is on' and was_shut_off:
diff --git a/deploy/environments/virtual_fuel.py b/deploy/environments/virtual_fuel.py
index f9f9f7ab9..7dc972025 100644
--- a/deploy/environments/virtual_fuel.py
+++ b/deploy/environments/virtual_fuel.py
@@ -54,14 +54,21 @@ class VirtualFuel(ExecutionEnvironment):
self.dha.get_node_property(
self.fuel_node_id, 'libvirtTemplate'))
check_file_exists(self.vm_template)
+ with open(self.vm_template) as f:
+ self.vm_xml = etree.parse(f)
+
+ self.temp_vm_file = '%s/%s' % (self.temp_dir, self.vm_name)
+ self.update_vm_template_file()
def __del__(self):
delete(self.temp_dir)
- def set_vm_nic(self, temp_vm_file):
- with open(temp_vm_file) as f:
- vm_xml = etree.parse(f)
- interfaces = vm_xml.xpath('/domain/devices/interface')
+ def update_vm_template_file(self):
+ with open(self.temp_vm_file, "wc") as f:
+ self.vm_xml.write(f, pretty_print=True, xml_declaration=True)
+
+ def set_vm_nic(self):
+ interfaces = self.vm_xml.xpath('/domain/devices/interface')
for interface in interfaces:
interface.getparent().remove(interface)
interface = etree.Element('interface')
@@ -70,12 +77,12 @@ class VirtualFuel(ExecutionEnvironment):
source.set('bridge', self.pxe_bridge)
model = etree.SubElement(interface, 'model')
model.set('type', 'virtio')
- devices = vm_xml.xpath('/domain/devices')
+ devices = self.vm_xml.xpath('/domain/devices')
if devices:
device = devices[0]
device.append(interface)
- with open(temp_vm_file, 'w') as f:
- vm_xml.write(f, pretty_print=True, xml_declaration=True)
+
+ self.update_vm_template_file()
def create_volume(self, pool, name, su, img_type='qcow2'):
log('Creating image using Libvirt volumes in pool %s, name: %s' %
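
The virtual_fuel.py rework parses the libvirt template once in the constructor,
keeps the lxml tree on the instance, and serializes it back with
update_vm_template_file() after every mutation. The core lxml pattern, reduced to
a stand-alone sketch (the domain XML below is a minimal made-up template):

    from io import BytesIO
    from lxml import etree

    template = BytesIO(b'<domain><devices>'
                       b'<interface type="network"/></devices></domain>')
    vm_xml = etree.parse(template)

    # Drop any pre-existing interfaces, then attach a single virtio NIC on the
    # desired PXE bridge, matching the steps set_vm_nic() performs.
    for interface in vm_xml.xpath('/domain/devices/interface'):
        interface.getparent().remove(interface)

    interface = etree.Element('interface')
    interface.set('type', 'bridge')
    etree.SubElement(interface, 'source').set('bridge', 'pxebr')
    etree.SubElement(interface, 'model').set('type', 'virtio')
    vm_xml.xpath('/domain/devices')[0].append(interface)

    print(etree.tostring(vm_xml, pretty_print=True).decode())
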
diff --git a/deploy/reap.py b/deploy/reap.py
index 7624d6f6d..ed5bc994e 100644
--- a/deploy/reap.py
+++ b/deploy/reap.py
@@ -326,13 +326,6 @@ class Reap(object):
self.finale()
-def usage():
- print '''
- Usage:
- python reap.py <dea_file> <dha_file> <comment>
- '''
-
-
def parse_arguments():
parser = ArgParser(prog='python %s' % __file__)
parser.add_argument('dea_file', nargs='?', action='store',
diff --git a/deploy/ssh_client.py b/deploy/ssh_client.py
index df780961f..f6888d52d 100644
--- a/deploy/ssh_client.py
+++ b/deploy/ssh_client.py
@@ -85,14 +85,14 @@ class SSHClient(object):
def scp_get(self, remote, local='.', dir=False):
try:
- with scp.SCPClient(self.client.get_transport()) as _scp:
+ with scp.SCPClient(self.client.get_transport(), sanitize=lambda x: x) as _scp:
_scp.get(remote, local, dir)
except Exception as e:
err(e)
def scp_put(self, local, remote='.', dir=False):
try:
- with scp.SCPClient(self.client.get_transport()) as _scp:
+ with scp.SCPClient(self.client.get_transport(), sanitize=lambda x: x) as _scp:
_scp.put(local, remote, dir)
except Exception as e:
err(e)
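
The sanitize=lambda x: x argument disables scp.SCPClient's default quoting of the
remote path, which is what allows get_put_deploy_log() to fetch 'deploy-*' as a
remote wildcard. A minimal sketch of the same call outside the deployer (host
address and credentials are placeholders):

    import paramiko
    import scp

    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect('10.20.0.2', username='root', password='r00tme')

    # Passing the remote path through unmodified lets the remote shell expand
    # the wildcard; with the default sanitizer the '*' would be quoted and the
    # transfer would fail.
    with scp.SCPClient(client.get_transport(), sanitize=lambda x: x) as _scp:
        _scp.get('deploy-*', local_path='.')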