-rw-r--r--  patches/fuel-plugin-opendaylight/0004-ODL-Upstart-service-Respawn-on-crash.patch                    38
-rw-r--r--  patches/opnfv-fuel/upstream-backports/0005-CI-deploy-cache-Store-and-reuse-deploy-artifacts.patch  738
2 files changed, 776 insertions(+), 0 deletions(-)
diff --git a/patches/fuel-plugin-opendaylight/0004-ODL-Upstart-service-Respawn-on-crash.patch b/patches/fuel-plugin-opendaylight/0004-ODL-Upstart-service-Respawn-on-crash.patch
new file mode 100644
index 00000000..6a9150c0
--- /dev/null
+++ b/patches/fuel-plugin-opendaylight/0004-ODL-Upstart-service-Respawn-on-crash.patch
@@ -0,0 +1,38 @@
+From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+Date: Thu, 1 Dec 2016 13:12:35 +0100
+Subject: [PATCH] ODL Upstart service: Respawn on crash
+
+On arm64, the JVM sometimes (roughly 10% of starts) crashes right at
+startup, due to JIT issues in our openjdk8 package.
+
+We cannot realistically fix the arm64 JVM issues in the near future,
+so we work around them by configuring the ODL upstart service to
+respawn on crash (see [1] for details).
+
+Note: This is specific to Ubuntu Trusty (14.04), which uses upstart;
+for Ubuntu Xenial (or anything newer than 14.x), which uses systemd,
+a different but equivalent mechanism should be used (sketched below).
+
+[1] http://upstart.ubuntu.com/cookbook/#respawn
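+
+A rough equivalent on a systemd-based release (illustrative sketch only,
+not part of this patch; exact directive names vary between systemd
+versions) would be a Restart= stanza in the opendaylight service unit:
+
+  [Service]
+  Restart=on-failure
+  StartLimitInterval=20
+  StartLimitBurst=20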
+
+JIRA: ARMBAND-134
+
+Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+---
+ odl_package/ubuntu/opendaylight | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/odl_package/ubuntu/opendaylight b/odl_package/ubuntu/opendaylight
+index fd84376..b25e3b8 100644
+--- a/odl_package/ubuntu/opendaylight
++++ b/odl_package/ubuntu/opendaylight
+@@ -8,6 +8,9 @@ stop on runlevel [!2345]
+ setgid odl
+ setuid odl
+
++respawn
++respawn limit 20 20
++
+ env KARAF_HOME="/opt/opendaylight"
+ env JAVA_OPTS="-server -Xms1g -Xmx2g -XX:+UseG1GC -XX:+UnlockDiagnosticVMOptions -XX:+UnsyncloadClass -Dcom.sun.management.jmxremote"
+ env OPTS="-Dkaraf.startLocalConsole=false -Dkaraf.startRemoteShell=true"
diff --git a/patches/opnfv-fuel/upstream-backports/0005-CI-deploy-cache-Store-and-reuse-deploy-artifacts.patch b/patches/opnfv-fuel/upstream-backports/0005-CI-deploy-cache-Store-and-reuse-deploy-artifacts.patch
new file mode 100644
index 00000000..52e85a41
--- /dev/null
+++ b/patches/opnfv-fuel/upstream-backports/0005-CI-deploy-cache-Store-and-reuse-deploy-artifacts.patch
@@ -0,0 +1,738 @@
+From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+Date: Thu, 24 Nov 2016 23:02:04 +0100
+Subject: [PATCH] CI: deploy-cache: Store and reuse deploy artifacts
+
+Add support for caching deploy artifacts, like bootstraps and
+target images, which take a long time to build at each deploy, since
+building them requires a cross-debootstrap via qemu-user-static and
+binfmt.
+
+For OPNFV CI, the cache will piggyback on the <iso_mount> mechanism
+and will be located at:
+/iso_mount/opnfv_ci/<branch>/deploy-cache
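+
+For illustration only (lab, pod and scenario names below are hypothetical;
+the -C option is the one introduced by this change), a CI run could pass
+the cache directory like this:
+
+  CACHE_DIR=/iso_mount/opnfv_ci/master/deploy-cache
+  mkdir -p "${CACHE_DIR}"
+  bash ci/deploy.sh -b file://${PWD}/securedlab -l arm -p pod1 \
+       -s os-nosdn-nofeature-ha -i file://${PWD}/opnfv.iso -C "${CACHE_DIR}"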
+
+TODO: Use dea interface adapter in target images fingerprinting.
+TODO: remote fingerprinting
+TODO: differentiate between bootstraps and targetimages, so we don't
+end up trying to use one cache artifact type as the other.
+
+JIRA: ARMBAND-172
+
+Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+---
+ ...p_admin_node.sh-deploy_cache-install-hook.patch | 69 +++++
+ ci/deploy.sh | 14 +-
+ deploy/cloud/deploy.py | 11 +
+ deploy/deploy.py | 25 +-
+ deploy/deploy_cache.py | 319 +++++++++++++++++++++
+ deploy/deploy_env.py | 13 +-
+ deploy/install_fuel_master.py | 9 +-
+ 7 files changed, 451 insertions(+), 9 deletions(-)
+ create mode 100644 build/f_repos/patch/fuel-main/0006-bootstrap_admin_node.sh-deploy_cache-install-hook.patch
+ create mode 100644 deploy/deploy_cache.py
+
+diff --git a/build/f_repos/patch/fuel-main/0006-bootstrap_admin_node.sh-deploy_cache-install-hook.patch b/build/f_repos/patch/fuel-main/0006-bootstrap_admin_node.sh-deploy_cache-install-hook.patch
+new file mode 100644
+index 0000000..d5b7646
+--- /dev/null
++++ b/build/f_repos/patch/fuel-main/0006-bootstrap_admin_node.sh-deploy_cache-install-hook.patch
+@@ -0,0 +1,69 @@
++From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
++Date: Mon, 28 Nov 2016 14:27:48 +0100
++Subject: [PATCH] bootstrap_admin_node.sh: deploy_cache install hook
++
++The tooling on the automated deploy side was updated to support caching
++of deploy artifacts like the bootstrap image (and its id_rsa keypair),
++target images, etc.
++
++Add an installation hook that calls `fuel-bootstrap import` instead of
++`build` when a bootstrap tar is available in the agreed location,
++/var/lib/opnfv/cache/bootstraps/.
++
++JIRA: ARMBAND-172
++
++Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
++---
++ iso/bootstrap_admin_node.sh | 20 +++++++++++++++++++-
++ 1 file changed, 19 insertions(+), 1 deletion(-)
++
++diff --git a/iso/bootstrap_admin_node.sh b/iso/bootstrap_admin_node.sh
++index abc5ffb..15e6261 100755
++--- a/iso/bootstrap_admin_node.sh
+++++ b/iso/bootstrap_admin_node.sh
++@@ -61,6 +61,8 @@ wget \
++
++ ASTUTE_YAML='/etc/fuel/astute.yaml'
++ BOOTSTRAP_NODE_CONFIG="/etc/fuel/bootstrap_admin_node.conf"
+++OPNFV_CACHE_PATH="/var/lib/opnfv/cache/bootstraps"
+++OPNFV_CACHE_TAR="opnfv-bootstraps-cache.tar"
++ bs_build_log='/var/log/fuel-bootstrap-image-build.log'
++ bs_status=0
++ # Backup network configs to this folder. Folder will be created only if
++@@ -94,6 +96,7 @@ image becomes available, reboot nodes that failed to be discovered."
++ bs_done_message="Default bootstrap image building done. Now you can boot new \
++ nodes over PXE, they will be discovered and become available for installing \
++ OpenStack on them"
+++bs_cache_message="OPNFV deploy cache: bootstrap image injected."
++ # Update issues messages
++ update_warn_message="There is an issue connecting to update repository of \
++ your distributions of OpenStack. \
++@@ -500,12 +503,27 @@ set_ui_bootstrap_error () {
++ EOF
++ }
++
+++function inject_cached_ubuntu_bootstrap () {
+++ if [ -f "${OPNFV_CACHE_PATH}/${OPNFV_CACHE_TAR}" -a \
+++ -f "${OPNFV_CACHE_PATH}/id_rsa.pub" -a \
+++ -f "${OPNFV_CACHE_PATH}/id_rsa" ]; then
+++ if cp "${OPNFV_CACHE_PATH}/id_rsa{,.pub}" "~/.ssh/" && \
+++ fuel-bootstrap -v --debug import --activate \
+++ "${OPNFV_CACHE_PATH}/${OPNFV_CACHE_TAR}" >>"$bs_build_log" 2>&1; then
+++ fuel notify --topic "done" --send "${bs_cache_message}"
+++ return 0
+++ fi
+++ fi
+++ return 1
+++}
+++
++ # Actually build the bootstrap image
++ build_ubuntu_bootstrap () {
++ local ret=1
++ echo ${bs_progress_message} >&2
++ set_ui_bootstrap_error "${bs_progress_message}" >&2
++- if fuel-bootstrap -v --debug build --target_arch arm64 --activate >>"$bs_build_log" 2>&1; then
+++ if inject_cached_ubuntu_bootstrap || fuel-bootstrap -v --debug \
+++ build --activate --target_arch arm64 >>"$bs_build_log" 2>&1; then
++ ret=0
++ fuel notify --topic "done" --send "${bs_done_message}"
++ else
+diff --git a/ci/deploy.sh b/ci/deploy.sh
+index 081806c..4b1ae0e 100755
+--- a/ci/deploy.sh
++++ b/ci/deploy.sh
+@@ -29,7 +29,7 @@ cat << EOF
+ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ `basename $0`: Deploys the Fuel@OPNFV stack
+
+-usage: `basename $0` -b base-uri [-B PXE Bridge] [-f] [-F] [-H] -l lab-name -p pod-name -s deploy-scenario [-S image-dir] [-T timeout] -i iso
++usage: `basename $0` -b base-uri [-B PXE Bridge] [-f] [-F] [-H] -l lab-name -p pod-name -s deploy-scenario [-S image-dir] [-C deploy-cache-dir] [-T timeout] -i iso
+ -s deployment-scenario [-S optional Deploy-scenario path URI]
+ [-R optional local relen repo (containing deployment Scenarios]
+
+@@ -47,6 +47,7 @@ OPTIONS:
+ -p Pod-name
+ -s Deploy-scenario short-name/base-file-name
+ -S Storage dir for VM images
++ -C Deploy cache dir for storing image artifacts
+ -T Timeout, in minutes, for the deploy.
+ -i iso url
+
+@@ -79,6 +80,7 @@ Input parameters to the build script is:
+ or a deployment short-name as defined by scenario.yaml in the deployment
+ scenario path.
+ -S Storage dir for VM images, default is fuel/deploy/images
++-C Deploy cache dir for bootstrap and target image artifacts, optional
+ -T Timeout, in minutes, for the deploy. It defaults to using the DEPLOY_TIMEOUT
+ environment variable when defined, or to the default in deploy.py otherwise
+ -i .iso image to be deployed (needs to be provided in a URI
+@@ -116,6 +118,7 @@ FUEL_CREATION_ONLY=''
+ NO_DEPLOY_ENVIRONMENT=''
+ STORAGE_DIR=''
+ DRY_RUN=0
++DEPLOY_CACHE_DIR=''
+ if ! [ -z $DEPLOY_TIMEOUT ]; then
+ DEPLOY_TIMEOUT="-dt $DEPLOY_TIMEOUT"
+ else
+@@ -128,7 +131,7 @@ fi
+ ############################################################################
+ # BEGIN of main
+ #
+-while getopts "b:B:dfFHl:L:p:s:S:T:i:he" OPTION
++while getopts "b:B:dfFHl:L:p:s:S:C:T:i:he" OPTION
+ do
+ case $OPTION in
+ b)
+@@ -179,6 +182,9 @@ do
+ STORAGE_DIR="-s ${OPTARG}"
+ fi
+ ;;
++ C)
++ DEPLOY_CACHE_DIR="-dc ${OPTARG}"
++ ;;
+ T)
+ DEPLOY_TIMEOUT="-dt ${OPTARG}"
+ ;;
+@@ -243,8 +249,8 @@ if [ $DRY_RUN -eq 0 ]; then
+ ISO=${SCRIPT_PATH}/ISO/image.iso
+ fi
+ # Start deployment
+- echo "python deploy.py $DEPLOY_LOG $STORAGE_DIR $PXE_BRIDGE $USE_EXISTING_FUEL $FUEL_CREATION_ONLY $NO_HEALTH_CHECK $NO_DEPLOY_ENVIRONMENT -dea ${SCRIPT_PATH}/config/dea.yaml -dha ${SCRIPT_PATH}/config/dha.yaml -iso $ISO $DEPLOY_TIMEOUT"
+- python deploy.py $DEPLOY_LOG $STORAGE_DIR $PXE_BRIDGE $USE_EXISTING_FUEL $FUEL_CREATION_ONLY $NO_HEALTH_CHECK $NO_DEPLOY_ENVIRONMENT -dea ${SCRIPT_PATH}/config/dea.yaml -dha ${SCRIPT_PATH}/config/dha.yaml -iso $ISO $DEPLOY_TIMEOUT
++ echo "python deploy.py $DEPLOY_LOG $STORAGE_DIR $PXE_BRIDGE $USE_EXISTING_FUEL $FUEL_CREATION_ONLY $NO_HEALTH_CHECK $NO_DEPLOY_ENVIRONMENT -dea ${SCRIPT_PATH}/config/dea.yaml -dha ${SCRIPT_PATH}/config/dha.yaml -iso $ISO $DEPLOY_TIMEOUT $DEPLOY_CACHE_DIR"
++ python deploy.py $DEPLOY_LOG $STORAGE_DIR $PXE_BRIDGE $USE_EXISTING_FUEL $FUEL_CREATION_ONLY $NO_HEALTH_CHECK $NO_DEPLOY_ENVIRONMENT -dea ${SCRIPT_PATH}/config/dea.yaml -dha ${SCRIPT_PATH}/config/dha.yaml -iso $ISO $DEPLOY_TIMEOUT $DEPLOY_CACHE_DIR
+ fi
+ popd > /dev/null
+
+diff --git a/deploy/cloud/deploy.py b/deploy/cloud/deploy.py
+index e00934b..b39e5fc 100644
+--- a/deploy/cloud/deploy.py
++++ b/deploy/cloud/deploy.py
+@@ -14,6 +14,7 @@ import io
+ from dea import DeploymentEnvironmentAdapter
+ from configure_environment import ConfigureEnvironment
+ from deployment import Deployment
++from deploy_cache import DeployCache
+
+ from common import (
+ R,
+@@ -61,6 +62,12 @@ class Deploy(object):
+ config_env.configure_environment()
+ self.env_id = config_env.env_id
+
++ def deploy_cache_install_targetimages(self):
++ DeployCache.install_targetimages_for_env(self.env_id)
++
++ def deploy_cache_extract_targetimages(self):
++ DeployCache.extract_targetimages_from_env(self.env_id)
++
+ def deploy_cloud(self):
+ dep = Deployment(self.dea, YAML_CONF_DIR, self.env_id,
+ self.node_roles_dict, self.no_health_check,
+@@ -76,8 +83,12 @@ class Deploy(object):
+
+ self.configure_environment()
+
++ self.deploy_cache_install_targetimages()
++
+ self.deploy_cloud()
+
++ self.deploy_cache_extract_targetimages()
++
+
+ def parse_arguments():
+ parser = ArgParser(prog='python %s' % __file__)
+diff --git a/deploy/deploy.py b/deploy/deploy.py
+index 08702d2..1a55361 100755
+--- a/deploy/deploy.py
++++ b/deploy/deploy.py
+@@ -23,6 +23,7 @@ from dea import DeploymentEnvironmentAdapter
+ from dha import DeploymentHardwareAdapter
+ from install_fuel_master import InstallFuelMaster
+ from deploy_env import CloudDeploy
++from deploy_cache import DeployCache
+ from execution_environment import ExecutionEnvironment
+
+ from common import (
+@@ -62,7 +63,8 @@ class AutoDeploy(object):
+ def __init__(self, no_fuel, fuel_only, no_health_check, cleanup_only,
+ cleanup, storage_dir, pxe_bridge, iso_file, dea_file,
+ dha_file, fuel_plugins_dir, fuel_plugins_conf_dir,
+- no_plugins, deploy_timeout, no_deploy_environment, deploy_log):
++ no_plugins, deploy_cache_dir, deploy_timeout,
++ no_deploy_environment, deploy_log):
+ self.no_fuel = no_fuel
+ self.fuel_only = fuel_only
+ self.no_health_check = no_health_check
+@@ -76,6 +78,7 @@ class AutoDeploy(object):
+ self.fuel_plugins_dir = fuel_plugins_dir
+ self.fuel_plugins_conf_dir = fuel_plugins_conf_dir
+ self.no_plugins = no_plugins
++ self.deploy_cache_dir = deploy_cache_dir
+ self.deploy_timeout = deploy_timeout
+ self.no_deploy_environment = no_deploy_environment
+ self.deploy_log = deploy_log
+@@ -117,7 +120,7 @@ class AutoDeploy(object):
+ self.fuel_username, self.fuel_password,
+ self.dea_file, self.fuel_plugins_conf_dir,
+ WORK_DIR, self.no_health_check,
+- self.deploy_timeout,
++ self.deploy_cache_dir, self.deploy_timeout,
+ self.no_deploy_environment, self.deploy_log)
+ with old_dep.ssh:
+ old_dep.check_previous_installation()
+@@ -129,6 +132,7 @@ class AutoDeploy(object):
+ self.fuel_conf['ip'], self.fuel_username,
+ self.fuel_password, self.fuel_node_id,
+ self.iso_file, WORK_DIR,
++ self.deploy_cache_dir,
+ self.fuel_plugins_dir, self.no_plugins)
+ fuel.install()
+
+@@ -137,6 +141,7 @@ class AutoDeploy(object):
+ tmp_new_dir = '%s/newiso' % self.tmp_dir
+ try:
+ self.copy(tmp_orig_dir, tmp_new_dir)
++ self.deploy_cache_fingerprints(tmp_new_dir)
+ self.patch(tmp_new_dir, new_iso)
+ except Exception as e:
+ exec_cmd('fusermount -u %s' % tmp_orig_dir, False)
+@@ -157,6 +162,12 @@ class AutoDeploy(object):
+ delete(tmp_orig_dir)
+ exec_cmd('chmod -R 755 %s' % tmp_new_dir)
+
++ def deploy_cache_fingerprints(self, tmp_new_dir):
++ if self.deploy_cache_dir:
++ log('Deploy cache: Collecting fingerprints...')
++ deploy_cache = DeployCache(self.deploy_cache_dir)
++ deploy_cache.do_fingerprints(tmp_new_dir, self.dea_file)
++
+ def patch(self, tmp_new_dir, new_iso):
+ log('Patching...')
+ patch_dir = '%s/%s' % (CWD, PATCH_DIR)
+@@ -219,7 +230,8 @@ class AutoDeploy(object):
+ dep = CloudDeploy(self.dea, self.dha, self.fuel_conf['ip'],
+ self.fuel_username, self.fuel_password,
+ self.dea_file, self.fuel_plugins_conf_dir,
+- WORK_DIR, self.no_health_check, self.deploy_timeout,
++ WORK_DIR, self.no_health_check,
++ self.deploy_cache_dir, self.deploy_timeout,
+ self.no_deploy_environment, self.deploy_log)
+ return dep.deploy()
+
+@@ -344,6 +356,8 @@ def parse_arguments():
+ help='Fuel Plugins Configuration directory')
+ parser.add_argument('-np', dest='no_plugins', action='store_true',
+ default=False, help='Do not install Fuel Plugins')
++ parser.add_argument('-dc', dest='deploy_cache_dir', action='store',
++ help='Deploy Cache Directory')
+ parser.add_argument('-dt', dest='deploy_timeout', action='store',
+ default=240, help='Deployment timeout (in minutes) '
+ '[default: 240]')
+@@ -377,6 +391,10 @@ def parse_arguments():
+ for bridge in args.pxe_bridge:
+ check_bridge(bridge, args.dha_file)
+
++ if args.deploy_cache_dir:
++ log('Using deploy cache directory: %s' % args.deploy_cache_dir)
++ create_dir_if_not_exists(args.deploy_cache_dir)
++
+
+ kwargs = {'no_fuel': args.no_fuel, 'fuel_only': args.fuel_only,
+ 'no_health_check': args.no_health_check,
+@@ -387,6 +405,7 @@ def parse_arguments():
+ 'fuel_plugins_dir': args.fuel_plugins_dir,
+ 'fuel_plugins_conf_dir': args.fuel_plugins_conf_dir,
+ 'no_plugins': args.no_plugins,
++ 'deploy_cache_dir': args.deploy_cache_dir,
+ 'deploy_timeout': args.deploy_timeout,
+ 'no_deploy_environment': args.no_deploy_environment,
+ 'deploy_log': args.deploy_log}
+diff --git a/deploy/deploy_cache.py b/deploy/deploy_cache.py
+new file mode 100644
+index 0000000..d7ec1c7
+--- /dev/null
++++ b/deploy/deploy_cache.py
+@@ -0,0 +1,319 @@
++###############################################################################
++# Copyright (c) 2016 Enea AB and others.
++# Alexandru.Avadanii@enea.com
++# All rights reserved. This program and the accompanying materials
++# are made available under the terms of the Apache License, Version 2.0
++# which accompanies this distribution, and is available at
++# http://www.apache.org/licenses/LICENSE-2.0
++###############################################################################
++
++import glob
++import hashlib
++import io
++import json
++import os
++import shutil
++import yaml
++
++from common import (
++ exec_cmd,
++ log,
++)
++
++###############################################################################
++# Deploy Cache Flow Overview
++###############################################################################
++# 1. do_fingerprints
++# Can be called as soon as a Fuel Master ISO chroot is available.
++# This will gather all required information for uniquely identifying the
++# objects in cache (bootstraps, targetimages).
++# 2. inject_cache
++# Can be called as soon as we have a steady SSH connection to the Fuel
++# Master node. It will inject cached artifacts over SSH, for later install.
++# 3. (external, async) install cached bootstrap instead of building a new one
++# /sbin/bootstrap_admin_node.sh will check for cached bootstrap images
++# (with id_rsa, id_rsa.pub attached) and will install those via
++# $ fuel-bootstrap import opnfv-bootstraps-cache.tar
++# 4. install_targetimages_for_env
++# Should be called before cloud deploy is started, to install env-generic
++# 'env_X_...' cached images for the current environment ID.
++# Static method, to be used on the remote Fuel Master node; does not require
++# access to the deploy cache, as it only moves local files around.
++# 5. extract_targetimages_from_env
++# Should be called at env deploy finish, to prepare artifacts for caching.
++# Static method, same observations as above apply.
++# 6. collect_artifacts
++# Call last, to collect all artifacts.
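++#
++# Illustrative call order (a sketch only; the real call sites are in
++# deploy/deploy.py, deploy/install_fuel_master.py, deploy/cloud/deploy.py
++# and deploy/deploy_env.py, as patched below):
++#   cache = DeployCache(deploy_cache_dir)             # jumphost side
++#   cache.do_fingerprints(iso_chroot_dir, dea_file)   # step 1
++#   cache.inject_cache(ssh)                           # step 2
++#   DeployCache.install_targetimages_for_env(env_id)  # step 4, on Fuel Master
++#   DeployCache.extract_targetimages_from_env(env_id) # step 5, on Fuel Master
++#   cache.collect_artifacts(ssh)                      # step 6, jumphost side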
++###############################################################################
++
++###############################################################################
++# Deploy cache artifacts:
++# - id_rsa
++# - bootstrap image (Ubuntu)
++# - environment target image (Ubuntu)
++###############################################################################
++# Cache fingerprint covers:
++# - bootstrap:
++# - local mirror contents
++# - package list (and everything else in fuel_bootstrap_cli.yaml)
++# - target image:
++# - local mirror contents
++# - package list (determined from DEA)
++###############################################################################
++# WARN: Cache fingerprint does NOT yet cover:
++# - image_data (always assume the default /boot, /);
++# - output_dir (always assume the default /var/www/nailgun/targetimages);
++# - codename (always assume the default, currently 'trusty');
++# - extra_dirs: /usr/share/fuel_bootstrap_cli/files/trusty
++# - root_ssh_authorized_file, including the contents of /root/.ssh/id_rsa.pub
++# - Auxiliary repo .../mitaka-9.0/ubuntu/auxiliary
++# If the above change without triggering a cache miss, try clearing the cache.
++###############################################################################
++# WARN: Bootstrap caching implies RSA keypair to be reused!
++###############################################################################
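++
++# Resulting on-disk cache layout on the jumphost (illustrative sketch; file
++# names other than the keypair and tar below depend on Fuel's image naming):
++#   <deploy-cache-dir>/
++#     <sha1(mirrors+bootstraps)>/id_rsa, id_rsa.pub, opnfv-bootstraps-cache.tar
++#     <sha1(mirrors+targetimages)>/env_X_*  (target images + .yaml metadata)
++# The fingerprints themselves are kept in deploy_cache_fingerprints.yaml,
++# written in the current working directory of the deploy tool.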
++
++# Local mirrors will be used on Fuel Master for both bootstrap and target image
++# build, from `http://127.0.0.1:8080/...` or `http://10.20.0.2:8080/...`:
++# - MOS .../mitaka-9.0/ubuntu/x86_64
++# - Ubuntu .../mirrors/ubuntu/
++# All these reside on Fuel Master at local path:
++NAILGUN_PATH = '/var/www/nailgun/'
++
++# Artifact names (corresponding to nailgun subdirs)
++MIRRORS = 'mirrors'
++BOOTSTRAPS = 'bootstraps'
++TARGETIMAGES = 'targetimages'
++
++# Info for collecting RSA keypair
++RSA_KEYPAIR_PATH = '/root/.ssh'
++RSA_KEYPAIR_FILES = ['id_rsa', 'id_rsa.pub']
++
++# Relative path for collecting the active bootstrap image(s) after env deploy
++NAILGUN_ACT_BOOTSTRAP_SUBDIR = '%s/active_bootstrap' % BOOTSTRAPS
++
++# Relative path for collecting target image(s) for the deployed environment
++NAILGUN_TIMAGES_SUBDIR = TARGETIMAGES
++
++# OPNFV Fuel bootstrap settings file that will be injected at deploy
++ISO_BOOTSTRAP_CLI_YAML = '/opnfv/fuel_bootstrap_cli.yaml'
++
++# OPNFV Deploy Cache path on Fuel Master, where artifacts will be injected
++REMOTE_CACHE_PATH = '/var/lib/opnfv/cache'
++
++# OPNFV Bootstrap Cache tar archive name, to be used by bootstrap_admin_node.sh
++BOOTSTRAP_ARCHIVE = 'opnfv-bootstraps-cache.tar'
++
++# Environment-ID-independent prefix used for cached target image names
++ENVX = 'env_X_'
++
++class DeployCache(object):
++ """OPNFV Deploy Cache - managed storage for cacheable artifacts"""
++
++ def __init__(self, cache_dir,
++ fingerprints_yaml='deploy_cache_fingerprints.yaml'):
++ self.cache_dir = cache_dir
++ self.fingerprints_yaml = fingerprints_yaml
++ self.fingerprints = {BOOTSTRAPS: None,
++ MIRRORS: None,
++ TARGETIMAGES: None}
++
++ def __load_fingerprints(self):
++ """Load deploy cache yaml config holding fingerprints"""
++ if os.path.isfile(self.fingerprints_yaml):
++ cache_fingerprints = open(self.fingerprints_yaml).read()
++ self.fingerprints = yaml.load(cache_fingerprints)
++
++ def __save_fingerprints(self):
++ """Update deploy cache yaml config holding fingerprints"""
++ with open(self.fingerprints_yaml, 'w') as outfile:
++ outfile.write(yaml.safe_dump(self.fingerprints,
++ default_flow_style=False))
++
++ def __fingerprint_mirrors(self, chroot_path):
++ """Collect repo mirror fingerprints"""
++ md5sums = list()
++ # Scan all ISO for deb repo metadata and collect MD5 from Release files
++ for root, _, files in os.walk(chroot_path):
++ for relf in files:
++ if relf == 'Release' and 'binary' not in root:
++ collect_sums = False
++ filepath = os.path.join(root, relf)
++ with open(filepath, "r") as release_file:
++ for line in release_file:
++ if collect_sums:
++ if line.startswith(' '):
++ md5sums += [line[1:33]]
++ else:
++ break
++ elif line.startswith('MD5Sum:'):
++ collect_sums = True
++ sorted_md5sums = json.dumps(md5sums, sort_keys=True)
++ self.fingerprints[MIRRORS] = hashlib.sha1(sorted_md5sums).hexdigest()
++
++ def __fingerprint_bootstrap(self, chroot_path):
++ """Collect bootstrap image metadata fingerprints"""
++ # FIXME(armband): include 'extra_dirs' contents
++ cli_yaml_path = os.path.join(chroot_path, ISO_BOOTSTRAP_CLI_YAML[1:])
++ bootstrap_cli_yaml = open(cli_yaml_path).read()
++ bootstrap_data = yaml.load(bootstrap_cli_yaml)
++ sorted_data = json.dumps(bootstrap_data, sort_keys=True)
++ self.fingerprints[BOOTSTRAPS] = hashlib.sha1(sorted_data).hexdigest()
++
++ def __fingerprint_target(self, dea_file):
++ """Collect target image metadata fingerprints"""
++ # FIXME(armband): include 'image_data', 'codename', 'output'
++ with io.open(dea_file) as stream:
++ dea = yaml.load(stream)
++ editable = dea['settings']['editable']
++ target_data = {'packages': editable['provision']['packages'],
++ 'repos': editable['repo_setup']['repos']}
++ s_data = json.dumps(target_data, sort_keys=True)
++ self.fingerprints[TARGETIMAGES] = hashlib.sha1(s_data).hexdigest()
++
++ def do_fingerprints(self, chroot_path, dea_file):
++ """Collect SHA1 fingerprints based on chroot contents, DEA settings"""
++ try:
++ self.__load_fingerprints()
++ self.__fingerprint_mirrors(chroot_path)
++ self.__fingerprint_bootstrap(chroot_path)
++ self.__fingerprint_target(dea_file)
++ self.__save_fingerprints()
++ except Exception as ex:
++ log('Failed to get cache fingerprint: %s' % str(ex))
++
++ def __lookup_cache(self, sha):
++ """Search for object in cache based on SHA fingerprint"""
++ cache_sha_dir = os.path.join(self.cache_dir, sha)
++ if not os.path.isdir(cache_sha_dir) or not os.listdir(cache_sha_dir):
++ return None
++ return cache_sha_dir
++
++ def __inject_cache_dir(self, ssh, sha, artifact):
++ """Stage cached object (dir) in Fuel Master OPNFV local cache"""
++ local_path = self.__lookup_cache(sha)
++ if local_path:
++ remote_path = os.path.join(REMOTE_CACHE_PATH, artifact)
++ with ssh:
++ ssh.exec_cmd('mkdir -p %s' % remote_path)
++ for cachedfile in glob.glob('%s/*' % local_path):
++ ssh.scp_put(cachedfile, remote_path)
++ return local_path
++
++ def __mix_fingerprints(self, f1, f2):
++ """Compute composite fingerprint"""
++ if self.fingerprints[f1] is None or self.fingerprints[f2] is None:
++ return None
++ return hashlib.sha1('%s%s' %
++ (self.fingerprints[f1], self.fingerprints[f2])).hexdigest()
++
++ def inject_cache(self, ssh):
++ """Lookup artifacts in cache and inject them over SSH/SCP into Fuel"""
++ try:
++ self.__load_fingerprints()
++ for artifact in [BOOTSTRAPS, TARGETIMAGES]:
++ sha = self.__mix_fingerprints(MIRRORS, artifact)
++ if sha is None:
++ log('Missing fingerprint for: %s' % artifact)
++ continue
++ if not self.__inject_cache_dir(ssh, sha, artifact):
++ log('SHA1 not in cache: %s (%s)' % (str(sha), artifact))
++ else:
++ log('SHA1 injected: %s (%s)' % (str(sha), artifact))
++ except Exception as ex:
++ log('Failed to inject cached artifacts into Fuel: %s' % str(ex))
++
++ def __extract_bootstraps(self, ssh, cache_sha_dir):
++ """Collect bootstrap artifacts from Fuel over SSH/SCP"""
++ remote_tar = os.path.join(REMOTE_CACHE_PATH, BOOTSTRAP_ARCHIVE)
++ local_tar = os.path.join(cache_sha_dir, BOOTSTRAP_ARCHIVE)
++ with ssh:
++ for k in RSA_KEYPAIR_FILES:
++ ssh.scp_get(os.path.join(RSA_KEYPAIR_PATH, k),
++ local=os.path.join(cache_sha_dir, k))
++ ssh.exec_cmd('tar cf %s %s/*' % (remote_tar,
++ os.path.join(NAILGUN_PATH, NAILGUN_ACT_BOOTSTRAP_SUBDIR)))
++ ssh.scp_get(remote_tar, local=local_tar)
++ ssh.exec_cmd('rm -f %s' % remote_tar)
++
++ def __extract_targetimages(self, ssh, cache_sha_dir):
++ """Collect target image artifacts from Fuel over SSH/SCP"""
++ cti_path = os.path.join(REMOTE_CACHE_PATH, TARGETIMAGES)
++ with ssh:
++ ssh.scp_get('%s/%s*' % (cti_path, ENVX), local=cache_sha_dir)
++
++ def collect_artifacts(self, ssh):
++ """Collect artifacts from Fuel over SSH/SCP and add them to cache"""
++ try:
++ self.__load_fingerprints()
++ for artifact, func in {
++ BOOTSTRAPS: self.__extract_bootstraps,
++ TARGETIMAGES: self.__extract_targetimages
++ }.iteritems():
++ sha = self.__mix_fingerprints(MIRRORS, artifact)
++ if sha is None:
++ log('WARN: Skip caching, NO fingerprint: %s' % artifact)
++ continue
++ local_path = self.__lookup_cache(sha)
++ if local_path:
++ log('SHA1 already in cache: %s (%s)' % (str(sha), artifact))
++ else:
++ log('New cache SHA1: %s (%s)' % (str(sha), artifact))
++ cache_sha_dir = os.path.join(self.cache_dir, sha)
++ exec_cmd('mkdir -p %s' % cache_sha_dir)
++ func(ssh, cache_sha_dir)
++ except Exception as ex:
++ log('Failed to extract artifacts from Fuel: %s' % str(ex))
++
++ @staticmethod
++ def extract_targetimages_from_env(env_id):
++ """Prepare targetimages from env ID for storage in deploy cache
++
++ NOTE: This method should be executed locally ON the Fuel Master node.
++ WARN: This method overwrites targetimages cache on Fuel Master node.
++ """
++ env_n = 'env_%s_' % str(env_id)
++ cti_path = os.path.join(REMOTE_CACHE_PATH, TARGETIMAGES)
++ ti_path = os.path.join(NAILGUN_PATH, NAILGUN_TIMAGES_SUBDIR)
++ try:
++ exec_cmd('rm -rf %s && mkdir -p %s' % (cti_path, cti_path))
++ for root, _, files in os.walk(ti_path):
++ for tif in files:
++ if tif.startswith(env_n):
++ src = os.path.join(root, tif)
++ dest = os.path.join(cti_path, tif.replace(env_n, ENVX))
++ if tif.endswith('.yaml'):
++ shutil.copy(src, dest)
++ exec_cmd('sed -i "s|%s|%s|g" %s' %
++ (env_n, ENVX, dest))
++ else:
++ os.link(src, dest)
++ except Exception as ex:
++ log('Failed to extract targetimages artifacts from env %s: %s' %
++ (str(env_id), str(ex)))
++
++ @staticmethod
++ def install_targetimages_for_env(env_id):
++ """Install targetimages artifacts for a specific env ID
++
++ NOTE: This method should be executed locally ON the Fuel Master node.
++ """
++ env_n = 'env_%s_' % str(env_id)
++ cti_path = os.path.join(REMOTE_CACHE_PATH, TARGETIMAGES)
++ ti_path = os.path.join(NAILGUN_PATH, NAILGUN_TIMAGES_SUBDIR)
++ if not os.path.isdir(cti_path):
++ log('%s cache dir not found: %s' % (TARGETIMAGES, cti_path))
++ else:
++ try:
++ for root, _, files in os.walk(cti_path):
++ for tif in files:
++ src = os.path.join(root, tif)
++ dest = os.path.join(ti_path, tif.replace(ENVX, env_n))
++ if tif.endswith('.yaml'):
++ shutil.copy(src, dest)
++ exec_cmd('sed -i "s|%s|%s|g" %s' %
++ (ENVX, env_n, dest))
++ else:
++ os.link(src, dest)
++ except Exception as ex:
++ log('Failed to install targetimages for env %s: %s' %
++ (str(env_id), str(ex)))
+diff --git a/deploy/deploy_env.py b/deploy/deploy_env.py
+index 1d2dfeb..2375f51 100644
+--- a/deploy/deploy_env.py
++++ b/deploy/deploy_env.py
+@@ -15,6 +15,7 @@ import glob
+ import time
+ import shutil
+
++from deploy_cache import DeployCache
+ from ssh_client import SSHClient
+
+ from common import (
+@@ -36,7 +37,8 @@ class CloudDeploy(object):
+
+ def __init__(self, dea, dha, fuel_ip, fuel_username, fuel_password,
+ dea_file, fuel_plugins_conf_dir, work_dir, no_health_check,
+- deploy_timeout, no_deploy_environment, deploy_log):
++ deploy_cache_dir, deploy_timeout,
++ no_deploy_environment, deploy_log):
+ self.dea = dea
+ self.dha = dha
+ self.fuel_ip = fuel_ip
+@@ -50,6 +52,8 @@ class CloudDeploy(object):
+ self.fuel_plugins_conf_dir = fuel_plugins_conf_dir
+ self.work_dir = work_dir
+ self.no_health_check = no_health_check
++ self.deploy_cache = (DeployCache(deploy_cache_dir)
++ if deploy_cache_dir else None)
+ self.deploy_timeout = deploy_timeout
+ self.no_deploy_environment = no_deploy_environment
+ self.deploy_log = deploy_log
+@@ -83,9 +87,14 @@ class CloudDeploy(object):
+ self.work_dir, os.path.basename(self.dea_file)))
+ s.scp_put('%s/common.py' % self.file_dir, self.work_dir)
+ s.scp_put('%s/dea.py' % self.file_dir, self.work_dir)
++ s.scp_put('%s/deploy_cache.py' % self.file_dir, self.work_dir)
+ for f in glob.glob('%s/cloud/*' % self.file_dir):
+ s.scp_put(f, self.work_dir)
+
++ def deploy_cache_collect_artifacts(self):
++ if self.deploy_cache:
++ self.deploy_cache.collect_artifacts(self.ssh)
++
+ def power_off_nodes(self):
+ for node_id in self.node_ids:
+ self.dha.node_power_off(node_id)
+@@ -284,4 +293,6 @@ class CloudDeploy(object):
+
+ self.get_put_deploy_log()
+
++ self.deploy_cache_collect_artifacts()
++
+ return rc
+diff --git a/deploy/install_fuel_master.py b/deploy/install_fuel_master.py
+index ccc18d3..2615818 100644
+--- a/deploy/install_fuel_master.py
++++ b/deploy/install_fuel_master.py
+@@ -10,6 +10,7 @@
+ import time
+ import os
+ import glob
++from deploy_cache import DeployCache
+ from ssh_client import SSHClient
+ from dha_adapters.libvirt_adapter import LibvirtAdapter
+
+@@ -33,7 +34,7 @@ class InstallFuelMaster(object):
+
+ def __init__(self, dea_file, dha_file, fuel_ip, fuel_username,
+ fuel_password, fuel_node_id, iso_file, work_dir,
+- fuel_plugins_dir, no_plugins):
++ deploy_cache_dir, fuel_plugins_dir, no_plugins):
+ self.dea_file = dea_file
+ self.dha = LibvirtAdapter(dha_file)
+ self.fuel_ip = fuel_ip
+@@ -43,6 +44,8 @@ class InstallFuelMaster(object):
+ self.iso_file = iso_file
+ self.iso_dir = os.path.dirname(self.iso_file)
+ self.work_dir = work_dir
++ self.deploy_cache = (DeployCache(deploy_cache_dir)
++ if deploy_cache_dir else None)
+ self.fuel_plugins_dir = fuel_plugins_dir
+ self.no_plugins = no_plugins
+ self.file_dir = os.path.dirname(os.path.realpath(__file__))
+@@ -84,6 +87,10 @@ class InstallFuelMaster(object):
+ log('Wait until Fuel menu is up')
+ fuel_menu_pid = self.wait_until_fuel_menu_up()
+
++ if self.deploy_cache:
++ log('Deploy cache: Injecting bootstraps and targetimages')
++ self.deploy_cache.inject_cache(self.ssh)
++
+ log('Inject our own astute.yaml and fuel_bootstrap_cli.yaml settings')
+ self.inject_own_astute_and_bootstrap_yaml()
+