-rw-r--r--  build/cache.mk                                 |   2
-rw-r--r--  build/docker/.gitignore                        |   2
-rw-r--r--  build/docker/Makefile                          |  73
-rw-r--r--  deploy/cloud/deployment.py                     | 116
-rw-r--r--  deploy/environments/execution_environment.py   |   2
5 files changed, 153 insertions, 42 deletions
diff --git a/build/cache.mk b/build/cache.mk
index f6db01797..a65f310d0 100644
--- a/build/cache.mk
+++ b/build/cache.mk
@@ -51,5 +51,5 @@ cache:
exit 1; \
fi
@docker version >/dev/null 2>&1 || (echo 'No Docker installation available'; exit 1)
- @make -C docker
+ @make -C docker get-cache all
docker/runcontext $(DOCKERIMG) $(MAKE) $(MAKEFLAGS) cached-all
diff --git a/build/docker/.gitignore b/build/docker/.gitignore
new file mode 100644
index 000000000..2585910ce
--- /dev/null
+++ b/build/docker/.gitignore
@@ -0,0 +1,2 @@
+.docker*
+ubuntu-builder/Dockerfile
diff --git a/build/docker/Makefile b/build/docker/Makefile
index d4423b0a2..783881e8d 100644
--- a/build/docker/Makefile
+++ b/build/docker/Makefile
@@ -1,5 +1,5 @@
##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
+# Copyright (c) 2015,2016 Ericsson AB and others.
# stefan.k.berg@ericsson.com
# jonas.bjurel@ericsson.com
# All rights reserved. This program and the accompanying materials
@@ -12,6 +12,15 @@ SHELL = /bin/bash
FILES = $(wildcard ubuntu-builder/*) runcontext
DOCKER_VER := $(shell [[ "$$(docker version --format '{{.Client.Version}}')" =~ ([0-9]+)\.([0-9]+) ]] && echo $$(( $${BASH_REMATCH[1]} * 100 + $${BASH_REMATCH[2]} )))
+# Builder tag lifespan; force a container rebuild X days after tag creation
+DOCKER_KEEP = 1 days
+DOCKER_IMG = opnfv.org/ubuntu-builder
+DOCKER_TAG = ${DOCKER_IMG}:14.04
+# Shell construct for checking that our tag object has not expired
+DOCKER_EXPIRED = D_TAG_BIRTH=`docker inspect --format="{{.Created}}" \
+ ${DOCKER_TAG} 2>/dev/null`; test -z "$$D_TAG_BIRTH" -o `date +%s` -gt \
+ `date -d "$$D_TAG_BIRTH +${DOCKER_KEEP}" +%s`; echo $$?
+
# Don't use -f flag when docker is newer than 1.9
# https://docs.docker.com/engine/deprecated/#/f-flag-on-docker-tag
ifeq ($(shell echo "$(DOCKER_VER)>109" | bc), 1)
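
The DOCKER_EXPIRED construct above resolves to 0 when the builder tag is missing or was created more than DOCKER_KEEP ago, which is what later forces a rebuild. A minimal Python sketch of the same check, assuming only that the docker CLI is on PATH (tag_expired is an illustrative name, not part of this change):

    import subprocess
    from datetime import datetime, timedelta

    def tag_expired(tag='opnfv.org/ubuntu-builder:14.04', keep_days=1):
        """Return True when the tag is missing or older than keep_days,
        mirroring the DOCKER_EXPIRED shell construct."""
        try:
            out = subprocess.check_output(
                ['docker', 'inspect', '--format', '{{.Created}}', tag],
                stderr=subprocess.DEVNULL)
        except subprocess.CalledProcessError:
            return True  # no such tag -> treat as expired, forcing a build
        # docker reports RFC3339 timestamps, e.g. 2016-09-27T12:34:56.123456789Z;
        # the first 19 characters are enough for a whole-day comparison
        created = datetime.strptime(out.decode().strip()[:19], '%Y-%m-%dT%H:%M:%S')
        return datetime.utcnow() - created > timedelta(days=keep_days)
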
@@ -23,7 +32,7 @@ endif
.PHONY: all
all: .docker
-.docker: $(FILES)
+.dockercfg: $(FILES)
cp Dockerfile ubuntu-builder/Dockerfile
# Only add proxy ENVs where set in host - needed to pull the base Ubuntu image
test -n "${http_proxy}" && sed -i "s;INSERT_HTTP_PROXY;${http_proxy};" ubuntu-builder/Dockerfile || exit 0
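
The .dockercfg recipe here and in the next hunk fills the INSERT_*_PROXY placeholders only for proxy variables actually set on the host and then strips any leftover INSERT_ lines. A rough Python equivalent of that templating step (render_dockerfile is an illustrative name, not part of the patch):

    import os

    def render_dockerfile(src='Dockerfile', dst='ubuntu-builder/Dockerfile'):
        """Copy the Dockerfile template, filling proxy placeholders only when
        the corresponding variable is set; drop unfilled placeholder lines."""
        placeholders = {
            'INSERT_HTTP_PROXY': os.environ.get('http_proxy') or os.environ.get('HTTP_PROXY'),
            'INSERT_HTTPS_PROXY': os.environ.get('https_proxy') or os.environ.get('HTTPS_PROXY'),
            'INSERT_NO_PROXY': os.environ.get('no_proxy') or os.environ.get('NO_PROXY'),
        }
        rendered = []
        with open(src) as template:
            for line in template:
                for token, value in placeholders.items():
                    if token in line:
                        # substitute when the proxy var is set, otherwise drop the line
                        line = line.replace(token, value) if value else None
                        break
                if line is not None:
                    rendered.append(line)
        with open(dst, 'w') as out:
            out.writelines(rendered)
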
@@ -33,10 +42,62 @@ all: .docker
test -n "${HTTPS_PROXY}" && sed -i "s;INSERT_HTTPS_PROXY;${HTTPS_PROXY};" ubuntu-builder/Dockerfile || exit 0
test -n "${NO_PROXY}" && sed -i "s;INSERT_NO_PROXY;${NO_PROXY};" ubuntu-builder/Dockerfile || exit 0
sed -i '/INSERT_/d' ubuntu-builder/Dockerfile
- /usr/bin/docker build --rm=true --no-cache=true -t opnfv.org/ubuntu-builder:14.04 ubuntu-builder
- /usr/bin/docker tag ${tag_flags} opnfv.org/ubuntu-builder:14.04 opnfv.org/ubuntu-builder
- touch .docker
+ touch $@
+
+.docker: .dockercfg
+ @if test -f .cacheid -o "$(shell ${DOCKER_EXPIRED})" -eq "0"; then \
+ /usr/bin/docker build --rm=true --no-cache=true \
+ -t ${DOCKER_TAG} ubuntu-builder && \
+ /usr/bin/docker tag ${tag_flags} ${DOCKER_TAG} ${DOCKER_IMG}; \
+ else \
+ echo "Docker: Tag '${DOCKER_TAG}' was created less than" \
+ "${DOCKER_KEEP} ago, skipping re-build."; \
+ fi
+ touch $@
+ test -f .cacheid && $(MAKE) -f Makefile put-cache || exit 0
.PHONY: clean
clean:
- rm -f .docker ubuntu-builder/Dockerfile
+ rm -f .docker* ubuntu-builder/Dockerfile
+
+.PHONY: deepclean
+deepclean: clean clean-cache
+
+##############################################################################
+# Cache operations - only used when building through ci/build.sh
+##############################################################################
+
+# NOTE: For docker, we only get/put cache to fingerprint build scripts and
+# env vars; the cached data holds only an empty .docker file.
+
+# Create a unique hash to be used for getting and putting cache, based on:
+# - the ubuntu-builder Dockerfile (including any proxy env vars) and runcontext;
+# - The contents of this Makefile
+.cacheid: .dockercfg
+ sha1sum Makefile runcontext $(wildcard ubuntu-builder/*) > .cachedata
+ cat .cachedata | $(CACHETOOL) getid > .cacheid
+
+# Clean local data related to caching - called prior to ordinary build
+.PHONY: clean-cache
+clean-cache:
+ rm -f .cachedata .cacheid
+
+# Try to download cache - called prior to ordinary build
+.PHONY: get-cache
+get-cache: .cacheid
+ @if $(CACHETOOL) check $(shell cat .cacheid); then \
+ if test "$(shell ${DOCKER_EXPIRED})" -eq "0"; then \
+ echo "Docker: Tag '${DOCKER_TAG}' missing" \
+ "or older than ${DOCKER_KEEP}, not using it."; \
+ else \
+ touch .docker; \
+ fi; \
+ else \
+ echo "No cache item found for $(shell cat .cacheid)" ;\
+ exit 0;\
+ fi
+
+# Store cache if not already stored - called after ordinary build
+.PHONY: put-cache
+put-cache: .cacheid
+ @tar cf - .docker | $(CACHETOOL) put $(shell cat .cacheid)
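
The cache targets above fingerprint the builder definition rather than caching the image itself: .cacheid is derived from sha1sum output for the Makefile, runcontext and ubuntu-builder/*, and the stored artifact is just the empty .docker stamp. A small Python sketch of the fingerprinting idea, assuming it runs from build/docker (cache_fingerprint is an illustrative name; the CACHETOOL transport is not modeled here):

    import glob
    import hashlib

    def cache_fingerprint():
        """Hash the files that define the builder image, mirroring
        'sha1sum Makefile runcontext ubuntu-builder/*' piped to CACHETOOL getid."""
        digest = hashlib.sha1()
        for path in ['Makefile', 'runcontext'] + sorted(glob.glob('ubuntu-builder/*')):
            with open(path, 'rb') as f:
                digest.update(path.encode())   # include the file name
                digest.update(f.read())        # and its contents
        return digest.hexdigest()
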
diff --git a/deploy/cloud/deployment.py b/deploy/cloud/deployment.py
index 4a9fcd9a8..ecccc241f 100644
--- a/deploy/cloud/deployment.py
+++ b/deploy/cloud/deployment.py
@@ -9,6 +9,7 @@
import time
import re
+import json
from common import (
N,
@@ -29,8 +30,16 @@ GREP_LINES_OF_LEADING_CONTEXT = 100
GREP_LINES_OF_TRAILING_CONTEXT = 100
LIST_OF_CHAR_TO_BE_ESCAPED = ['[', ']', '"']
-class Deployment(object):
+class DeployNotStart(Exception):
+ """Unable to start deployment"""
+
+
+class NodesGoOffline(Exception):
+    """Nodes go offline during deployment"""
+
+
+class Deployment(object):
def __init__(self, dea, yaml_config_dir, env_id, node_id_roles_dict,
no_health_check, deploy_timeout):
@@ -43,7 +52,6 @@ class Deployment(object):
self.pattern = re.compile(
'\d\d\d\d-\d\d-\d\d\s\d\d:\d\d:\d\d')
-
def collect_error_logs(self):
for node_id, roles_blade in self.node_id_roles_dict.iteritems():
log_list = []
@@ -89,7 +97,7 @@ class Deployment(object):
log_msg += details
if log_msg:
- log_list.append(log_msg)
+ log_list.append(log_msg)
if log_list:
role = ('controller' if 'controller' in roles_blade[0]
@@ -99,47 +107,88 @@ class Deployment(object):
for log_msg in log_list:
print(log_msg + '\n')
-
def run_deploy(self):
SLEEP_TIME = 60
- LOG_FILE = 'cloud.log'
+ abort_after = 60 * int(self.deploy_timeout)
+ start = time.time()
log('Starting deployment of environment %s' % self.env_id)
- deploy_proc = run_proc('fuel --env %s deploy-changes | strings > %s'
- % (self.env_id, LOG_FILE))
-
+ deploy_id = None
ready = False
- for i in range(int(self.deploy_timeout)):
- env = parse(exec_cmd('fuel env --env %s' % self.env_id))
- log('Environment status: %s' % env[0][E['status']])
- r, _ = exec_cmd('tail -2 %s | head -1' % LOG_FILE, False)
- if r:
- log(r)
- if env[0][E['status']] == 'operational':
- ready = True
- break
- elif (env[0][E['status']] == 'error'
- or env[0][E['status']] == 'stopped'):
- break
- else:
+ timeout = False
+
+ attempts = 0
+ while attempts < 3:
+ try:
+ if time.time() > start + abort_after:
+ timeout = True
+ break
+ if not deploy_id:
+ deploy_id = self._start_deploy_task()
+ sts, prg, msg = self._deployment_status(deploy_id)
+ if sts == 'error':
+ log('Error during deployment: {}'.format(msg))
+ break
+ if sts == 'running':
+                    log('Environment deployment progress: {}%'.format(prg))
+ elif sts == 'ready':
+ ready = True
+ break
time.sleep(SLEEP_TIME)
-
- if (env[0][E['status']] <> 'operational'
- and env[0][E['status']] <> 'error'
- and env[0][E['status']] <> 'stopped'):
- err('Deployment timed out, environment %s is not operational, snapshot will not be performed'
- % self.env_id, self.collect_logs)
-
- run_proc_wait_terminated(deploy_proc)
- delete(LOG_FILE)
-
+ except (DeployNotStart, NodesGoOffline) as e:
+ log(e)
+ attempts += 1
+ deploy_id = None
+ time.sleep(SLEEP_TIME * attempts)
+
+ if timeout:
+ err('Deployment timed out, environment %s is not operational, '
+ 'snapshot will not be performed'
+ % self.env_id)
if ready:
- log('Environment %s successfully deployed' % self.env_id)
+ log('Environment %s successfully deployed'
+ % self.env_id)
else:
self.collect_error_logs()
err('Deployment failed, environment %s is not operational'
% self.env_id, self.collect_logs)
+ def _start_deploy_task(self):
+ out, _ = exec_cmd('fuel2 env deploy {}'.format(self.env_id), False)
+ id = self._deployment_task_id(out)
+ return id
+
+ def _deployment_task_id(self, response):
+ response = str(response)
+ if response.startswith('Deployment task with id'):
+ for s in response.split():
+ if s.isdigit():
+ return int(s)
+ raise DeployNotStart('Unable to start deployment: {}'.format(response))
+
+ def _deployment_status(self, id):
+ task = self._task_fields(id)
+ if task['status'] == 'error':
+ if task['message'].endswith(
+ 'offline. Remove them from environment and try again.'):
+ raise NodesGoOffline(task['message'])
+ return task['status'], task['progress'], task['message']
+
+ def _task_fields(self, id):
+ try:
+ out, _ = exec_cmd('fuel2 task show {} -f json'.format(id), False)
+ task_info = json.loads(out)
+ properties = {}
+            # for Fuel 9.0 this can be a list of dicts or a dict
+ # see https://bugs.launchpad.net/fuel/+bug/1625518
+ if isinstance(task_info, list):
+ for d in task_info:
+ properties.update({d['Field']: d['Value']})
+ else:
+ return task_info
+ return properties
+ except ValueError as e:
+ err('Unable to fetch task info: {}'.format(e))
def collect_logs(self):
log('Cleaning out any previous deployment logs')
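
The reworked run_deploy() above replaces tailing a 'fuel deploy-changes' log with polling the deployment task, retrying up to three times when the task fails to start or nodes drop offline, and enforcing an overall timeout. A condensed, self-contained sketch of that control flow (run_deploy_sketch and the injected callables are illustrative; the real method uses self._start_deploy_task and self._deployment_status):

    import time

    class DeployNotStart(Exception):
        """Unable to start deployment (as in the patch)."""

    class NodesGoOffline(Exception):
        """Nodes went offline during deployment (as in the patch)."""

    def run_deploy_sketch(start_task, get_status, timeout_minutes,
                          sleep_time=60, max_attempts=3):
        """start_task() -> task id; get_status(id) -> (status, progress, message)."""
        deadline = time.time() + 60 * int(timeout_minutes)
        deploy_id = None
        attempts = 0
        while attempts < max_attempts:
            if time.time() > deadline:
                return False                     # overall timeout
            try:
                if deploy_id is None:
                    deploy_id = start_task()
                status, progress, message = get_status(deploy_id)
                if status == 'error':
                    print('Error during deployment: {}'.format(message))
                    return False
                if status == 'ready':
                    return True
                print('Deployment progress: {}%'.format(progress))
                time.sleep(sleep_time)
            except (DeployNotStart, NodesGoOffline) as exc:
                # Transient failure: restart the task with linear back-off
                print(exc)
                attempts += 1
                deploy_id = None
                time.sleep(sleep_time * attempts)
        return False
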
@@ -155,7 +204,6 @@ class Deployment(object):
r, _ = exec_cmd('tar -czhf /root/deploy-%s.log.tar.gz /var/log/remote' % time.strftime("%Y%m%d-%H%M%S"), False)
log(r)
-
def verify_node_status(self):
node_list = parse(exec_cmd('fuel --env %s node' % self.env_id))
failed_nodes = []
@@ -169,7 +217,6 @@ class Deployment(object):
summary += '[node %s, status %s]\n' % (node, status)
err('Deployment failed: %s' % summary, self.collect_logs)
-
def health_check(self):
log('Now running sanity and smoke health checks')
r = exec_cmd('fuel health --env %s --check sanity,smoke --force' % self.env_id)
@@ -177,7 +224,6 @@ class Deployment(object):
if 'failure' in r:
err('Healthcheck failed!', self.collect_logs)
-
def deploy(self):
self.run_deploy()
self.verify_node_status()
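
As the comment in _task_fields() notes, 'fuel2 task show <id> -f json' may return either a list of Field/Value rows or a plain dict on Fuel 9.0 (see the referenced Launchpad bug). A minimal sketch of that normalization in isolation (parse_task_fields is an illustrative name; the patch does this inline on the exec_cmd output):

    import json

    def parse_task_fields(raw_json):
        """Normalize 'fuel2 task show <id> -f json' output to a flat dict."""
        task_info = json.loads(raw_json)
        if isinstance(task_info, list):
            # Rows look like {"Field": "status", "Value": "running"}
            return {row['Field']: row['Value'] for row in task_info}
        return task_info

    # Both shapes yield {'status': 'running', 'progress': 42}:
    parse_task_fields('[{"Field": "status", "Value": "running"},'
                      ' {"Field": "progress", "Value": 42}]')
    parse_task_fields('{"status": "running", "progress": 42}')
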
diff --git a/deploy/environments/execution_environment.py b/deploy/environments/execution_environment.py
index 3812902aa..7a0b4744e 100644
--- a/deploy/environments/execution_environment.py
+++ b/deploy/environments/execution_environment.py
@@ -47,6 +47,8 @@ class ExecutionEnvironment(object):
log('Deleting VM %s with disks %s' % (vm_name, disk_files))
exec_cmd('virsh destroy %s' % vm_name, False)
exec_cmd('virsh undefine --managed-save --remove-all-storage %s' % vm_name, False)
+ for file in disk_files:
+ delete(file)
def overwrite_xml(self, vm_xml, vm_definition_overwrite):
if not vm_definition_overwrite: