summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.gitignore1
-rw-r--r--.gitmodules70
-rw-r--r--INFO4
-rw-r--r--build/Makefile47
-rw-r--r--build/cache.mk2
-rw-r--r--build/config.mk80
-rw-r--r--build/docker/.gitignore2
-rw-r--r--build/docker/Makefile73
-rwxr-xr-xbuild/docker/runcontext4
-rw-r--r--build/f_isoroot/Makefile2
-rw-r--r--build/f_isoroot/f_odlpluginbuild/config.mk7
-rw-r--r--build/f_isoroot/f_ovs-nsh-dpdk-pluginbuild/config.mk2
-rw-r--r--build/f_isoroot/f_repobuild/Makefile11
-rwxr-xr-xbuild/f_isoroot/f_repobuild/select_ubuntu_repo.sh128
-rw-r--r--build/f_isoroot/f_tacker-pluginbuild/Makefile91
-rw-r--r--build/f_isoroot/f_tacker-pluginbuild/config.mk (renamed from build/f_isoroot/f_repobuild/config.mk)11
-rw-r--r--build/f_isoroot/f_tacker-pluginbuild/packages.yaml3
-rw-r--r--build/f_repos/Makefile166
-rw-r--r--build/f_repos/README.md121
-rw-r--r--build/f_repos/patch/fuel-library/0001-Reduce-ceilometer-memory-usage.patch (renamed from build/patch-repos/build/repos/fuel-library/0001-Reduce-ceilometer-memory-usage.patch)8
-rw-r--r--build/f_repos/patch/fuel-library/0002-Disable-token-revoke-to-increase-keystone-performanc.patch40
-rw-r--r--build/f_repos/patch/fuel-main/0001-OPNFV-Additions-to-bootstrap_admin_node.sh.patch (renamed from build/patch-repos/0010-bootstrap_admin_node.sh.patch)13
-rw-r--r--build/f_repos/patch/fuel-main/0002-OPNFV-showmenu-yes-in-isolinux.cfg.patch (renamed from build/patch-repos/0020-isolinux.cfg.patch)5
-rw-r--r--build/f_repos/patch/fuel-main/0003-repo-mirror-Allow-multi-arch-local-mirrors.patch (renamed from build/patch-repos/0030-repo-multi-arch-local-mirrors.patch)30
-rw-r--r--build/f_repos/patch/fuel-main/0004-xorriso-fails-to-add-files-with-path-longer-then-240.patch24
-rw-r--r--build/f_repos/patch/fuel-mirror/0001-Fixed-handling-http-redirects.patch87
-rw-r--r--build/f_repos/patch/fuel-web/0001-Mark-Intel-82599-10-Gigabit-NIC-as-DPDK-capable.patch (renamed from build/patch-repos/build/repos/fuel-nailgun/0010-Mark-Intel-82599-10-Gigabit-NIC-as-DPDK-capable.patch)9
m---------build/f_repos/sub/fuel-agent0
m---------build/f_repos/sub/fuel-astute0
m---------build/f_repos/sub/fuel-library0
m---------build/f_repos/sub/fuel-main0
m---------build/f_repos/sub/fuel-menu0
m---------build/f_repos/sub/fuel-mirror0
m---------build/f_repos/sub/fuel-nailgun-agent0
m---------build/f_repos/sub/fuel-ostf0
m---------build/f_repos/sub/fuel-ui0
m---------build/f_repos/sub/fuel-upgrade0
m---------build/f_repos/sub/fuel-web0
m---------build/f_repos/sub/network-checker0
m---------build/f_repos/sub/python-fuelclient0
m---------build/f_repos/sub/shotgun0
-rwxr-xr-xbuild/install/install.sh6
-rw-r--r--deploy/README6
-rw-r--r--deploy/cloud/deployment.py116
-rw-r--r--deploy/common.py14
-rw-r--r--deploy/config/plugins/fuel-odl_0.9.0.yaml4
-rw-r--r--deploy/config/plugins/fuel-tacker_0.9.0.yaml48
-rw-r--r--deploy/deploy-config.py534
-rw-r--r--deploy/environments/execution_environment.py2
-rw-r--r--deploy/install_fuel_master.py12
-rw-r--r--deploy/scenario/ha_odl-l2_sfc_heat_ceilometer_scenario.yaml9
-rw-r--r--deploy/scenario/ha_odl-l3_heat_ceilometer_scenario.yaml5
-rw-r--r--deploy/scenario/no-ha_odl-l2_sfc_heat_ceilometer_scenario.yaml9
-rw-r--r--deploy/scenario/no-ha_odl-l3_heat_ceilometer_scenario.yaml5
-rwxr-xr-xdeploy/templater.py41
-rw-r--r--deploy/templates/hardware_environment/vms/enea_lab/fuel.xml88
-rw-r--r--docs/installationprocedure/installation.instruction.rst2
-rw-r--r--docs/releasenotes/release-notes.rst20
58 files changed, 1500 insertions, 462 deletions
diff --git a/.gitignore b/.gitignore
index 9b4b3afa3..588243653 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
*.pyc
+.cache*
.project
.pydevproject
ci/config/
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 000000000..84b15ee45
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,70 @@
+[submodule "fuel-main"]
+ path = build/f_repos/sub/fuel-main
+ url = https://github.com/openstack/fuel-main.git
+ branch = stable/mitaka
+ ignore = all
+[submodule "fuel-library"]
+ path = build/f_repos/sub/fuel-library
+ url = https://github.com/openstack/fuel-library.git
+ branch = stable/mitaka
+ ignore = all
+[submodule "fuel-web"]
+ path = build/f_repos/sub/fuel-web
+ url = https://github.com/openstack/fuel-web.git
+ branch = stable/mitaka
+ ignore = all
+[submodule "python-fuelclient"]
+ path = build/f_repos/sub/python-fuelclient
+ url = https://github.com/openstack/python-fuelclient.git
+ branch = stable/mitaka
+ ignore = all
+[submodule "fuel-agent"]
+ path = build/f_repos/sub/fuel-agent
+ url = https://github.com/openstack/fuel-agent.git
+ branch = stable/mitaka
+ ignore = all
+[submodule "fuel-nailgun-agent"]
+ path = build/f_repos/sub/fuel-nailgun-agent
+ url = https://github.com/openstack/fuel-nailgun-agent.git
+ branch = stable/mitaka
+ ignore = all
+[submodule "fuel-astute"]
+ path = build/f_repos/sub/fuel-astute
+ url = https://github.com/openstack/fuel-astute.git
+ branch = stable/mitaka
+ ignore = all
+[submodule "fuel-ostf"]
+ path = build/f_repos/sub/fuel-ostf
+ url = https://github.com/openstack/fuel-ostf.git
+ branch = stable/mitaka
+ ignore = all
+[submodule "fuel-mirror"]
+ path = build/f_repos/sub/fuel-mirror
+ url = https://github.com/openstack/fuel-mirror.git
+ branch = stable/mitaka
+ ignore = all
+[submodule "fuel-menu"]
+ path = build/f_repos/sub/fuel-menu
+ url = https://github.com/openstack/fuel-menu.git
+ branch = stable/mitaka
+ ignore = all
+[submodule "shotgun"]
+ path = build/f_repos/sub/shotgun
+ url = https://github.com/openstack/shotgun.git
+ branch = stable/mitaka
+ ignore = all
+[submodule "network-checker"]
+ path = build/f_repos/sub/network-checker
+ url = https://github.com/openstack/network-checker.git
+ branch = stable/mitaka
+ ignore = all
+[submodule "fuel-upgrade"]
+ path = build/f_repos/sub/fuel-upgrade
+ url = https://github.com/openstack/fuel-upgrade.git
+ branch = stable/mitaka
+ ignore = all
+[submodule "fuel-ui"]
+ path = build/f_repos/sub/fuel-ui
+ url = https://github.com/openstack/fuel-ui.git
+ branch = stable/mitaka
+ ignore = all
diff --git a/INFO b/INFO
index d76f159a9..37a1e3cf3 100644
--- a/INFO
+++ b/INFO
@@ -16,9 +16,11 @@ nikolas.hermanns@ericsson.com
jonas.bjurel@ericsson.com
stefan.k.berg@ericsson.com
daniel.smith@ericsson.com
-szilard.cserey@ericsson.com
+szilard.cserey@gmail.com
mskalski@mirantis.com
ruijing.guo@intel.com
+fzhadaev@mirantis.com
+Alexandru.Avadanii@enea.com
Link to TSC approval of the project: http://meetbot.opnfv.org/meetings/opnfv-meeting/2015/opnfv-meeting.2015-07-07-13.59.log.html
Link(s) to approval of additional committers: http://ircbot.wl.linuxfoundation.org/meetings/opnfv-fuel/2016/opnfv-fuel.2016-04-28-11.49.html
diff --git a/build/Makefile b/build/Makefile
index 59d9096c6..fadb8a8ba 100644
--- a/build/Makefile
+++ b/build/Makefile
@@ -21,11 +21,11 @@ SHELL = /bin/bash
export MOSVERSION = 9.0
export ISOSRC = file:$(shell pwd)/fuel-$(MOSVERSION).iso
export ISOCACHE = $(shell pwd)/$(shell basename $(ISOSRC))
-export PRODNO = "OPNFV_FUEL"
+export PRODNO ?= "OPNFV_FUEL"
export REVSTATE = "P0000"
export USER ?= $(shell whoami)
export BUILD_DATE = $(shell date --utc +%Y-%m-%d:%H:%M)
-export OPNFV_GIT_SHA = $(shell git rev-parse HEAD)
+export OPNFV_GIT_SHA ?= $(shell git rev-parse HEAD)
# Store in /etc/fuel_build_id on fuel master
export BUILD_ID := $(PRODNO)_$(BUILD_DATE)_$(OPNFV_GIT_SHA)
@@ -58,7 +58,9 @@ export MIRROR_UBUNTU_ROOT := $(shell echo -n '/' ; echo "$(MIRROR_UBUNTU_URL)" |
export LATEST_MIRROR_ID_URL := http://$(shell ./select_closest_fuel_mirror.py)
export MIRROR_MOS_UBUNTU ?= $(shell echo "$(LATEST_MIRROR_ID_URL)" | cut -d'/' -f3)
-export LATEST_TARGET_UBUNTU := $(shell curl -sSf "$(MIRROR_MOS_UBUNTU)/mos-repos/ubuntu/$(MOSVERSION).target.txt" | head -1)
+#export LATEST_TARGET_UBUNTU := $(shell curl -sSf "$(MIRROR_MOS_UBUNTU)/mos-repos/ubuntu/$(MOSVERSION).target.txt" | head -1)
+# Fuel 9.0
+export LATEST_TARGET_UBUNTU := snapshots/9.0-2016-06-23-164100-copy
export MIRROR_MOS_UBUNTU_ROOT := /mos-repos/ubuntu/$(LATEST_TARGET_UBUNTU)
export LATEST_TARGET_CENTOS := $(shell curl -sSf "$(LATEST_MIRROR_ID_URL)/mos-repos/centos/mos$(MOSVERSION)-centos7/os.target.txt" | head -1)
@@ -80,10 +82,7 @@ ORIGDIR := $(TOPDIR)/origiso
# END of variables to customize
#############################################################################
-# Fuel-main destination path and fuel-* submodule patching, for full list check:
-# https://github.com/openstack/fuel-main/blob/stable/mitaka/repos.mk#L32-L44
FUEL_MAIN_DIR := /tmp/fuel-main
-FUEL_PATCHES := $(shell find $(BUILD_BASE)/patch-repos -name '*.patch' | sort)
SUBCLEAN = $(addsuffix .clean,$(SUBDIRS))
@@ -111,9 +110,12 @@ include cache.mk
$(ISOCACHE):
# Clone Fuel to non-persistent location and build
sudo rm -rf $(FUEL_MAIN_DIR)
+ $(MAKE) -C f_repos -f Makefile release
git clone $(FUEL_MAIN_REPO) $(FUEL_MAIN_DIR)
- git -C $(FUEL_MAIN_DIR) checkout $(FUEL_MAIN_TAG)
- @echo "fuel" `git -C $(FUEL_MAIN_DIR) rev-parse HEAD` >> $(VERSION_FILE)
+ # Save upstream ref, checkout OPNFV tag (upstream ref + patches)
+ @echo "fuel" `git -C $(FUEL_MAIN_DIR) \
+ rev-parse $(F_OPNFV_TAG)-root` >> $(VERSION_FILE)
+ git -C $(FUEL_MAIN_DIR) checkout $(F_OPNFV_TAG)
# Remove Docker optimizations, otherwise multistrap will fail during
# Fuel build.
sudo rm -f /etc/apt/apt.conf.d/docker*
@@ -134,14 +136,7 @@ $(ISOCACHE):
sudo docker info
# fuel-main Makefiles do not like `make -C`
cd $(FUEL_MAIN_DIR) && make repos
- $(REPOINFO) -r $(FUEL_MAIN_DIR) > gitinfo_fuel.txt
- # OPNFV patches at Fuel build time
- # Need to be commited in order for them to be considered by the Fuel
- # build system
- $(foreach patch,$(FUEL_PATCHES),git \
- -C $(subst $(BUILD_BASE)/patch-repos,$(FUEL_MAIN_DIR),$(dir $(patch))) \
- am --whitespace=nowarn --committer-date-is-author-date $(patch) || \
- (echo 'Error: Failed patching Fuel repos!' ; exit 1);)
+ cp f_repos/.cachefuelinfo gitinfo_fuel.txt
# Repeat build up to three times
sudo -E ./fuel_build_loop
@@ -172,6 +167,7 @@ patch-packages:
.PHONY: clean $(SUBCLEAN)
clean: $(SUBCLEAN)
+ $(MAKE) -C f_repos -f Makefile clean
$(MAKE) -C patch-packages -f Makefile clean
@rm -f *.iso
@rm -Rf release
@@ -182,7 +178,8 @@ clean: $(SUBCLEAN)
.PHONY: deepclean
deepclean: clean clean-cache
- make -C docker clean
+ $(MAKE) -C f_repos -f Makefile deepclean
+ $(MAKE) -C docker clean
docker rmi opnfv.org/ubuntu-builder:14.04 &>/dev/null || exit 0
docker rmi opnfv.org/ubuntu-builder:latest &>/dev/null || exit 0
docker rmi ubuntu:14.04 &>/dev/null || exit 0
@@ -224,24 +221,26 @@ debug:
#############################################################################
# Create a unique hash to be used for getting and putting cache, based on:
-# - The commit ID of the full Fuel repo structre
+# - The commit ID of the full Fuel repo structure
# - The contents of all local Fuel patches
+# - Makefile, config and sensitive build scripts fingerprints
+# NOTE: Patching is deterministic, so we can fingerprint (submodule root commit
+# info + OPNFV patches) at once, after patches-import.
+# NOTE: When git submodule remote tracking is active, `git submodule status`
+# will point to the latest commits (remote/branch/HEAD) + OPNFV patches.
.cacheid:
- git clone $(FUEL_MAIN_REPO) $(FUEL_MAIN_DIR)
- git -C $(FUEL_MAIN_DIR) checkout $(FUEL_MAIN_TAG)
- make -C $(FUEL_MAIN_DIR) repos
- $(REPOINFO) -r $(FUEL_MAIN_DIR) > .cachedata
- $(foreach patch,$(FUEL_PATCHES),sha1sum $(patch) >> .cachedata;)
+ $(MAKE) -C f_repos -f Makefile get-cache release
+ git submodule status | cut -c2-41 > .cachedata
sha1sum fuel_build_loop >> .cachedata
sha1sum config.mk >> .cachedata
sha1sum Makefile >> .cachedata
$(CACHETOOL) getbiweek >> .cachedata
cat .cachedata | $(CACHETOOL) getid > .cacheid
- # Not removing fuel-main as it is re-used in build
# Clean local data related to caching - called prior to ordinary build
.PHONY: clean-cache
clean-cache: $(SUBCLEANCACHE)
+ $(MAKE) -C f_repos -f Makefile clean-cache
rm -f .cachedata .cacheid
# Try to download cache - called prior to ordinary build
diff --git a/build/cache.mk b/build/cache.mk
index f6db01797..a65f310d0 100644
--- a/build/cache.mk
+++ b/build/cache.mk
@@ -51,5 +51,5 @@ cache:
exit 1; \
fi
@docker version >/dev/null 2>&1 || (echo 'No Docker installation available'; exit 1)
- @make -C docker
+ @make -C docker get-cache all
docker/runcontext $(DOCKERIMG) $(MAKE) $(MAKEFLAGS) cached-all
diff --git a/build/config.mk b/build/config.mk
index f78043e18..ab278589e 100644
--- a/build/config.mk
+++ b/build/config.mk
@@ -1,5 +1,5 @@
##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
+# Copyright (c) 2015,2016 Ericsson AB and others.
# stefan.k.berg@ericsson.com
# jonas.bjurel@ericsson.com
# All rights reserved. This program and the accompanying materials
@@ -8,34 +8,64 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-FUEL_MAIN_REPO := https://github.com/openstack/fuel-main
-FUEL_MAIN_TAG := 9.0.1
-MOS_VERSION = 9.0
+# This tag is NOT checked out, it only serves a cosmetic purpose of hinting
+# what upstream Fuel components our submodules are bound to (while tracking
+# remotes, ALL submodules will point to remote branch HEAD).
+# NOTE: Pinning fuel-main or other submodules to a specific commit/tag is
+# done ONLY via git submodules.
+FUEL_MAIN_TAG = 9.0.1
+MOS_VERSION = 9.0
OPENSTACK_VERSION = mitaka-9.0
-# Pinning down exact Fuel repo versions for Fuel 9.0.1
-export FUELLIB_COMMIT?=e283b62750d9e26355981b3ad3be7c880944ae0f
-export NAILGUN_COMMIT?=e2b85bafb68c348f25cb7cceda81edc668ba2e64
-export PYTHON_FUELCLIENT_COMMIT?=67d8c693a670d27c239d5d175f3ea2a0512c498c
-export FUEL_AGENT_COMMIT?=7ffbf39caf5845bd82b8ce20a7766cf24aa803fb
-export FUEL_NAILGUN_AGENT_COMMIT?=46fa0db0f8944f9e67699d281d462678aaf4db26
-export ASTUTE_COMMIT?=390b257240d49cc5e94ed5c4fcd940b5f2f6ec64
-export OSTF_COMMIT?=f09c98ff7cc71ee612b2450f68a19f2f9c64345a
-export FUEL_MIRROR_COMMIT?=d1ef06b530ce2149230953bb3810a88ecaff870c
-export FUELMENU_COMMIT?=0ed9e206ed1c6271121d3acf52a6bf757411286b
-export SHOTGUN_COMMIT?=781a8cfa0b6eb290e730429fe2792f2b6f5e0c11
-export NETWORKCHECKER_COMMIT?=fcb47dd095a76288aacf924de574e39709e1f3ca
-export FUELUPGRADE_COMMIT?=c1c4bac6a467145ac4fac73e4a7dd2b00380ecfb
-export FUEL_UI_COMMIT?=90de7ef4477230cb7335453ed26ed4306ca6f04f
-
-# for the patches applying purposes
+# FIXME(alav): Disable remote tracking for now, stick to submodule commits
+FUEL_TRACK_REMOTES =
+
+##############################################################################
+# Fuel components pinning / remote tracking; use submodules from f_repos
+##############################################################################
+
+# git submodule & patch locations for Fuel components
+F_GIT_ROOT := $(shell git rev-parse --show-toplevel)
+F_GIT_DIR := $(shell git rev-parse --git-dir)
+F_SUBMOD_DIR := ${F_GIT_ROOT}/build/f_repos/sub
+F_PATCH_DIR := ${F_GIT_ROOT}/build/f_repos/patch
+F_OPNFV_TAG := ${FUEL_MAIN_TAG}-opnfv
+
+# fuel-main repo location used by main Makefile ISO building, use submodule
+FUEL_MAIN_REPO := ${F_SUBMOD_DIR}/fuel-main
+
+export FUELLIB_REPO?=${F_SUBMOD_DIR}/fuel-library
+export NAILGUN_REPO?=${F_SUBMOD_DIR}/fuel-web
+export PYTHON_FUELCLIENT_REPO?=${F_SUBMOD_DIR}/python-fuelclient
+export FUEL_AGENT_REPO?=${F_SUBMOD_DIR}/fuel-agent
+export FUEL_NAILGUN_AGENT_REPO?=${F_SUBMOD_DIR}/fuel-nailgun-agent
+export ASTUTE_REPO?=${F_SUBMOD_DIR}/fuel-astute
+export OSTF_REPO?=${F_SUBMOD_DIR}/fuel-ostf
+export FUEL_MIRROR_REPO?=${F_SUBMOD_DIR}/fuel-mirror
+export FUELMENU_REPO?=${F_SUBMOD_DIR}/fuel-menu
+export SHOTGUN_REPO?=${F_SUBMOD_DIR}/shotgun
+export NETWORKCHECKER_REPO?=${F_SUBMOD_DIR}/network-checker
+export FUELUPGRADE_REPO?=${F_SUBMOD_DIR}/fuel-upgrade
+export FUEL_UI_REPO?=${F_SUBMOD_DIR}/fuel-ui
+
+# OPNFV tags are automatically applied by `make -C f_repos patches-import`
+export FUELLIB_COMMIT?=${F_OPNFV_TAG}
+export NAILGUN_COMMIT?=${F_OPNFV_TAG}
+export PYTHON_FUELCLIENT_COMMIT?=${F_OPNFV_TAG}
+export FUEL_AGENT_COMMIT?=${F_OPNFV_TAG}
+export FUEL_NAILGUN_AGENT_COMMIT?=${F_OPNFV_TAG}
+export ASTUTE_COMMIT?=${F_OPNFV_TAG}
+export OSTF_COMMIT?=${F_OPNFV_TAG}
+export FUEL_MIRROR_COMMIT?=${F_OPNFV_TAG}
+export FUELMENU_COMMIT?=${F_OPNFV_TAG}
+export SHOTGUN_COMMIT?=${F_OPNFV_TAG}
+export NETWORKCHECKER_COMMIT?=${F_OPNFV_TAG}
+export FUELUPGRADE_COMMIT?=${F_OPNFV_TAG}
+export FUEL_UI_COMMIT?=${F_OPNFV_TAG}
+
+# for the patches applying purposes (empty git config in docker build container)
export GIT_COMMITTER_NAME?=Fuel OPNFV
export GIT_COMMITTER_EMAIL?=fuel@opnfv.org
DOCKER_REPO := http://get.docker.com/builds/Linux/x86_64
DOCKER_TAG := docker-latest
-
-.PHONY: get-fuel-repo
-get-fuel-repo:
- @echo $(FUEL_MAIN_REPO) $(FUEL_MAIN_TAG)
-
diff --git a/build/docker/.gitignore b/build/docker/.gitignore
new file mode 100644
index 000000000..2585910ce
--- /dev/null
+++ b/build/docker/.gitignore
@@ -0,0 +1,2 @@
+.docker*
+ubuntu-builder/Dockerfile
diff --git a/build/docker/Makefile b/build/docker/Makefile
index d4423b0a2..783881e8d 100644
--- a/build/docker/Makefile
+++ b/build/docker/Makefile
@@ -1,5 +1,5 @@
##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
+# Copyright (c) 2015,2016 Ericsson AB and others.
# stefan.k.berg@ericsson.com
# jonas.bjurel@ericsson.com
# All rights reserved. This program and the accompanying materials
@@ -12,6 +12,15 @@ SHELL = /bin/bash
FILES = $(wildcard ubuntu-builder/*) runcontext
DOCKER_VER := $(shell [[ "$$(docker version --format '{{.Client.Version}}')" =~ ([0-9]+)\.([0-9]+) ]] && echo $$(( $${BASH_REMATCH[1]} * 100 + $${BASH_REMATCH[2]} )))
+# Builder tag lifespan, force container rebuild X days after tag creation
+DOCKER_KEEP = 1 days
+DOCKER_IMG = opnfv.org/ubuntu-builder
+DOCKER_TAG = ${DOCKER_IMG}:14.04
+# Shell contruct for checking our tag object did not expire
+DOCKER_EXPIRED = D_TAG_BIRTH=`docker inspect --format="{{.Created}}" \
+ ${DOCKER_TAG} 2>/dev/null`; test -z "$$D_TAG_BIRTH" -o `date +%s` -gt \
+ `date -d "$$D_TAG_BIRTH +${DOCKER_KEEP}" +%s`; echo $$?
+
# Don't use -f flag when docker is newer than 1.9
# https://docs.docker.com/engine/deprecated/#/f-flag-on-docker-tag
ifeq ($(shell echo "$(DOCKER_VER)>109" | bc), 1)
@@ -23,7 +32,7 @@ endif
.PHONY: all
all: .docker
-.docker: $(FILES)
+.dockercfg: $(FILES)
cp Dockerfile ubuntu-builder/Dockerfile
# Only add proxy ENVs where set in host - needed to pull the base Ubuntu image
test -n "${http_proxy}" && sed -i "s;INSERT_HTTP_PROXY;${http_proxy};" ubuntu-builder/Dockerfile || exit 0
@@ -33,10 +42,62 @@ all: .docker
test -n "${HTTPS_PROXY}" && sed -i "s;INSERT_HTTPS_PROXY;${HTTPS_PROXY};" ubuntu-builder/Dockerfile || exit 0
test -n "${NO_PROXY}" && sed -i "s;INSERT_NO_PROXY;${NO_PROXY};" ubuntu-builder/Dockerfile || exit 0
sed -i '/INSERT_/d' ubuntu-builder/Dockerfile
- /usr/bin/docker build --rm=true --no-cache=true -t opnfv.org/ubuntu-builder:14.04 ubuntu-builder
- /usr/bin/docker tag ${tag_flags} opnfv.org/ubuntu-builder:14.04 opnfv.org/ubuntu-builder
- touch .docker
+ touch $@
+
+.docker: .dockercfg
+ @if test -f .cacheid -o "$(shell ${DOCKER_EXPIRED})" -eq "0"; then \
+ /usr/bin/docker build --rm=true --no-cache=true \
+ -t ${DOCKER_TAG} ubuntu-builder && \
+ /usr/bin/docker tag ${tag_flags} ${DOCKER_TAG} ${DOCKER_IMG}; \
+ else \
+ echo "Docker: Tag '${DOCKER_TAG}' was created less than" \
+ "${DOCKER_KEEP} ago, skipping re-build."; \
+ fi
+ touch $@
+ test -f .cacheid && $(MAKE) -f Makefile put-cache || exit 0
.PHONY: clean
clean:
- rm -f .docker ubuntu-builder/Dockerfile
+ rm -f .docker* ubuntu-builder/Dockerfile
+
+.PHONY: deepclean
+deepclean: clean clean-cache
+
+##############################################################################
+# Cache operations - only used when building through ci/build.sh
+##############################################################################
+
+# NOTE: For docker, we only get/put cache to fingerprint build scripts and
+# env vars, its cached data holds only an empty .docker file.
+
+# Create a unique hash to be used for getting and putting cache, based on:
+# - ubuntu-builder Dockerfile (includes eventual proxy env vars), runcontext;
+# - The contents of this Makefile
+.cacheid: .dockercfg
+ sha1sum Makefile runcontext $(wildcard ubuntu-builder/*) > .cachedata
+ cat .cachedata | $(CACHETOOL) getid > .cacheid
+
+# Clean local data related to caching - called prior to ordinary build
+.PHONY: clean-cache
+clean-cache:
+ rm -f .cachedata .cacheid
+
+# Try to download cache - called prior to ordinary build
+.PHONY: get-cache
+get-cache: .cacheid
+ @if $(CACHETOOL) check $(shell cat .cacheid); then \
+ if test "$(shell ${DOCKER_EXPIRED})" -eq "0"; then \
+ echo "Docker: Tag '${DOCKER_TAG}' missing" \
+ "or older than ${DOCKER_KEEP}, not using it."; \
+ else \
+ touch .docker; \
+ fi; \
+ else \
+ echo "No cache item found for $(shell cat .cacheid)" ;\
+ exit 0;\
+ fi
+
+# Store cache if not already stored - called after ordinary build
+.PHONY: put-cache
+put-cache: .cacheid
+ @tar cf - .docker | $(CACHETOOL) put $(shell cat .cacheid)
diff --git a/build/docker/runcontext b/build/docker/runcontext
index daad663ac..b17571135 100755
--- a/build/docker/runcontext
+++ b/build/docker/runcontext
@@ -111,11 +111,13 @@ if [ -n "$CACHEBASE" ]; then
fi
fi
+# FIXME: TERM is required because: https://github.com/docker/docker/issues/9299
RUN_CONTEXT_OPT="--cidfile $CID_FILE --privileged=true --rm \
+ -e TERM=$TERM \
-e HOME=$HOME -e CACHEDEBUG -e CACHETRANSPORT -e CACHEMAXAGE -e CACHEBASE \
-e BUILD_FUEL_PLUGINS -e MIRROR_UBUNTU -e MIRROR_UBUNTU_ROOT \
-e MIRROR_MOS_UBUNTU -e MIRROR_MOS_UBUNTU_ROOT -e MIRROR_FUEL \
- -e LATEST_TARGET_UBUNTU -e UBUNTU_ARCH \
+ -e LATEST_TARGET_UBUNTU -e UBUNTU_ARCH -e OPNFV_GIT_SHA \
-u $USER_ID:$GROUP_ID -w $PWD \
-v $GITROOT:$GITROOT -v /sys/fs/cgroup:/sys/fs/cgroup:ro $CACHEMOUNT"
diff --git a/build/f_isoroot/Makefile b/build/f_isoroot/Makefile
index 448f4158c..1539783c9 100644
--- a/build/f_isoroot/Makefile
+++ b/build/f_isoroot/Makefile
@@ -11,7 +11,7 @@
# Add Fuel plugin build targets here
# Plugins disabled for the Fuel Mitaka uplift. Please re-enable your plugin as you have
# verified it!
-PLUGINS ?= f_odlpluginbuild f_onosfwpluginbuild f_vsperfpluginbuild f_ovs-nsh-dpdk-pluginbuild f_yardstick-pluginbuild f_kvm-pluginbuild f_bgpvpn-pluginbuild f_collectd-ceilometer-pluginbuild
+PLUGINS ?= f_odlpluginbuild f_onosfwpluginbuild f_vsperfpluginbuild f_ovs-nsh-dpdk-pluginbuild f_yardstick-pluginbuild f_kvm-pluginbuild f_bgpvpn-pluginbuild f_collectd-ceilometer-pluginbuild f_tacker-pluginbuild
#PLUGINS = f_odlpluginbuild f_onosfwpluginbuild f_ovsnfv-dpdk-pluginbuild f_vsperfpluginbuild f_ovs-nsh-dpdk-pluginbuild f_bgpvpn-pluginbuild
export PLUGINS
#NON_8-0_REBASED_PLUGINS = f_bgpvpn-pluginbuild
diff --git a/build/f_isoroot/f_odlpluginbuild/config.mk b/build/f_isoroot/f_odlpluginbuild/config.mk
index 466aba2ae..55facfe4f 100644
--- a/build/f_isoroot/f_odlpluginbuild/config.mk
+++ b/build/f_isoroot/f_odlpluginbuild/config.mk
@@ -8,9 +8,8 @@
##############################################################################
FUEL_PLUGIN_ODL_BRANCH ?= master
-FUEL_PLUGIN_ODL_CHANGE ?= 0ff4ebdbfe5f681f0b44eea14c9c2b3dbb10b1d5
+FUEL_PLUGIN_ODL_CHANGE ?= 7281b0db9171e5aeda0c23ba655f086a909395a1
FUEL_PLUGIN_ODL_REPO ?= https://github.com/openstack/fuel-plugin-opendaylight.git
-export ODL_TARBALL_LOCATION?=https://nexus.opendaylight.org/content/repositories/public/org/opendaylight/integration/distribution-karaf/0.4.3-Beryllium-SR3/distribution-karaf-0.4.3-Beryllium-SR3.tar.gz
-export ODL_VERSION_NUMBER?=0.4.3
-export ODL_BORON_TARBALL_LOCATION?=http://artifacts.opnfv.org/fuel/colorado/vendor/distribution-karaf-0.5.0-Boron-RC3.5.tar.gz
+export ODL_TARBALL_LOCATION?=https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.5.0-Boron/distribution-karaf-0.5.0-Boron.tar.gz
+export ODL_VERSION_NUMBER?=0.5.0
diff --git a/build/f_isoroot/f_ovs-nsh-dpdk-pluginbuild/config.mk b/build/f_isoroot/f_ovs-nsh-dpdk-pluginbuild/config.mk
index 8effbd26b..f0392ac24 100644
--- a/build/f_isoroot/f_ovs-nsh-dpdk-pluginbuild/config.mk
+++ b/build/f_isoroot/f_ovs-nsh-dpdk-pluginbuild/config.mk
@@ -7,6 +7,6 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-OVS_NSH_DPDK_BRANCH?=074aadce66ef3cf322a00a3cff5c57deb84088b5
+OVS_NSH_DPDK_BRANCH?=b3aed46ae0f1c86a1d6ad9058b2260844d591fbd
OVS_NSH_DPDK_REPO?=https://review.openstack.org/openstack/fuel-plugin-ovs
OVS_NSH_DPDK_CHANGE?=
diff --git a/build/f_isoroot/f_repobuild/Makefile b/build/f_isoroot/f_repobuild/Makefile
index 7c6b16304..8beb8824f 100644
--- a/build/f_isoroot/f_repobuild/Makefile
+++ b/build/f_isoroot/f_repobuild/Makefile
@@ -13,7 +13,6 @@ TOP := $(shell pwd)
TMP_ROOT_DIR := $(shell echo "$(MIRROR_UBUNTU_ROOT)" | cut -d "/" -f2)
include ../../config.mk
-include config.mk
export MOS_VERSION
export OPENSTACK_VERSION
@@ -25,8 +24,7 @@ nailgun:
sudo apt-get install -y git libxml2-dev libxslt-dev python-dev python-pip libz-dev libyaml-dev createrepo python-yaml
rm -Rf nailgun
sudo mkdir -p /var/www/nailgun
- git clone $(FUEL_MIRROR_REPO)
- cd fuel-mirror && git checkout -q $(FUEL_MIRROR_COMMIT)
+ ln -sf ${F_SUBMOD_DIR}/fuel-mirror fuel-mirror
sudo pip install -U -r ./fuel-mirror/requirements.txt
sudo pip install ./fuel-mirror
sudo pip install ./fuel-mirror/contrib/fuel_mirror
@@ -34,7 +32,7 @@ nailgun:
sudo fuel-mirror --debug --config ./opnfv-config.yaml create --group ubuntu --pattern=ubuntu
sudo chmod -R 755 /var/www/nailgun
cp -Rp /var/www/nailgun .
- # On the end we want to have ubuntu repository in mirrors/ubuntu directory
+ # In the end we want to have ubuntu repository in mirrors/ubuntu directory
-if [ "$(MIRROR_UBUNTU_ROOT)" != "/ubuntu/" ]; then \
mkdir -p nailgun/mirrors/ubuntu;\
mv nailgun/mirrors$(MIRROR_UBUNTU_ROOT)* nailgun/mirrors/ubuntu;\
@@ -55,9 +53,9 @@ release:nailgun
@cp -Rp nailgun ../release/opnfv/nailgun
@cp fuel_bootstrap_cli.yaml ../release/opnfv/
-#############################################################################
+############################################################################
# Cache operations - only used when building through ci/build.sh
-#############################################################################
+############################################################################
# Create a unique hash to be used for getting and putting cache, based on:
# - Year and week (causing the cache to be rebuilt weekly)
@@ -65,7 +63,6 @@ release:nailgun
.cacheid:
date +"Repocache %G%V" > .cachedata
sha1sum Makefile >> .cachedata
- sha1sum config.mk >> .cachedata
$(CACHETOOL) packages >> .cachedata
echo -n $(UBUNTU_ARCH) | sha1sum | awk {'print $$1'} >> .cachedata
cat .cachedata | $(CACHETOOL) getid > .cacheid
diff --git a/build/f_isoroot/f_repobuild/select_ubuntu_repo.sh b/build/f_isoroot/f_repobuild/select_ubuntu_repo.sh
index c3bb5cf36..24bd42234 100755
--- a/build/f_isoroot/f_repobuild/select_ubuntu_repo.sh
+++ b/build/f_isoroot/f_repobuild/select_ubuntu_repo.sh
@@ -1,38 +1,102 @@
#!/bin/bash
-##############################################################################
-# Copyright (c) 2015,2016 Ericsson AB and others.
-# mskalski@mirantis.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-# Try to choose close ubuntu mirror from mirrors.txt, but "whitewash" this
-# against the full repo list to removed mirrors not up-to-date.
-
-# Some Ubuntu mirrors seem less reliable for this type of mirroring -
-# as they are discoved they can be added to the blacklist below in order
-# for them not to be considered.
-BLACKLIST="mirror.clibre.uqam.ca"
-
-for url in $((curl -s https://launchpad.net/ubuntu/+archivemirrors | \
- grep -P -B8 "statusUP|statusSIX" | \
- grep -o -P "(f|ht)tp.*\"" | \
- sed 's/"$//' | sort | uniq; \
- curl -s http://mirrors.ubuntu.com/mirrors.txt | sort | uniq) | \
- sort | uniq -d)
+
+BLACKLIST="http://mirrors.se.eu.kernel.org/ubuntu/"
+#BLACKLIST+=" http://foo.bar"
+
+cleanup() {
+ rm -f $TMPFILE
+}
+
+debugmsg() {
+ test -n "$DEBUG" && echo "$@" >&2
+}
+
+
+# Check if url is blacklisted in this script
+blacklisted () {
+ for blackurl in $BLACKLIST
+ do
+ if [ "$1" == "$blackurl" ]; then
+ return 0
+ fi
+ done
+ return 1
+}
+
+
+# Check mirror's integrity
+check_mirror () {
+ mirror=$1
+ status=0
+ for packdir in dists/trusty-updates/main/binary-amd64 \
+ dists/trusty-updates/restricted/binary-amd64 \
+ dists/trusty-updates/universe/binary-amd64 \
+ dists/trusty-updates/multiverse/binary-amd64 \
+ dists/trusty-security/main/binary-amd64 \
+ dists/trusty-security/restricted/binary-amd64 \
+ dists/trusty-security/universe/binary-amd64 \
+ dists/trusty-security/multiverse/binary-amd64 \
+ dists/trusty-proposed/main/binary-amd64 \
+ dists/trusty-proposed/restricted/binary-amd64 \
+ dists/trusty-proposed/universe/binary-amd64 \
+ dists/trusty-proposed/multiverse/binary-amd64 \
+ dists/trusty/main/binary-amd64 \
+ dists/trusty/restricted/binary-amd64 \
+ dists/trusty/universe/binary-amd64 \
+ dists/trusty/multiverse/binary-amd64 \
+ dists/trusty-backports/main/binary-amd64 \
+ dists/trusty-backports/restricted/binary-amd64 \
+ dists/trusty-backports/universe/binary-amd64 \
+ dists/trusty-backports/multiverse/binary-amd64
+ do
+ for packfile in Release Packages.gz
+ do
+ if [ $status -ne 1 ]; then
+ curl --output /dev/null --silent --head --fail \
+ $mirror/$packdir/$packfile
+ if [ $? -ne 0 ]; then
+ debugmsg "$mirror: Faulty (at least missing $packdir/$packfile)"
+ status=1
+ fi
+ fi
+ done
+ done
+ return $status
+}
+
+if [ "$1" == "-d" ]; then
+ DEBUG=1
+fi
+
+# Hardcode for testing purposes
+DEBUG=1
+
+TMPFILE=$(mktemp /tmp/mirrorsXXXXX)A
+trap cleanup exit
+
+# Generate a list of mirrors considered as "up"
+curl -s https://launchpad.net/ubuntu/+archivemirrors | \
+ grep -P -B8 "statusUP|statusSIX" | \
+ grep -o -P "(f|ht)tp.*\"" | \
+ sed 's/"$//' | sort | uniq > $TMPFILE
+
+# Iterate over "close" mirror, check that they are considered up
+# and sane.
+for url in $(curl -s http://mirrors.ubuntu.com/mirrors.txt)
do
- host=$(echo $url | cut -d'/' -f3)
- echo ${BLACKLIST} | grep -q ${host} && continue
- if curl -s -o /dev/null --head --fail "$url"; then
- echo $url
- exit 0
+ if ! grep -q $url $TMPFILE; then
+ debugmsg "$url Faulty (detected by Ubuntu)"
+ elif blacklisted $url; then
+ debugmsg "$url blacklisted"
+ elif [ -z $BESTURL ]; then
+ if grep -q $url $TMPFILE && check_mirror $url; then
+ debugmsg "$url: OK (setting as primary URL)"
+ BESTURL=$url
+ test -z "$DEBUG" && break
+ fi
else
- continue
+ grep -q $url $TMPFILE && check_mirror $url && debugmsg "$url: OK"
fi
done
-# If no suitable local mirror can be found,
-# the default archive is returned instead.
-echo "http://archive.ubuntu.com/ubuntu/"
+echo "$BESTURL"
diff --git a/build/f_isoroot/f_tacker-pluginbuild/Makefile b/build/f_isoroot/f_tacker-pluginbuild/Makefile
new file mode 100644
index 000000000..997430de1
--- /dev/null
+++ b/build/f_isoroot/f_tacker-pluginbuild/Makefile
@@ -0,0 +1,91 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# geopar@intracom-telecom.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+TOP := $(shell pwd)
+
+############################################################################
+# BEGIN of Include definitions
+#
+include config.mk
+#
+# END Include definitions
+#############################################################################
+
+
+.PHONY: all
+all: .tackerbuild
+
+.PHONY: clean
+clean:
+ @rm -f .tackerbuild ../release/opnfv/tacker*.rpm tacker*.rpm
+ @rm -f $(BUILD_BASE)/gitinfo_tackerplugin.txt gitinfo_tackerplugin.txt
+
+.PHONY: release
+release:.tackerbuild
+ @rm -f ../release/opnfv/tacker*.rpm
+ @mkdir -p ../release/tacker
+ @cp tacker*.rpm ../release/opnfv/
+ cp gitinfo_tackerplugin.txt $(BUILD_BASE)
+
+.tackerbuild:
+ rm -rf fuel-plugin-tacker
+ git clone $(TACKER_REPO)
+ cd fuel-plugin-tacker; \
+ git checkout $(TACKER_BRANCH); \
+ if [ ! -z $(TACKER_CHANGE) ]; then \
+ git fetch $(TACKER_REPO) $(TACKER_CHANGE); \
+ git checkout FETCH_HEAD; \
+ fi
+ fpb --debug --build fuel-plugin-tacker/
+ @mv fuel-plugin-tacker/tacker*.rpm .
+ $(REPOINFO) -r . > gitinfo_tackerplugin.txt
+ @rm -rf fuel-plugin-tacker
+ @touch .tackerbuild
+ # Store artifact in cache straight away if caching is enabled
+ # (no .cacheid will be present unless this is a cached build)
+ test -f .cacheid && $(MAKE) -f Makefile put-cache || exit 0
+
+#############################################################################
+# Cache operations - only used when building through ci/build.sh
+#############################################################################
+
+
+# Create a unique hash to be used for getting and putting cache, based on:
+# - The SHA1 hash of the HEAD on the plugin repo's $(TACKER_BRANCH)
+# - The contents of this Makefile
+.cacheid:
+ @if [ ! -z $(TACKER_CHANGE) ]; then \
+ $(CACHETOOL) getcommitid $(TACKER_REPO) $(TACKER_CHANGE) > .cachedata; \
+ else \
+ $(CACHETOOL) getcommitid $(TACKER_REPO) $(TACKER_BRANCH) > .cachedata; \
+ fi
+ @sha1sum Makefile | awk {'print $$1'} >> .cachedata
+ @sha1sum config.mk | awk {'print $$1'} >> .cachedata
+ @echo -n $(UBUNTU_ARCH) | sha1sum | awk {'print $$1'} >> .cachedata
+ @cat .cachedata | $(CACHETOOL) getid > .cacheid
+
+# Clean local data related to caching - called prior to ordinary build
+.PHONY: clean-cache
+clean-cache: clean
+ @rm -f .cachedata .cacheid
+
+# Try to download cache - called prior to ordinary build
+.PHONY: get-cache
+get-cache: .cacheid
+ @if $(CACHETOOL) check $(shell cat .cacheid); then \
+ $(CACHETOOL) get $(shell cat .cacheid) | tar xf -;\
+ else \
+ echo "No cache item found for $(shell cat .cacheid)" ;\
+ exit 0;\
+ fi
+
+# Store cache if not already stored - called after ordinary build
+.PHONY: put-cache
+put-cache: .cacheid
+ @tar cf - .tackerbuild tacker*.rpm gitinfo_tackerplugin.txt | $(CACHETOOL) put $(shell cat .cacheid)
diff --git a/build/f_isoroot/f_repobuild/config.mk b/build/f_isoroot/f_tacker-pluginbuild/config.mk
index d81732849..2de94d57e 100644
--- a/build/f_isoroot/f_repobuild/config.mk
+++ b/build/f_isoroot/f_tacker-pluginbuild/config.mk
@@ -1,13 +1,12 @@
##############################################################################
-# Copyright (c) 2016 Ericsson AB and others.
-# mskalski@mirantis.com
+# Copyright (c) 2015 Ericsson AB and others.
+# geopar@intracom-telecom.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-FUEL_MIRROR_REPO?=https://github.com/openstack/fuel-mirror
-# Point to the commit where 302 redirects are handled
-# https://bugs.launchpad.net/fuel/mitaka/+bug/1593674
-FUEL_MIRROR_COMMIT?=192a3d9f8f993afb12c5108dd9339c6688c23e11
+TACKER_BRANCH?=master
+TACKER_REPO?="https://github.com/openstack/fuel-plugin-tacker"
+TACKER_CHANGE?=e49db23fa2392e2cc42ac15ce22e869ff9cb0e0c
diff --git a/build/f_isoroot/f_tacker-pluginbuild/packages.yaml b/build/f_isoroot/f_tacker-pluginbuild/packages.yaml
new file mode 100644
index 000000000..c7775d120
--- /dev/null
+++ b/build/f_isoroot/f_tacker-pluginbuild/packages.yaml
@@ -0,0 +1,3 @@
+packages:
+ - python-jsonrpclib
+
diff --git a/build/f_repos/Makefile b/build/f_repos/Makefile
new file mode 100644
index 000000000..b5eafff9b
--- /dev/null
+++ b/build/f_repos/Makefile
@@ -0,0 +1,166 @@
+##############################################################################
+# Copyright (c) 2015,2016 Ericsson AB, Enea AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# Alexandru.Avadanii@enea.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+SHELL = /bin/sh
+REPOINFO = $(shell readlink -e ../repo_info.sh)
+FREPODIR = $(shell pwd)
+FPATCHES = $(shell find ${F_PATCH_DIR} -name '*.patch')
+
+include ../config.mk
+
+# NOTE: Mechanism overview is presented in ./README.md.
+
+# Submodule consistent states:
+# - NOT initialized (submodule trees are not populated at all);
+# - initialized, bound to saved commits;
+# - initialized, tracking remote origin (only for FUEL_TRACK_REMOTES);
+# - patched (local patches are applied);
+
+# In order to keep things sort of separate, we should only pass up (to main
+# Makefile) the fully-patched repos, and gather any fingerprinting info here.
+
+# Fuel@OPNFV relies on upstream git repos (one per component) in 1 of 2 ways:
+# - pinned down to tag objects (e.g. "9.0.1")
+# - tracking upstream remote HEAD on a stable or master branch
+# FIXME(alav): Should we support mixed cases? (e.g. pin down only fuel-main)
+
+# To enable remote tracking, set the following var to any non-empty string.
+# Leaving this var empty will bind each git submodule to its saved commit.
+# NOTE: For non-stable branches, unless overridden, always track remotes
+FUEL_TRACK_REMOTES ?= $(shell git symbolic-ref --short HEAD | grep -v stable)
+
+.PHONY: all
+all: release
+
+##############################################################################
+# git submodule operations - to be used stand-alone or from parent Makefile
+##############################################################################
+
+# Fetch & update git submodules, checkout remote HEAD or saved commit
+# Also gather fingerprints for parent gitinfo_fuel.txt.
+.PHONY: sub
+sub: .cachefuelinfo
+
+.cachefuelinfo:
+ @if [ -n "${FUEL_TRACK_REMOTES}" ]; then \
+ git submodule update --init --remote 2>/dev/null; \
+ else \
+ git submodule update --init 2>/dev/null; \
+ fi
+ @rm -f $@
+ @git submodule -q foreach '${REPOINFO} . >> ${FREPODIR}/$@'
+
+# Generate patches from submodules
+.PHONY: patches-export
+patches-export: sub
+ @git submodule -q foreach ' \
+ SUB_DIR=${F_PATCH_DIR}/$$name; \
+ git tag | awk "!/root/ && /${F_OPNFV_TAG}-fuel/" | while read F_TAG; do \
+ SUB_FEATURE=`dirname $${F_TAG#${F_OPNFV_TAG}-fuel/}`; \
+ echo "`tput setaf 2`-- exporting $$name ($$F_TAG)`tput sgr0`"; \
+ mkdir -p $$SUB_DIR/$${SUB_FEATURE} && \
+ git format-patch --no-signature --ignore-space-at-eol \
+ -o $$SUB_DIR/$$SUB_FEATURE -N $$F_TAG-root..$$F_TAG; \
+ sed -i -e "1{/From: /!d}" -e "s/[[:space:]]*$$//" \
+ $$SUB_DIR/$$SUB_FEATURE/*.patch; \
+ done'
+
+# Apply patches from patch/* to respective submodules
+# We rely on `make sub` and/or `make clean` to checkout correct base
+.PHONY: patches-import
+patches-import: sub .cachepatched
+
+.cachepatched: ${FPATCHES}
+ @$(MAKE) clean
+ @git submodule -q foreach ' \
+ SUB_DIR=${F_PATCH_DIR}/$$name; mkdir -p $$SUB_DIR && \
+ git tag ${F_OPNFV_TAG}-root && \
+ git checkout -q -b opnfv-fuel && \
+ find $$SUB_DIR -type d | sort | while read p_dir; do \
+ SUB_PATCHES=$$(ls $$p_dir/*.patch 2>/dev/null); \
+ if [ -n "$$SUB_PATCHES" ]; then \
+ SUB_FEATURE=$${p_dir#$$SUB_DIR}; \
+ SUB_TAG=${F_OPNFV_TAG}-fuel$$SUB_FEATURE/patch; \
+ echo "`tput setaf 2`-- patching $$name ($$SUB_TAG)`tput sgr0`";\
+ git tag $$SUB_TAG-root && git am -3 --whitespace=nowarn \
+ --committer-date-is-author-date $$SUB_PATCHES && \
+ git tag $$SUB_TAG || exit 1; \
+ fi \
+ done && \
+ git tag ${F_OPNFV_TAG}'
+ @touch $@
+
+# Clean any changes made to submodules, checkout upstream Fuel root commit
+.PHONY: clean
+clean:
+ @cd ${F_GIT_ROOT} && git submodule -q foreach ' \
+ git am -q --abort > /dev/null 2>&1; \
+ git checkout -q -f ${F_OPNFV_TAG}-root > /dev/null 2>&1; \
+ git branch -q -D opnfv-fuel > /dev/null 2>&1; \
+ git tag | grep ${F_OPNFV_TAG} | xargs git tag -d > /dev/null 2>&1; \
+ git reset -q --hard HEAD; \
+ git clean -xdff'
+ @rm -f .cachepatched
+
+.PHONY: deepclean
+deepclean: clean clean-cache
+ @git submodule deinit -f .
+ @rm -f .cache*
+
+.PHONY: release
+release: sub
+ # Store artifact in cache straight away if caching is enabled
+ # (no .cacheid will be present unless this is a cached build)
+ test -f .cacheid && $(MAKE) -f Makefile put-cache || exit 0
+ # NOTE: Patches are not included in cache
+ $(MAKE) -f Makefile patches-import
+
+##############################################################################
+# Cache operations - only used when building through ci/build.sh
+##############################################################################
+
+# NOTE: Current method of collecting submodule refs requires submodules to be
+# NOT initialized <OR> NO patches applied.
+# NOTE: Querying `git submodule status` from parent will show the patched tree.
+# Create a unique hash to be used for getting and putting cache, based on:
+# - git submodule SHAs, collected with `git submodule status`
+# - The contents of this Makefile
+.cacheid:
+ @$(MAKE) clean
+ sha1sum Makefile > .cachedata
+ git submodule status | cut -c2-41 >> .cachedata
+ cat .cachedata | $(CACHETOOL) getid > .cacheid
+
+# Clean local data related to caching - called prior to ordinary build
+.PHONY: clean-cache
+clean-cache: clean
+ rm -f .cachedata .cacheid
+
+# Try to download cache - called prior to ordinary build
+.PHONY: get-cache
+get-cache: .cacheid
+ @if $(CACHETOOL) check $(shell cat .cacheid); then \
+ $(MAKE) clean && \
+ $(CACHETOOL) get $(shell cat .cacheid) | \
+ tar xf - -C ${F_GIT_DIR}; \
+ else \
+ echo "No cache item found for $(shell cat .cacheid)" ;\
+ exit 0;\
+ fi
+
+# Store cache if not already stored - called after ordinary build
+.PHONY: put-cache
+put-cache: .cacheid
+ @if ! $(CACHETOOL) check $(shell cat .cacheid); then \
+ $(MAKE) clean && \
+ tar cf - -C ${F_GIT_DIR} modules | \
+ $(CACHETOOL) put $(shell cat .cacheid); \
+ fi
diff --git a/build/f_repos/README.md b/build/f_repos/README.md
new file mode 100644
index 000000000..0a52fe040
--- /dev/null
+++ b/build/f_repos/README.md
@@ -0,0 +1,121 @@
+Fuel@OPNFV submodule fetching and patching
+==========================================
+
+This directory holds submodule fetching/patching scripts, intended for
+working with upstream Fuel components (fuel-library, ... , fuel-ui) in
+developing/applying OPNFV patches (backports, custom fixes etc.).
+
+The scripts should be friendly to the following 2 use-cases:
+ - development work: easily cloning, binding repos to specific commits,
+ remote tracking, patch development etc.;
+ - to provide parent build scripts an easy method of tracking upstream
+ references and applying OPNFV patches on top;
+
+Also, we need to support at least the following modes of operations:
+ - submodule bind - each submodule patches will be based on the commit ID
+ saved in the .gitmodules config file;
+ - remote tracking - each submodule will sync with the upstream remote
+ and patches will be applied on top of <sub_remote>/<sub_branch>/HEAD;
+
+Workflow (development)
+----------------------
+The standard development workflow should look as follows:
+
+1. Decide whether remote tracking should be active or not:
+ NOTE: Setting the following var to any non-empty str enables remote track.
+ NOTE: Leaving unset will enable remote track for anything but stable branch.
+
+ $ export FUEL_TRACK_REMOTES=""
+
+2. All Fuel sub-projects are registered as submodules. To initialize them, call:
+ If remote tracking is active, upstream remote is queried and latest remote
+ branch HEAD is fetched. Otherwise, checkout commit IDs from .gitmodules.
+
+ $ make sub
+
+3. Apply patches from `patch/<sub-project>/*` to respective submodules via:
+
+ $ make patches-import
+
+ This will result in creation of:
+   - a tag called `${F_OPNFV_TAG}-root` at the same commit as Fuel@OPNFV
+     upstream reference (bound to git submodule OR tracking remote HEAD);
+   - a new branch `opnfv-fuel` which will hold all the OPNFV patches,
+     each patch is applied on this new branch with `git-am`;
+   - a tag called `${F_OPNFV_TAG}` at `opnfv-fuel/HEAD`;
+
+4. Modify sub-projects for whatever you need.
+ Commit your changes when you want them taken into account in the build.
+
+5. Re-create patches via:
+
+ $ make patches-export
+
+   Each commit on `opnfv-fuel` branch of each subproject will be
+   exported to `patch/<sub-project>/` via `git format-patch`.
+
+ NOTE: Only commit (-f) submodules when you need to bump upstream ref.
+ NOTE: DO NOT commit patched submodules!
+
+6. Clean workbench branches and tags with:
+
+ $ make clean
+
+7. De-initialize submodules and force a clean clone with:
+
+ $ make deepclean
+
+Workflow (ISO build)
+--------------------
+Parent build scripts require this mechanism to do some fingerprinting,
+so here is the intended flow for all artifacts to be generated right:
+
+1. (Optional) Cached submodules might be fetched from build cache.
+
+2. Submodules are updated
+ We also dump each submodule's git info using repo_info.sh, since
+ we want to collect git refs before patching (i.e. upstream refs).
+
+3. Make target `release` is built
+ This will make sure the modules are in a clean state, put them in cache,
+ then apply the patches.
+
+4. fuel-main's `${F_OPNFV_TAG}-root` tag is used to determine VERSION info
+ It will accommodate both bound tags and remote tracking references.
+
+Sub-project maintenance
+-----------------------
+1. Adding a new submodule
+ If you need to add another subproject, you can do it with `git submodule`.
+ Make sure that you specify branch (with `-b`), short name (with `--name`)
+ and point it to `upstream/*` directory, i.e.:
+
+ $ git submodule -b stable/mitaka add --name fuel-web \
+ https://github.com/openstack/fuel-web.git upstream/fuel-web
+
+2. Working with remote tracking for upgrading Fuel components
+ Enable remote tracking as described above, which at `make sub` will update
+ ALL submodules (fuel-main, fuel-library, ...) to remote branch (set in
+ .gitmodules) HEAD.
+
+ * If upstream has NOT already tagged a new version, we can still work on
+ our patches, make sure they apply etc., then check for new upstream
+ changes (and that our patches still apply on top of them) by:
+
+ $ make deepclean patches-import
+
+ * If upstream has already tagged a new version we want to pick up, checkout
+ the new tag in each submodule:
+
+ $ git submodule foreach 'git checkout <newtag>'
+
+ * Once satisfied with the patch and submodule changes, commit them:
+   - enforce FUEL_TRACK_REMOTES to "yes" if you want to constantly use the
+ latest remote branch HEAD (as soon as upstream pushes a change on that
+ branch, our next build will automatically include it - risk of our
+ patches colliding with new upstream changes);
+ - stage patch changes if any;
+ - if submodule tags have been updated (relevant when remote tracking is
+ disabled, i.e. we have a stable upstream baseline), add submodules:
+
+ $ make deepclean sub && git add -f sub/*
diff --git a/build/patch-repos/build/repos/fuel-library/0001-Reduce-ceilometer-memory-usage.patch b/build/f_repos/patch/fuel-library/0001-Reduce-ceilometer-memory-usage.patch
index f7fe06884..e51e70b64 100644
--- a/build/patch-repos/build/repos/fuel-library/0001-Reduce-ceilometer-memory-usage.patch
+++ b/build/f_repos/patch/fuel-library/0001-Reduce-ceilometer-memory-usage.patch
@@ -1,11 +1,10 @@
-From 38315ee3945a5444de13918eb29e7771eb9927c7 Mon Sep 17 00:00:00 2001
From: Michael Polenchuk <mpolenchuk@mirantis.com>
Date: Thu, 8 Sep 2016 19:25:14 +0300
-Subject: [PATCH 1/1] Reduce ceilometer memory usage
+Subject: [PATCH] Reduce ceilometer memory usage
Change-Id: I8b1a97bd710c859a3543d1aed8226313f35f4f10
---
- .../manifests/ceilometer/controller.pp | 7 +++++++
+ .../puppet/openstack_tasks/manifests/ceilometer/controller.pp | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/deployment/puppet/openstack_tasks/manifests/ceilometer/controller.pp b/deployment/puppet/openstack_tasks/manifests/ceilometer/controller.pp
@@ -25,6 +24,3 @@ index 2c63ff0..1dda109 100644
+ }
}
}
---
-1.7.9.5
-
diff --git a/build/f_repos/patch/fuel-library/0002-Disable-token-revoke-to-increase-keystone-performanc.patch b/build/f_repos/patch/fuel-library/0002-Disable-token-revoke-to-increase-keystone-performanc.patch
new file mode 100644
index 000000000..d4baa9c1e
--- /dev/null
+++ b/build/f_repos/patch/fuel-library/0002-Disable-token-revoke-to-increase-keystone-performanc.patch
@@ -0,0 +1,40 @@
+From: iberezovskiy <iberezovskiy@mirantis.com>
+Date: Mon, 19 Sep 2016 12:35:05 +0300
+Subject: [PATCH] Disable token revoke to increase keystone performance
+
+Change-Id: I4df816369093908ad1eac372f38c92155efbe8e0
+Closes-bug: #1625077
+---
+ deployment/puppet/openstack_tasks/manifests/keystone/keystone.pp | 2 ++
+ tests/noop/spec/hosts/keystone/keystone_spec.rb | 5 +++++
+ 2 files changed, 7 insertions(+)
+
+diff --git a/deployment/puppet/openstack_tasks/manifests/keystone/keystone.pp b/deployment/puppet/openstack_tasks/manifests/keystone/keystone.pp
+index 3162287..ba9d7df 100644
+--- a/deployment/puppet/openstack_tasks/manifests/keystone/keystone.pp
++++ b/deployment/puppet/openstack_tasks/manifests/keystone/keystone.pp
+@@ -327,6 +327,8 @@ class openstack_tasks::keystone::keystone {
+ memcache_pool_unused_timeout => '60',
+ cache_memcache_servers => $memcache_servers,
+ policy_driver => 'keystone.policy.backends.sql.Policy',
++ # Set revoke_by_id to false according to LP #1625077
++ revoke_by_id => false,
+ }
+
+ Package<| title == 'keystone'|> ~> Service<| title == 'keystone'|>
+diff --git a/tests/noop/spec/hosts/keystone/keystone_spec.rb b/tests/noop/spec/hosts/keystone/keystone_spec.rb
+index b29e691..3c0f847 100644
+--- a/tests/noop/spec/hosts/keystone/keystone_spec.rb
++++ b/tests/noop/spec/hosts/keystone/keystone_spec.rb
+@@ -176,6 +176,11 @@ describe manifest do
+ should contain_class('keystone').with('sync_db' => primary_controller)
+ end
+
++ it 'should declare keystone class with revoke_by_id set to false' do
++ # Set revoke_by_id to false according to LP #1625077
++ should contain_class('keystone').with('revoke_by_id' => false)
++ end
++
+ it 'should configure keystone with paramters' do
+ should contain_keystone_config('token/caching').with(:value => 'false')
+ should contain_keystone_config('cache/enabled').with(:value => 'true')
diff --git a/build/patch-repos/0010-bootstrap_admin_node.sh.patch b/build/f_repos/patch/fuel-main/0001-OPNFV-Additions-to-bootstrap_admin_node.sh.patch
index d3769adfb..446d0b6bd 100644
--- a/build/patch-repos/0010-bootstrap_admin_node.sh.patch
+++ b/build/f_repos/patch/fuel-main/0001-OPNFV-Additions-to-bootstrap_admin_node.sh.patch
@@ -1,10 +1,13 @@
From: Fuel OPNFV <fuel@opnfv.org>
Date: Mon, 13 Jun 2016 22:23:57 +0200
-Subject: OPNFV: Additions to bootstrap_admin_node.sh
+Subject: [PATCH] OPNFV: Additions to bootstrap_admin_node.sh
---
+ iso/bootstrap_admin_node.sh | 39 +++++++++++++++++++++++++++++++--------
+ 1 file changed, 31 insertions(+), 8 deletions(-)
+
diff --git a/iso/bootstrap_admin_node.sh b/iso/bootstrap_admin_node.sh
-index 3197c91..e035145 100755
+index 3197c91..db3123d 100755
--- a/iso/bootstrap_admin_node.sh
+++ b/iso/bootstrap_admin_node.sh
@@ -339,8 +339,22 @@ fuelmenu --save-only --iface=$ADMIN_INTERFACE || fail
@@ -44,7 +47,9 @@ index 3197c91..e035145 100755
if [ ! -f "${ASTUTE_YAML}" ]; then
echo ${fuelmenu_fail_message}
-@@ -377,7 +392,5 @@ if [ ! -f /etc/fuel_build_id ]; then
+@@ -377,9 +392,7 @@ if [ ! -f /etc/fuel_build_id ]; then
+ ln -s ${wwwdir}/${OPENSTACK_VERSION}/ubuntu ${wwwdir}/ubuntu
+ fi
-# Enable sshd
-systemctl enable sshd
@@ -70,7 +75,7 @@ index 3197c91..e035145 100755
if [ "`get_bootstrap_skip`" = "False" ]; then
build_ubuntu_bootstrap bs_status || true
else
-@@ -571,8 +571,8 @@
+@@ -540,8 +563,8 @@ else
fi
#Check if repo is accessible
diff --git a/build/patch-repos/0020-isolinux.cfg.patch b/build/f_repos/patch/fuel-main/0002-OPNFV-showmenu-yes-in-isolinux.cfg.patch
index f09be2a7f..dbff8c7d4 100644
--- a/build/patch-repos/0020-isolinux.cfg.patch
+++ b/build/f_repos/patch/fuel-main/0002-OPNFV-showmenu-yes-in-isolinux.cfg.patch
@@ -1,8 +1,11 @@
From: Fuel OPNFV <fuel@opnfv.org>
Date: Mon, 13 Jun 2016 22:23:57 +0200
-Subject: OPNFV: showmenu=yes in isolinux.cfg
+Subject: [PATCH] OPNFV: showmenu=yes in isolinux.cfg
---
+ iso/isolinux/isolinux.cfg | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
diff --git a/iso/isolinux/isolinux.cfg b/iso/isolinux/isolinux.cfg
index c6b1ed9..77a4b18 100644
--- a/iso/isolinux/isolinux.cfg
diff --git a/build/patch-repos/0030-repo-multi-arch-local-mirrors.patch b/build/f_repos/patch/fuel-main/0003-repo-mirror-Allow-multi-arch-local-mirrors.patch
index d40428743..9469643c3 100644
--- a/build/patch-repos/0030-repo-multi-arch-local-mirrors.patch
+++ b/build/f_repos/patch/fuel-main/0003-repo-mirror-Allow-multi-arch-local-mirrors.patch
@@ -1,6 +1,6 @@
From: Stanislaw Kardach <stanislaw.kardach@cavium.com>
Date: Thu, 25 Feb 2016 13:31:19 +0100
-Subject: repo mirror: Allow multi-arch local mirrors
+Subject: [PATCH] repo mirror: Allow multi-arch local mirrors
This patch allows specifying multiple architectures via UBUNTU_ARCH in
form of a list of space separated architectures. The first architecture
@@ -13,11 +13,24 @@ independent (which is true so far).
Signed-off-by: Stanislaw Kardach <stanislaw.kardach@cavium.com>
Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
---
+ config.mk | 2 +-
mirror/ubuntu/module.mk | 2 +-
sandbox.mk | 2 +-
- config.mk | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)
+diff --git a/config.mk b/config.mk
+index 74ee039..45a3b30 100644
+--- a/config.mk
++++ b/config.mk
+@@ -49,7 +49,7 @@ UBUNTU_MINOR:=04
+ UBUNTU_RELEASE_NUMBER:=$(UBUNTU_MAJOR).$(UBUNTU_MINOR)
+ UBUNTU_KERNEL_FLAVOR?=lts-trusty
+ UBUNTU_NETBOOT_FLAVOR?=netboot
+-UBUNTU_ARCH:=amd64
++UBUNTU_ARCH?=amd64
+ UBUNTU_IMAGE_RELEASE:=$(UBUNTU_MAJOR)$(UBUNTU_MINOR)
+ SEPARATE_IMAGES?=/boot,ext2 /,ext4
+
diff --git a/mirror/ubuntu/module.mk b/mirror/ubuntu/module.mk
index 7a9466e..fe1ada2 100644
--- a/mirror/ubuntu/module.mk
@@ -44,16 +57,3 @@ index 4bc3962..5ffddbd 100644
if [ -e $(SANDBOX_UBUNTU)/etc/resolv.conf ]; then sudo cp -a $(SANDBOX_UBUNTU)/etc/resolv.conf $(SANDBOX_UBUNTU)/etc/resolv.conf.orig; fi
sudo cp /etc/resolv.conf $(SANDBOX_UBUNTU)/etc/resolv.conf
if [ -e $(SANDBOX_UBUNTU)/etc/hosts ]; then sudo cp -a $(SANDBOX_UBUNTU)/etc/hosts $(SANDBOX_UBUNTU)/etc/hosts.orig; fi
-diff --git a/config.mk b/config.mk
-index 74ee039..45a3b30 100644
---- a/config.mk
-+++ b/config.mk
-@@ -49,7 +49,7 @@ UBUNTU_MINOR:=04
- UBUNTU_RELEASE_NUMBER:=$(UBUNTU_MAJOR).$(UBUNTU_MINOR)
- UBUNTU_KERNEL_FLAVOR?=lts-trusty
- UBUNTU_NETBOOT_FLAVOR?=netboot
--UBUNTU_ARCH:=amd64
-+UBUNTU_ARCH?=amd64
- UBUNTU_IMAGE_RELEASE:=$(UBUNTU_MAJOR)$(UBUNTU_MINOR)
- SEPARATE_IMAGES?=/boot,ext2 /,ext4
-
diff --git a/build/f_repos/patch/fuel-main/0004-xorriso-fails-to-add-files-with-path-longer-then-240.patch b/build/f_repos/patch/fuel-main/0004-xorriso-fails-to-add-files-with-path-longer-then-240.patch
new file mode 100644
index 000000000..734523040
--- /dev/null
+++ b/build/f_repos/patch/fuel-main/0004-xorriso-fails-to-add-files-with-path-longer-then-240.patch
@@ -0,0 +1,24 @@
+From: Artem Silenkov <asilenkov@mirantis.com>
+Date: Tue, 22 Sep 2015 21:54:44 +0300
+Subject: [PATCH] xorriso fails to add files with path longer then 240
+
+joliet-long option added to xorriso command line
+
+Change-Id: I97016cf3e6a554f44d839740ba7d210337435cdc
+Related-Bug: #1498619
+---
+ iso/module.mk | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/iso/module.mk b/iso/module.mk
+index 8995378..0073068 100644
+--- a/iso/module.mk
++++ b/iso/module.mk
+@@ -174,6 +174,7 @@ $(ISO_PATH): $(BUILD_DIR)/iso/isoroot.done
+ -V $(ISO_VOLUME_ID) -p $(ISO_VOLUME_PREP) \
+ -J -R \
+ -graft-points \
++ -joliet-long \
+ -b isolinux/isolinux.bin -no-emul-boot -boot-load-size 4 -boot-info-table \
+ -isohybrid-mbr /usr/lib/syslinux/isohdpfx.bin \
+ -eltorito-alt-boot -e images/efiboot.img -no-emul-boot \
diff --git a/build/f_repos/patch/fuel-mirror/0001-Fixed-handling-http-redirects.patch b/build/f_repos/patch/fuel-mirror/0001-Fixed-handling-http-redirects.patch
new file mode 100644
index 000000000..b82be1128
--- /dev/null
+++ b/build/f_repos/patch/fuel-mirror/0001-Fixed-handling-http-redirects.patch
@@ -0,0 +1,87 @@
+From: Bulat Gaifullin <bgaifullin@mirantis.com>
+Date: Fri, 24 Jun 2016 15:52:41 +0200
+Subject: [PATCH] Fixed handling http redirects
+
+Override method 'redirect_request' for patching
+new request, that has been created on handling
+http redirect.
+
+Change-Id: I40db406e2377bebec1113639b91a0b5262e2e9ad
+Closes-Bug: 1593674
+(cherry picked from commit 192a3d9f8f993afb12c5108dd9339c6688c23e11)
+---
+ packetary/library/connections.py | 21 ++++++++++++++++++++-
+ packetary/tests/test_connections.py | 19 +++++++++++++++++++
+ 2 files changed, 39 insertions(+), 1 deletion(-)
+
+diff --git a/packetary/library/connections.py b/packetary/library/connections.py
+index 36a7a84..49b6c9b 100644
+--- a/packetary/library/connections.py
++++ b/packetary/library/connections.py
+@@ -93,9 +93,23 @@ class ResumableResponse(StreamWrapper):
+ self.stream = response.stream
+
+
+-class RetryHandler(urllib.BaseHandler):
++class RetryHandler(urllib.HTTPRedirectHandler):
+ """urllib Handler to add ability for retrying on server errors."""
+
++ def redirect_request(self, req, fp, code, msg, headers, newurl):
++ new_req = urllib.HTTPRedirectHandler.redirect_request(
++ self, req, fp, code, msg, headers, newurl
++ )
++ if new_req is not None:
++ # We use class assignment for casting new request to type
++ # RetryableRequest
++ new_req.__class__ = RetryableRequest
++ new_req.retries_left = req.retries_left
++ new_req.offset = req.offset
++ new_req.start_time = req.start_time
++ new_req.retry_interval = req.retry_interval
++ return new_req
++
+ @staticmethod
+ def http_request(request):
+ """Initialises http request.
+@@ -118,6 +132,11 @@ class RetryHandler(urllib.BaseHandler):
+ :return: ResumableResponse if success otherwise same response
+ """
+ code, msg = response.getcode(), response.msg
++
++ if 300 <= code < 400:
++ # the redirect group, pass to next handler as is
++ return response
++
+ # the server should response partial content if range is specified
+ if request.offset > 0 and code != 206:
+ raise RangeError(msg)
+diff --git a/packetary/tests/test_connections.py b/packetary/tests/test_connections.py
+index a2621c8..c80b03d 100644
+--- a/packetary/tests/test_connections.py
++++ b/packetary/tests/test_connections.py
+@@ -268,6 +268,25 @@ class TestRetryHandler(base.TestCase):
+ self.handler.http_response(request, response_mock)
+ self.handler.parent.open.assert_called_once_with(request)
+
++ @mock.patch(
++ 'packetary.library.connections.urllib.'
++ 'HTTPRedirectHandler.redirect_request'
++ )
++ def test_redirect_request(self, redirect_mock, _):
++ redirect_mock.return_value = connections.urllib.Request(
++ 'http://localhost/'
++ )
++ req = mock.MagicMock(retries_left=10, retry_interval=5, offset=100)
++ new_req = self.handler.redirect_request(req, -1, 301, "", {}, "")
++ self.assertIsInstance(new_req, connections.RetryableRequest)
++ self.assertEqual(req.retries_left, new_req.retries_left)
++ self.assertEqual(req.retry_interval, new_req.retry_interval)
++ self.assertEqual(req.offset, new_req.offset)
++ redirect_mock.return_value = None
++ self.assertIsNone(
++ self.handler.redirect_request(req, -1, 301, "", {}, "")
++ )
++
+
+ class TestResumeableResponse(base.TestCase):
+ def setUp(self):
diff --git a/build/patch-repos/build/repos/fuel-nailgun/0010-Mark-Intel-82599-10-Gigabit-NIC-as-DPDK-capable.patch b/build/f_repos/patch/fuel-web/0001-Mark-Intel-82599-10-Gigabit-NIC-as-DPDK-capable.patch
index d486f2354..6fa1e436c 100644
--- a/build/patch-repos/build/repos/fuel-nailgun/0010-Mark-Intel-82599-10-Gigabit-NIC-as-DPDK-capable.patch
+++ b/build/f_repos/patch/fuel-web/0001-Mark-Intel-82599-10-Gigabit-NIC-as-DPDK-capable.patch
@@ -12,11 +12,12 @@ diff --git a/nailgun/nailgun/fixtures/openstack.yaml b/nailgun/nailgun/fixtures/
index 74fa509..d08a4fc 100644
--- a/nailgun/nailgun/fixtures/openstack.yaml
+++ b/nailgun/nailgun/fixtures/openstack.yaml
-@@ -2215,3 +2215,3 @@
+@@ -2213,7 +2213,7 @@
+ "15ad:07b0", "8086:15a5", "1137:0043", "1137:0071", "14e4:168a",
+ "14e4:16a9", "14e4:164f", "14e4:168e", "14e4:16af", "14e4:163d",
"14e4:163f", "14e4:168d", "14e4:16a1", "14e4:16a2", "14e4:16ad",
- "14e4:16ae", "14e4:163e", "14e4:16a4"
+ "14e4:16ae", "14e4:163e", "14e4:16a4", "8086:10f8"
]
---
-1.9.1
-
+ - pk: 3
+ extend: *ubuntu_release
diff --git a/build/f_repos/sub/fuel-agent b/build/f_repos/sub/fuel-agent
new file mode 160000
+Subproject 7ffbf39caf5845bd82b8ce20a7766cf24aa803f
diff --git a/build/f_repos/sub/fuel-astute b/build/f_repos/sub/fuel-astute
new file mode 160000
+Subproject 390b257240d49cc5e94ed5c4fcd940b5f2f6ec6
diff --git a/build/f_repos/sub/fuel-library b/build/f_repos/sub/fuel-library
new file mode 160000
+Subproject e283b62750d9e26355981b3ad3be7c880944ae0
diff --git a/build/f_repos/sub/fuel-main b/build/f_repos/sub/fuel-main
new file mode 160000
+Subproject d6a22557d132c592b18c6bac90f5f4b8d1aa3ad
diff --git a/build/f_repos/sub/fuel-menu b/build/f_repos/sub/fuel-menu
new file mode 160000
+Subproject 0ed9e206ed1c6271121d3acf52a6bf757411286
diff --git a/build/f_repos/sub/fuel-mirror b/build/f_repos/sub/fuel-mirror
new file mode 160000
+Subproject d1ef06b530ce2149230953bb3810a88ecaff870
diff --git a/build/f_repos/sub/fuel-nailgun-agent b/build/f_repos/sub/fuel-nailgun-agent
new file mode 160000
+Subproject 46fa0db0f8944f9e67699d281d462678aaf4db2
diff --git a/build/f_repos/sub/fuel-ostf b/build/f_repos/sub/fuel-ostf
new file mode 160000
+Subproject f09c98ff7cc71ee612b2450f68a19f2f9c64345
diff --git a/build/f_repos/sub/fuel-ui b/build/f_repos/sub/fuel-ui
new file mode 160000
+Subproject 90de7ef4477230cb7335453ed26ed4306ca6f04
diff --git a/build/f_repos/sub/fuel-upgrade b/build/f_repos/sub/fuel-upgrade
new file mode 160000
+Subproject c1c4bac6a467145ac4fac73e4a7dd2b00380ecf
diff --git a/build/f_repos/sub/fuel-web b/build/f_repos/sub/fuel-web
new file mode 160000
+Subproject e2b85bafb68c348f25cb7cceda81edc668ba2e6
diff --git a/build/f_repos/sub/network-checker b/build/f_repos/sub/network-checker
new file mode 160000
+Subproject fcb47dd095a76288aacf924de574e39709e1f3c
diff --git a/build/f_repos/sub/python-fuelclient b/build/f_repos/sub/python-fuelclient
new file mode 160000
+Subproject 67d8c693a670d27c239d5d175f3ea2a0512c498
diff --git a/build/f_repos/sub/shotgun b/build/f_repos/sub/shotgun
new file mode 160000
+Subproject 781a8cfa0b6eb290e730429fe2792f2b6f5e0c1
diff --git a/build/install/install.sh b/build/install/install.sh
index c63241974..866d30419 100755
--- a/build/install/install.sh
+++ b/build/install/install.sh
@@ -196,8 +196,12 @@ make_iso_image() {
find . -name TRANS.TBL -exec rm {} \;
rm -rf rr_moved
+ if [[ -z "$OPNFV_GIT_SHA" ]]; then
+ OPNFV_GIT_SHA=$(git rev-parse --verify HEAD)
+ fi
+
mkisofs --quiet -r -V "$VOLUMEID" -publisher "$PUBLISHER" \
- -p `git rev-parse --verify HEAD` -J -R -b isolinux/isolinux.bin \
+ -p "$OPNFV_GIT_SHA" -J -R -b isolinux/isolinux.bin \
-no-emul-boot \
-boot-load-size 4 -boot-info-table \
--hide-rr-moved \
diff --git a/deploy/README b/deploy/README
index 40f95ef92..ee6bc3156 100644
--- a/deploy/README
+++ b/deploy/README
@@ -13,9 +13,11 @@ the following dependencies and python modules are required to be installed:
- for Ubuntu:
-sudo apt-get install -y libvirt-bin qemu-kvm python-pip fuseiso mkisofs genisoimage
+sudo apt-get update
+sudo apt-get install -y libvirt-bin qemu-kvm python-pip fuseiso mkisofs \
+genisoimage ipmitool
sudo apt-get install -y python-dev libz-dev libxml2-dev libxslt-dev libyaml-dev
-sudo pip install pyyaml netaddr paramiko lxml scp pycrypto ecdsa
+sudo pip install pyyaml netaddr paramiko lxml scp pycrypto ecdsa amt
During libvirt install the user is added to the libvirtd group, so you have to
logout then login back again
diff --git a/deploy/cloud/deployment.py b/deploy/cloud/deployment.py
index 4a9fcd9a8..ecccc241f 100644
--- a/deploy/cloud/deployment.py
+++ b/deploy/cloud/deployment.py
@@ -9,6 +9,7 @@
import time
import re
+import json
from common import (
N,
@@ -29,8 +30,16 @@ GREP_LINES_OF_LEADING_CONTEXT = 100
GREP_LINES_OF_TRAILING_CONTEXT = 100
LIST_OF_CHAR_TO_BE_ESCAPED = ['[', ']', '"']
-class Deployment(object):
+class DeployNotStart(Exception):
+ """Unable to start deployment"""
+
+
+class NodesGoOffline(Exception):
+ """Nodes goes offline during deployment"""
+
+
+class Deployment(object):
def __init__(self, dea, yaml_config_dir, env_id, node_id_roles_dict,
no_health_check, deploy_timeout):
@@ -43,7 +52,6 @@ class Deployment(object):
self.pattern = re.compile(
'\d\d\d\d-\d\d-\d\d\s\d\d:\d\d:\d\d')
-
def collect_error_logs(self):
for node_id, roles_blade in self.node_id_roles_dict.iteritems():
log_list = []
@@ -89,7 +97,7 @@ class Deployment(object):
log_msg += details
if log_msg:
- log_list.append(log_msg)
+ log_list.append(log_msg)
if log_list:
role = ('controller' if 'controller' in roles_blade[0]
@@ -99,47 +107,88 @@ class Deployment(object):
for log_msg in log_list:
print(log_msg + '\n')
-
def run_deploy(self):
SLEEP_TIME = 60
- LOG_FILE = 'cloud.log'
+ abort_after = 60 * int(self.deploy_timeout)
+ start = time.time()
log('Starting deployment of environment %s' % self.env_id)
- deploy_proc = run_proc('fuel --env %s deploy-changes | strings > %s'
- % (self.env_id, LOG_FILE))
-
+ deploy_id = None
ready = False
- for i in range(int(self.deploy_timeout)):
- env = parse(exec_cmd('fuel env --env %s' % self.env_id))
- log('Environment status: %s' % env[0][E['status']])
- r, _ = exec_cmd('tail -2 %s | head -1' % LOG_FILE, False)
- if r:
- log(r)
- if env[0][E['status']] == 'operational':
- ready = True
- break
- elif (env[0][E['status']] == 'error'
- or env[0][E['status']] == 'stopped'):
- break
- else:
+ timeout = False
+
+ attempts = 0
+ while attempts < 3:
+ try:
+ if time.time() > start + abort_after:
+ timeout = True
+ break
+ if not deploy_id:
+ deploy_id = self._start_deploy_task()
+ sts, prg, msg = self._deployment_status(deploy_id)
+ if sts == 'error':
+ log('Error during deployment: {}'.format(msg))
+ break
+ if sts == 'running':
+ log('Environment deployment progress: {}%'.format(prg))
+ elif sts == 'ready':
+ ready = True
+ break
time.sleep(SLEEP_TIME)
-
- if (env[0][E['status']] <> 'operational'
- and env[0][E['status']] <> 'error'
- and env[0][E['status']] <> 'stopped'):
- err('Deployment timed out, environment %s is not operational, snapshot will not be performed'
- % self.env_id, self.collect_logs)
-
- run_proc_wait_terminated(deploy_proc)
- delete(LOG_FILE)
-
+ except (DeployNotStart, NodesGoOffline) as e:
+ log(e)
+ attempts += 1
+ deploy_id = None
+ time.sleep(SLEEP_TIME * attempts)
+
+ if timeout:
+ err('Deployment timed out, environment %s is not operational, '
+ 'snapshot will not be performed'
+ % self.env_id)
if ready:
- log('Environment %s successfully deployed' % self.env_id)
+ log('Environment %s successfully deployed'
+ % self.env_id)
else:
self.collect_error_logs()
err('Deployment failed, environment %s is not operational'
% self.env_id, self.collect_logs)
+ def _start_deploy_task(self):
+ out, _ = exec_cmd('fuel2 env deploy {}'.format(self.env_id), False)
+ id = self._deployment_task_id(out)
+ return id
+
+ def _deployment_task_id(self, response):
+ response = str(response)
+ if response.startswith('Deployment task with id'):
+ for s in response.split():
+ if s.isdigit():
+ return int(s)
+ raise DeployNotStart('Unable to start deployment: {}'.format(response))
+
+ def _deployment_status(self, id):
+ task = self._task_fields(id)
+ if task['status'] == 'error':
+ if task['message'].endswith(
+ 'offline. Remove them from environment and try again.'):
+ raise NodesGoOffline(task['message'])
+ return task['status'], task['progress'], task['message']
+
+ def _task_fields(self, id):
+ try:
+ out, _ = exec_cmd('fuel2 task show {} -f json'.format(id), False)
+ task_info = json.loads(out)
+ properties = {}
+ # for 9.0 this can be list of dicts or dict
+ # see https://bugs.launchpad.net/fuel/+bug/1625518
+ if isinstance(task_info, list):
+ for d in task_info:
+ properties.update({d['Field']: d['Value']})
+ else:
+ return task_info
+ return properties
+ except ValueError as e:
+ err('Unable to fetch task info: {}'.format(e))
def collect_logs(self):
log('Cleaning out any previous deployment logs')
@@ -155,7 +204,6 @@ class Deployment(object):
r, _ = exec_cmd('tar -czhf /root/deploy-%s.log.tar.gz /var/log/remote' % time.strftime("%Y%m%d-%H%M%S"), False)
log(r)
-
def verify_node_status(self):
node_list = parse(exec_cmd('fuel --env %s node' % self.env_id))
failed_nodes = []
@@ -169,7 +217,6 @@ class Deployment(object):
summary += '[node %s, status %s]\n' % (node, status)
err('Deployment failed: %s' % summary, self.collect_logs)
-
def health_check(self):
log('Now running sanity and smoke health checks')
r = exec_cmd('fuel health --env %s --check sanity,smoke --force' % self.env_id)
@@ -177,7 +224,6 @@ class Deployment(object):
if 'failure' in r:
err('Healthcheck failed!', self.collect_logs)
-
def deploy(self):
self.run_deploy()
self.verify_node_status()
diff --git a/deploy/common.py b/deploy/common.py
index 80832e201..dab9602c5 100644
--- a/deploy/common.py
+++ b/deploy/common.py
@@ -57,7 +57,7 @@ def exec_cmd(cmd, check=True, attempts=1, delay=5, verbose=False, mask_args=[],
# a negative value means forever
while attempts != 0:
- attempts = attempts - 1
+ attempts -= 1
process = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
@@ -74,13 +74,13 @@ def exec_cmd(cmd, check=True, attempts=1, delay=5, verbose=False, mask_args=[],
if check:
if return_code > 0:
stderr = stderr.strip()
- print "Failed command: " + str(masked_cmd)
- print "Command returned response: " + str(stderr)
- print "Command return code: " + str(return_code)
+ print("Failed command: " + str(masked_cmd))
+ print("Command returned response: " + str(stderr))
+ print("Command return code: " + str(return_code))
raise Exception(stderr)
else:
- print "Command: " + str(masked_cmd)
- print str(response)
+ print("Command: " + str(masked_cmd))
+ print(str(response))
return response
return response, return_code
@@ -140,7 +140,7 @@ def warn(message):
def check_file_exists(file_path):
if not os.path.dirname(file_path):
file_path = '%s/%s' % (CWD, file_path)
- if not os.path.isfile(file_path):
+ if not os.access(file_path, os.R_OK):
err('ERROR: File %s not found\n' % file_path)
diff --git a/deploy/config/plugins/fuel-odl_0.9.0.yaml b/deploy/config/plugins/fuel-odl_0.9.0.yaml
index 6caf4834f..9646d021f 100644
--- a/deploy/config/plugins/fuel-odl_0.9.0.yaml
+++ b/deploy/config/plugins/fuel-odl_0.9.0.yaml
@@ -118,9 +118,9 @@ opendaylight:
vpn:
- odl-vpnservice-openstack
odl_deb: opendaylight
- experimental_odl_deb: opendaylight-boron
+ experimental_odl_deb: opendaylight-experimental
use_experimental_odl:
- - enable_sfc
+ - enable_bgpvpn
#plugin_id: Assigned during installation
plugin_version: 0.9.0
restrictions:
diff --git a/deploy/config/plugins/fuel-tacker_0.9.0.yaml b/deploy/config/plugins/fuel-tacker_0.9.0.yaml
new file mode 100644
index 000000000..71e028ffd
--- /dev/null
+++ b/deploy/config/plugins/fuel-tacker_0.9.0.yaml
@@ -0,0 +1,48 @@
+##############################################################################
+# Copyright (c) 2015,2016 Ericsson AB and others.
+# mskalski@mirantis.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+plugin-config-metadata:
+ title: Tacker fuel plugin configuration template
+ version: 0.1
+ created: 03.10.2016
+ comment: None
+tacker:
+ metadata:
+ #chosen_id: Assigned during installation
+ class: plugin
+ default: false
+ enabled: true
+ label: Tacker VNF manager
+ toggleable: true
+ versions:
+ - metadata:
+ group: 'openstack_services'
+ db_password:
+ generator: 'password'
+ user_password:
+ generator: 'password'
+ user: 'tacker'
+ port: 8889
+ service: 'tacker-server'
+ restrictions:
+ - condition: "settings:opendaylight == null or settings:opendaylight.metadata.enabled == false or settings:opendaylight.enable_sfc.value == false"
+ strict: false
+ message: "Please install OpenDaylight Plugin with SFC features enabled"
+ - condition: "settings:fuel-plugin-ovs == null or settings:fuel-plugin-ovs.metadata.enabled == false"
+ strict: false
+ message: "Please install and enable Openvswitch plugin with NSH support."
+ #plugin_id: Assigned during installation
+ plugin_version: 0.2.0
+ debug:
+ value: false
+ label: 'Debug logging'
+ description: 'Debug logging mode provides more information, but requires more disk space.'
+ weight: 25
+ type: "checkbox"
+ weight: 70
diff --git a/deploy/deploy-config.py b/deploy/deploy-config.py
index 8896080db..d2567d97b 100644
--- a/deploy/deploy-config.py
+++ b/deploy/deploy-config.py
@@ -69,8 +69,8 @@ def parse_arguments():
required=True)
parser.add_argument('-scenario', dest='scenario', action='store',
default=False,
- help=('Deployment scenario short-name (priority),'
- 'or base file name (in the absense of a'
+ help=('Deployment scenario short-name (priority), '
+ 'or base file name (in the absence of a '
'shortname defenition)'),
required=True)
@@ -83,7 +83,6 @@ def parse_arguments():
help='Local path for resulting output configuration files',
required=True)
args = parser.parse_args()
- log(args)
kwargs = {'dha_uri': args.dha_uri,
'dea_base_uri': args.dea_base_uri,
'dea_pod_override_uri': args.dea_pod_override_uri,
@@ -162,127 +161,6 @@ def merge_dicts(dict1, dict2):
yield (k, dict1[k])
-setup_yaml()
-kwargs = parse_arguments()
-
-# Generate final dea.yaml by merging following config files/fragments in revers priority order:
-# "dea-base", "dea-pod-override", "deplyment-scenario/module-config-override"
-# and "deployment-scenario/dea-override"
-print('Generating final dea.yaml configuration....')
-
-# Fetch dea-base, extract and purge meta-data
-print('Parsing dea-base from: ' + kwargs["dea_base_uri"] + "....")
-response = urllib2.urlopen(kwargs["dea_base_uri"])
-dea_base_conf = yaml.load(response.read())
-dea_base_title = dea_base_conf['dea-base-config-metadata']['title']
-dea_base_version = dea_base_conf['dea-base-config-metadata']['version']
-dea_base_creation = dea_base_conf['dea-base-config-metadata']['created']
-dea_base_sha = sha_uri(kwargs["dea_base_uri"])
-dea_base_comment = dea_base_conf['dea-base-config-metadata']['comment']
-dea_base_conf.pop('dea-base-config-metadata')
-final_dea_conf = dea_base_conf
-dea_pod_override_nodes = None
-
-# Fetch dea-pod-override, extract and purge meta-data, merge with previous dea data structure
-print('Parsing the dea-pod-override from: ' + kwargs["dea_pod_override_uri"] + "....")
-response = urllib2.urlopen(kwargs["dea_pod_override_uri"])
-dea_pod_override_conf = yaml.load(response.read())
-if dea_pod_override_conf:
- dea_pod_title = dea_pod_override_conf['dea-pod-override-config-metadata']['title']
- dea_pod_version = dea_pod_override_conf['dea-pod-override-config-metadata']['version']
- dea_pod_creation = dea_pod_override_conf['dea-pod-override-config-metadata']['created']
- dea_pod_sha = sha_uri(kwargs["dea_pod_override_uri"])
- dea_pod_comment = dea_pod_override_conf['dea-pod-override-config-metadata']['comment']
- print('Merging dea-base and dea-pod-override configuration ....')
- dea_pod_override_conf.pop('dea-pod-override-config-metadata')
- # Copy the list of original nodes, which holds info on their transformations
- if 'nodes' in dea_pod_override_conf:
- dea_pod_override_nodes = list(dea_pod_override_conf['nodes'])
- if dea_pod_override_conf:
- final_dea_conf = dict(merge_dicts(final_dea_conf, dea_pod_override_conf))
-
-# Fetch deployment-scenario, extract and purge meta-data, merge deployment-scenario/
-# dea-override-configith previous dea data structure
-print('Parsing deployment-scenario from: ' + kwargs["scenario"] + "....")
-
-response = urllib2.urlopen(kwargs["scenario_base_uri"] + "/scenario.yaml")
-scenario_short_translation_conf = yaml.load(response.read())
-if kwargs["scenario"] in scenario_short_translation_conf:
- scenario_uri = (kwargs["scenario_base_uri"]
- + "/"
- + scenario_short_translation_conf[kwargs["scenario"]]['configfile'])
-else:
- scenario_uri = kwargs["scenario_base_uri"] + "/" + kwargs["scenario"]
-response = urllib2.urlopen(scenario_uri)
-deploy_scenario_conf = yaml.load(response.read())
-
-if deploy_scenario_conf:
- deploy_scenario_title = deploy_scenario_conf['deployment-scenario-metadata']['title']
- deploy_scenario_version = deploy_scenario_conf['deployment-scenario-metadata']['version']
- deploy_scenario_creation = deploy_scenario_conf['deployment-scenario-metadata']['created']
- deploy_scenario_sha = sha_uri(scenario_uri)
- deploy_scenario_comment = deploy_scenario_conf['deployment-scenario-metadata']['comment']
- deploy_scenario_conf.pop('deployment-scenario-metadata')
-else:
- print("Deployment scenario file not found or is empty")
- print("Cannot continue, exiting ....")
- sys.exit(1)
-
-dea_scenario_override_conf = deploy_scenario_conf["dea-override-config"]
-if dea_scenario_override_conf:
- print('Merging dea-base-, dea-pod-override- and deployment-scenario '
- 'configuration into final dea.yaml configuration....')
- final_dea_conf = dict(merge_dicts(final_dea_conf, dea_scenario_override_conf))
-
-# Fetch plugin-configuration configuration files, extract and purge meta-data,
-# merge/append with previous dea data structure, override plugin-configuration with
-# deploy-scenario/module-config-override
-modules = []
-module_uris = []
-module_titles = []
-module_versions = []
-module_creations = []
-module_shas = []
-module_comments = []
-if deploy_scenario_conf["stack-extensions"]:
- for module in deploy_scenario_conf["stack-extensions"]:
- print('Loading configuration for module: '
- + module["module"]
- + ' and merging it to final dea.yaml configuration....')
- response = urllib2.urlopen(kwargs["plugins_uri"]
- + '/'
- + module["module-config-name"]
- + '_'
- + module["module-config-version"]
- + '.yaml')
- module_conf = yaml.load(response.read())
- modules.append(module["module"])
- module_uris.append(kwargs["plugins_uri"]
- + '/'
- + module["module-config-name"]
- + '_'
- + module["module-config-version"]
- + '.yaml')
- module_titles.append(str(module_conf['plugin-config-metadata']['title']))
- module_versions.append(str(module_conf['plugin-config-metadata']['version']))
- module_creations.append(str(module_conf['plugin-config-metadata']['created']))
- module_shas.append(sha_uri(kwargs["plugins_uri"]
- + '/'
- + module["module-config-name"]
- + '_'
- + module["module-config-version"]
- + '.yaml'))
- module_comments.append(str(module_conf['plugin-config-metadata']['comment']))
- module_conf.pop('plugin-config-metadata')
- final_dea_conf['settings']['editable'].update(module_conf)
- scenario_module_override_conf = module.get('module-config-override')
- if scenario_module_override_conf:
- dea_scenario_module_override_conf = {}
- dea_scenario_module_override_conf['settings'] = {}
- dea_scenario_module_override_conf['settings']['editable'] = {}
- dea_scenario_module_override_conf['settings']['editable'][module["module"]] = scenario_module_override_conf
- final_dea_conf = dict(merge_dicts(final_dea_conf, dea_scenario_module_override_conf))
-
def get_node_ifaces_and_trans(nodes, nid):
for node in nodes:
if node['id'] == nid:
@@ -293,117 +171,297 @@ def get_node_ifaces_and_trans(nodes, nid):
return None
-if dea_pod_override_nodes:
- for node in final_dea_conf['nodes']:
- data = get_node_ifaces_and_trans(dea_pod_override_nodes, node['id'])
- if data:
- print ("Honoring original interfaces and transformations for "
- "node %d to %s, %s" % (node['id'], data[0], data[1]))
- node['interfaces'] = data[0]
- node['transformations'] = data[1]
-
-# Dump final dea.yaml including configuration management meta-data to argument provided
-# directory
-if not os.path.exists(kwargs["output_path"]):
- os.makedirs(kwargs["output_path"])
-print('Dumping final dea.yaml to ' + kwargs["output_path"] + '/dea.yaml....')
-with open(kwargs["output_path"] + '/dea.yaml', "w") as f:
- f.write("\n".join([("title: DEA.yaml file automatically generated from the"
- 'configuration files stated in the "configuration-files"'
- "fragment below"),
- "version: " + str(calendar.timegm(time.gmtime())),
- "created: " + str(time.strftime("%d/%m/%Y")) + " "
- + str(time.strftime("%H:%M:%S")),
- "comment: none\n"]))
-
- f.write("\n".join(["configuration-files:",
- " dea-base:",
- " uri: " + kwargs["dea_base_uri"],
- " title: " + str(dea_base_title),
- " version: " + str(dea_base_version),
- " created: " + str(dea_base_creation),
- " sha1: " + str(dea_base_sha),
- " comment: " + str(dea_base_comment) + "\n"]))
-
- f.write("\n".join([" pod-override:",
- " uri: " + kwargs["dea_pod_override_uri"],
- " title: " + str(dea_pod_title),
- " version: " + str(dea_pod_version),
- " created: " + str(dea_pod_creation),
- " sha1: " + str(dea_pod_sha),
- " comment: " + str(dea_pod_comment) + "\n"]))
-
- f.write("\n".join([" deployment-scenario:",
- " uri: " + str(scenario_uri),
- " title: " + str(deploy_scenario_title),
- " version: " + str(deploy_scenario_version),
- " created: " + str(deploy_scenario_creation),
- " sha1: " + str(deploy_scenario_sha),
- " comment: " + str(deploy_scenario_comment) + "\n"]))
-
- f.write(" plugin-modules:\n")
- for k, _ in enumerate(modules):
- f.write("\n".join([" - module: " + modules[k],
- " uri: " + module_uris[k],
- " title: " + module_titles[k],
- " version: " + module_versions[k],
- " created: " + module_creations[k],
- " sha-1: " + module_shas[k],
- " comment: " + module_comments[k] + "\n"]))
-
- yaml.dump(final_dea_conf, f, default_flow_style=False)
-
-# Load POD dha and override it with "deployment-scenario/dha-override-config" section
-print('Generating final dha.yaml configuration....')
-print('Parsing dha-pod yaml configuration....')
-response = urllib2.urlopen(kwargs["dha_uri"])
-dha_pod_conf = yaml.load(response.read())
-dha_pod_title = dha_pod_conf['dha-pod-config-metadata']['title']
-dha_pod_version = dha_pod_conf['dha-pod-config-metadata']['version']
-dha_pod_creation = dha_pod_conf['dha-pod-config-metadata']['created']
-dha_pod_sha = sha_uri(kwargs["dha_uri"])
-dha_pod_comment = dha_pod_conf['dha-pod-config-metadata']['comment']
-dha_pod_conf.pop('dha-pod-config-metadata')
-final_dha_conf = dha_pod_conf
-
-dha_scenario_override_conf = deploy_scenario_conf["dha-override-config"]
-# Only virtual deploy scenarios can override dha.yaml since there
-# is no way to programatically override a physical environment:
-# wireing, IPMI set-up, etc.
-# For Physical environments, dha.yaml overrides will be silently ignored
-if dha_scenario_override_conf and (final_dha_conf['adapter'] == 'libvirt'
- or final_dha_conf['adapter'] == 'esxi'
- or final_dha_conf['adapter'] == 'vbox'):
- print('Merging dha-pod and deployment-scenario override information to final dha.yaml configuration....')
- final_dha_conf = dict(merge_dicts(final_dha_conf, dha_scenario_override_conf))
-
-# Dump final dha.yaml to argument provided directory
-print('Dumping final dha.yaml to ' + kwargs["output_path"] + '/dha.yaml....')
-with open(kwargs["output_path"] + '/dha.yaml', "w") as f:
- f.write("\n".join([("title: DHA.yaml file automatically generated from"
- "the configuration files stated in the"
- '"configuration-files" fragment below'),
- "version: " + str(calendar.timegm(time.gmtime())),
- "created: " + str(time.strftime("%d/%m/%Y")) + " "
- + str(time.strftime("%H:%M:%S")),
- "comment: none\n"]))
-
- f.write("configuration-files:\n")
-
- f.write("\n".join([" dha-pod-configuration:",
- " uri: " + kwargs["dha_uri"],
- " title: " + str(dha_pod_title),
- " version: " + str(dha_pod_version),
- " created: " + str(dha_pod_creation),
- " sha-1: " + str(dha_pod_sha),
- " comment: " + str(dha_pod_comment) + "\n"]))
-
- f.write("\n".join([" deployment-scenario:",
- " uri: " + str(scenario_uri),
- " title: " + str(deploy_scenario_title),
- " version: " + str(deploy_scenario_version),
- " created: " + str(deploy_scenario_creation),
- " sha-1: " + str(deploy_scenario_sha),
- " comment: " + str(deploy_scenario_comment) + "\n"]))
-
- yaml.dump(final_dha_conf, f, default_flow_style=False)
+
+class DeployConfig(object):
+ def __init__(self):
+ self.kwargs = parse_arguments()
+ self.dea_conf = dict()
+ self.dea_metadata = dict()
+ self.dea_pod_ovr_metadata = dict()
+ self.dea_pod_ovr_nodes = None
+ self.scenario_metadata = dict()
+ self.modules = []
+ self.module_uris = []
+ self.module_titles = []
+ self.module_versions = []
+ self.module_createds = []
+ self.module_shas = []
+ self.module_comments = []
+ self.dha_pod_conf = dict()
+ self.dha_metadata = dict()
+
+ def process_dea_base(self):
+ # Generate final dea.yaml by merging following config files/fragments in reverse priority order:
+ # "dea-base", "dea-pod-override", "deplyment-scenario/module-config-override"
+ # and "deployment-scenario/dea-override"
+ print('Generating final dea.yaml configuration....')
+
+ # Fetch dea-base, extract and purge meta-data
+ print('Parsing dea-base from: ' + self.kwargs["dea_base_uri"] + "....")
+ response = urllib2.urlopen(self.kwargs["dea_base_uri"])
+ dea_conf = yaml.load(response.read())
+
+ dea_metadata = dict()
+ dea_metadata['title'] = dea_conf['dea-base-config-metadata']['title']
+ dea_metadata['version'] = dea_conf['dea-base-config-metadata']['version']
+ dea_metadata['created'] = dea_conf['dea-base-config-metadata']['created']
+ dea_metadata['sha'] = sha_uri(self.kwargs["dea_base_uri"])
+ dea_metadata['comment'] = dea_conf['dea-base-config-metadata']['comment']
+ self.dea_metadata = dea_metadata
+ dea_conf.pop('dea-base-config-metadata')
+ self.dea_conf = dea_conf
+
+ def process_dea_pod_override(self):
+ # Fetch dea-pod-override, extract and purge meta-data, merge with previous dea data structure
+ print('Parsing the dea-pod-override from: ' + self.kwargs["dea_pod_override_uri"] + "....")
+ response = urllib2.urlopen(self.kwargs["dea_pod_override_uri"])
+ dea_pod_override_conf = yaml.load(response.read())
+
+ if dea_pod_override_conf:
+ metadata = dict()
+ metadata['title'] = dea_pod_override_conf['dea-pod-override-config-metadata']['title']
+ metadata['version'] = dea_pod_override_conf['dea-pod-override-config-metadata']['version']
+ metadata['created'] = dea_pod_override_conf['dea-pod-override-config-metadata']['created']
+ metadata['sha'] = sha_uri(self.kwargs["dea_pod_override_uri"])
+ metadata['comment'] = dea_pod_override_conf['dea-pod-override-config-metadata']['comment']
+ self.dea_pod_ovr_metadata = metadata
+
+ print('Merging dea-base and dea-pod-override configuration ....')
+ dea_pod_override_conf.pop('dea-pod-override-config-metadata')
+
+ # Copy the list of original nodes, which holds info on their transformations
+ if 'nodes' in dea_pod_override_conf:
+ self.dea_pod_ovr_nodes = list(dea_pod_override_conf['nodes'])
+ if dea_pod_override_conf:
+ self.dea_conf = dict(merge_dicts(self.dea_conf, dea_pod_override_conf))
+
+ def get_scenario_uri(self):
+ response = urllib2.urlopen(self.kwargs["scenario_base_uri"] + "/scenario.yaml")
+ scenario_short_translation_conf = yaml.load(response.read())
+ if self.kwargs["scenario"] in scenario_short_translation_conf:
+ scenario_uri = (self.kwargs["scenario_base_uri"]
+ + "/"
+ + scenario_short_translation_conf[self.kwargs["scenario"]]['configfile'])
+ else:
+ scenario_uri = self.kwargs["scenario_base_uri"] + "/" + self.kwargs["scenario"]
+
+ return scenario_uri
+
+ def get_scenario_config(self):
+ self.scenario_metadata['uri'] = self.get_scenario_uri()
+ response = urllib2.urlopen(self.scenario_metadata['uri'])
+ return yaml.load(response.read())
+
+ def process_modules(self):
+ scenario_conf = self.get_scenario_config()
+ if scenario_conf["stack-extensions"]:
+ for module in scenario_conf["stack-extensions"]:
+ print('Loading configuration for module: '
+ + module["module"]
+ + ' and merging it to final dea.yaml configuration....')
+ response = urllib2.urlopen(self.kwargs["plugins_uri"]
+ + '/'
+ + module["module-config-name"]
+ + '_'
+ + module["module-config-version"]
+ + '.yaml')
+ module_conf = yaml.load(response.read())
+ self.modules.append(module["module"])
+ self.module_uris.append(self.kwargs["plugins_uri"]
+ + '/'
+ + module["module-config-name"]
+ + '_'
+ + module["module-config-version"]
+ + '.yaml')
+ self.module_titles.append(str(module_conf['plugin-config-metadata']['title']))
+ self.module_versions.append(str(module_conf['plugin-config-metadata']['version']))
+ self.module_createds.append(str(module_conf['plugin-config-metadata']['created']))
+ self.module_shas.append(sha_uri(self.kwargs["plugins_uri"]
+ + '/'
+ + module["module-config-name"]
+ + '_'
+ + module["module-config-version"]
+ + '.yaml'))
+ self.module_comments.append(str(module_conf['plugin-config-metadata']['comment']))
+ module_conf.pop('plugin-config-metadata')
+ self.dea_conf['settings']['editable'].update(module_conf)
+
+ scenario_module_override_conf = module.get('module-config-override')
+ if scenario_module_override_conf:
+ dea_scenario_module_override_conf = {}
+ dea_scenario_module_override_conf['settings'] = {}
+ dea_scenario_module_override_conf['settings']['editable'] = {}
+ dea_scenario_module_override_conf['settings']['editable'][module["module"]] = scenario_module_override_conf
+ self.dea_conf = dict(merge_dicts(self.dea_conf, dea_scenario_module_override_conf))
+
+ def process_scenario_config(self):
+ # Fetch deployment-scenario, extract and purge meta-data, merge deployment-scenario/
+ # dea-override-config with previous dea data structure
+ print('Parsing deployment-scenario from: ' + self.kwargs["scenario"] + "....")
+
+ scenario_conf = self.get_scenario_config()
+
+ metadata = dict()
+ if scenario_conf:
+ metadata['title'] = scenario_conf['deployment-scenario-metadata']['title']
+ metadata['version'] = scenario_conf['deployment-scenario-metadata']['version']
+ metadata['created'] = scenario_conf['deployment-scenario-metadata']['created']
+ metadata['sha'] = sha_uri(self.scenario_metadata['uri'])
+ metadata['comment'] = scenario_conf['deployment-scenario-metadata']['comment']
+ self.scenario_metadata = metadata
+ scenario_conf.pop('deployment-scenario-metadata')
+ else:
+ print("Deployment scenario file not found or is empty")
+ print("Cannot continue, exiting ....")
+ sys.exit(1)
+
+ dea_scenario_override_conf = scenario_conf["dea-override-config"]
+ if dea_scenario_override_conf:
+ print('Merging dea-base-, dea-pod-override- and deployment-scenario '
+ 'configuration into final dea.yaml configuration....')
+ self.dea_conf = dict(merge_dicts(self.dea_conf, dea_scenario_override_conf))
+
+ self.process_modules()
+
+ # Fetch plugin-configuration configuration files, extract and purge meta-data,
+ # merge/append with previous dea data structure, override plugin-configuration with
+ # deploy-scenario/module-config-override
+
+ if self.dea_pod_ovr_nodes:
+ for node in self.dea_conf['nodes']:
+ data = get_node_ifaces_and_trans(self.dea_pod_ovr_nodes, node['id'])
+ if data:
+ print("Honoring original interfaces and transformations for "
+ "node %d to %s, %s" % (node['id'], data[0], data[1]))
+ node['interfaces'] = data[0]
+ node['transformations'] = data[1]
+
+ def dump_dea_config(self):
+ # Dump final dea.yaml including configuration management meta-data to argument provided
+ # directory
+ path = self.kwargs["output_path"]
+ if not os.path.exists(path):
+ os.makedirs(path)
+ print('Dumping final dea.yaml to ' + path + '/dea.yaml....')
+ with open(path + '/dea.yaml', "w") as f:
+ f.write("\n".join([("title: DEA.yaml file automatically generated from the "
+ 'configuration files stated in the "configuration-files" '
+ "fragment below"),
+ "version: " + str(calendar.timegm(time.gmtime())),
+ "created: " + time.strftime("%d/%m/%Y %H:%M:%S"),
+ "comment: none\n"]))
+
+ f.write("\n".join(["configuration-files:",
+ " dea-base:",
+ " uri: " + self.kwargs["dea_base_uri"],
+ " title: " + str(self.dea_metadata['title']),
+ " version: " + str(self.dea_metadata['version']),
+ " created: " + str(self.dea_metadata['created']),
+ " sha1: " + sha_uri(self.kwargs["dea_base_uri"]),
+ " comment: " + str(self.dea_metadata['comment']) + "\n"]))
+
+ f.write("\n".join([" pod-override:",
+ " uri: " + self.kwargs["dea_pod_override_uri"],
+ " title: " + str(self.dea_pod_ovr_metadata['title']),
+ " version: " + str(self.dea_pod_ovr_metadata['version']),
+ " created: " + str(self.dea_pod_ovr_metadata['created']),
+ " sha1: " + self.dea_pod_ovr_metadata['sha'],
+ " comment: " + str(self.dea_pod_ovr_metadata['comment']) + "\n"]))
+
+ f.write("\n".join([" deployment-scenario:",
+ " uri: " + self.scenario_metadata['uri'],
+ " title: " + str(self.scenario_metadata['title']),
+ " version: " + str(self.scenario_metadata['version']),
+ " created: " + str(self.scenario_metadata['created']),
+ " sha1: " + self.scenario_metadata['sha'],
+ " comment: " + str(self.scenario_metadata['comment']) + "\n"]))
+
+ f.write(" plugin-modules:\n")
+ for k, _ in enumerate(self.modules):
+ f.write("\n".join([" - module: " + self.modules[k],
+ " uri: " + self.module_uris[k],
+ " title: " + str(self.module_titles[k]),
+ " version: " + str(self.module_versions[k]),
+ " created: " + str(self.module_createds[k]),
+ " sha-1: " + self.module_shas[k],
+ " comment: " + str(self.module_comments[k]) + "\n"]))
+
+ yaml.dump(self.dea_conf, f, default_flow_style=False)
+
+ def process_dha_pod_config(self):
+ # Load POD dha and override it with "deployment-scenario/dha-override-config" section
+ print('Generating final dha.yaml configuration....')
+ print('Parsing dha-pod yaml configuration....')
+ response = urllib2.urlopen(self.kwargs["dha_uri"])
+ dha_pod_conf = yaml.load(response.read())
+
+ dha_metadata = dict()
+ dha_metadata['title'] = dha_pod_conf['dha-pod-config-metadata']['title']
+ dha_metadata['version'] = dha_pod_conf['dha-pod-config-metadata']['version']
+ dha_metadata['created'] = dha_pod_conf['dha-pod-config-metadata']['created']
+ dha_metadata['sha'] = sha_uri(self.kwargs["dha_uri"])
+ dha_metadata['comment'] = dha_pod_conf['dha-pod-config-metadata']['comment']
+ self.dha_metadata = dha_metadata
+ dha_pod_conf.pop('dha-pod-config-metadata')
+ self.dha_pod_conf = dha_pod_conf
+
+ scenario_conf = self.get_scenario_config()
+ dha_scenario_override_conf = scenario_conf["dha-override-config"]
+ # Only virtual deploy scenarios can override dha.yaml since there
+ # is no way to programmatically override a physical environment:
+ # wiring, IPMI set-up, etc.
+ # For Physical environments, dha.yaml overrides will be silently ignored
+ if dha_scenario_override_conf and (dha_pod_conf['adapter'] == 'libvirt'
+ or dha_pod_conf['adapter'] == 'esxi'
+ or dha_pod_conf['adapter'] == 'vbox'):
+ print('Merging dha-pod and deployment-scenario override information to final dha.yaml configuration....')
+ self.dha_pod_conf = dict(merge_dicts(self.dha_pod_conf, dha_scenario_override_conf))
+
+ def dump_dha_config(self):
+ # Dump final dha.yaml to argument provided directory
+ path = self.kwargs["output_path"]
+ print('Dumping final dha.yaml to ' + path + '/dha.yaml....')
+ with open(path + '/dha.yaml', "w") as f:
+ f.write("\n".join([("title: DHA.yaml file automatically generated from "
+ "the configuration files stated in the "
+ '"configuration-files" fragment below'),
+ "version: " + str(calendar.timegm(time.gmtime())),
+ "created: " + time.strftime("%d/%m/%Y %H:%M:%S"),
+ "comment: none\n"]))
+
+ f.write("configuration-files:\n")
+
+ f.write("\n".join([" dha-pod-configuration:",
+ " uri: " + self.kwargs["dha_uri"],
+ " title: " + str(self.dha_metadata['title']),
+ " version: " + str(self.dha_metadata['version']),
+ " created: " + str(self.dha_metadata['created']),
+ " sha-1: " + self.dha_metadata['sha'],
+ " comment: " + str(self.dha_metadata['comment']) + "\n"]))
+
+ f.write("\n".join([" deployment-scenario:",
+ " uri: " + self.scenario_metadata['uri'],
+ " title: " + str(self.scenario_metadata['title']),
+ " version: " + str(self.scenario_metadata['version']),
+ " created: " + str(self.scenario_metadata['created']),
+ " sha-1: " + self.scenario_metadata['sha'],
+ " comment: " + str(self.scenario_metadata['comment']) + "\n"]))
+
+ yaml.dump(self.dha_pod_conf, f, default_flow_style=False)
+
+
+def main():
+ setup_yaml()
+
+ deploy_config = DeployConfig()
+ deploy_config.process_dea_base()
+ deploy_config.process_dea_pod_override()
+ deploy_config.process_scenario_config()
+ deploy_config.dump_dea_config()
+
+ deploy_config.process_dha_pod_config()
+ deploy_config.dump_dha_config()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/deploy/environments/execution_environment.py b/deploy/environments/execution_environment.py
index af0e130dd..7a0b4744e 100644
--- a/deploy/environments/execution_environment.py
+++ b/deploy/environments/execution_environment.py
@@ -46,7 +46,7 @@ class ExecutionEnvironment(object):
disk_files.append(source_file)
log('Deleting VM %s with disks %s' % (vm_name, disk_files))
exec_cmd('virsh destroy %s' % vm_name, False)
- exec_cmd('virsh undefine %s' % vm_name, False)
+ exec_cmd('virsh undefine --managed-save --remove-all-storage %s' % vm_name, False)
for file in disk_files:
delete(file)
diff --git a/deploy/install_fuel_master.py b/deploy/install_fuel_master.py
index 808d0b14c..a0e28b033 100644
--- a/deploy/install_fuel_master.py
+++ b/deploy/install_fuel_master.py
@@ -156,13 +156,12 @@ class InstallFuelMaster(object):
def wait_until_fuel_menu_up(self):
WAIT_LOOP = 60
SLEEP_TIME = 10
- CMD = 'ps -ef'
- SEARCH = 'fuelmenu'
+ CMD = 'pgrep -f fuelmenu'
fuel_menu_pid = None
with self.ssh:
for i in range(WAIT_LOOP):
ret = self.ssh.exec_cmd(CMD)
- fuel_menu_pid = self.get_fuel_menu_pid(ret, SEARCH)
+ fuel_menu_pid = ret.strip()
if not fuel_menu_pid:
time.sleep(SLEEP_TIME)
else:
@@ -171,11 +170,6 @@ class InstallFuelMaster(object):
raise Exception('Could not find the Fuel Menu Process ID')
return fuel_menu_pid
- def get_fuel_menu_pid(self, printout, search):
- for line in printout.splitlines():
- if line.endswith(search):
- return clean(line)[1]
-
def ssh_exec_cmd(self, cmd, check=True):
with self.ssh:
ret = self.ssh.exec_cmd(cmd, check=check)
@@ -198,7 +192,7 @@ class InstallFuelMaster(object):
def wait_until_installation_completed(self):
WAIT_LOOP = 360
SLEEP_TIME = 10
- CMD = 'ps -ef | grep %s | grep -v grep' % BOOTSTRAP_ADMIN
+ CMD = 'pgrep -f %s' % BOOTSTRAP_ADMIN
install_completed = False
with self.ssh:
diff --git a/deploy/scenario/ha_odl-l2_sfc_heat_ceilometer_scenario.yaml b/deploy/scenario/ha_odl-l2_sfc_heat_ceilometer_scenario.yaml
index e6aef2aba..c4789484c 100644
--- a/deploy/scenario/ha_odl-l2_sfc_heat_ceilometer_scenario.yaml
+++ b/deploy/scenario/ha_odl-l2_sfc_heat_ceilometer_scenario.yaml
@@ -1,5 +1,5 @@
##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
+# Copyright (c) 2015,2016 Ericsson AB and others.
# jonas.bjurel@ericsson.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
@@ -22,7 +22,7 @@
# deployment configuration meta-data
deployment-scenario-metadata:
title: ODL-L2 SFC HA deployment
- version: 0.0.1
+ version: 0.0.2
created: Feb 10 2016
comment: Rebased to Fuel9
@@ -52,6 +52,9 @@ stack-extensions:
value: true
metadata:
plugin_version: 0.9.0
+ - module: tacker
+ module-config-name: fuel-tacker
+ module-config-version: 0.9.0
# Note that the module substitionion does not support arrays
# This is a quick fix
# - module: opendaylight
@@ -78,7 +81,7 @@ dea-override-config:
role: controller,opendaylight
- id: 2
interfaces: interfaces_1
- role: mongo,controller
+ role: mongo,controller,tacker
- id: 3
interfaces: interfaces_1
role: ceph-osd,controller
diff --git a/deploy/scenario/ha_odl-l3_heat_ceilometer_scenario.yaml b/deploy/scenario/ha_odl-l3_heat_ceilometer_scenario.yaml
index 0dda3b3ca..967e7d295 100644
--- a/deploy/scenario/ha_odl-l3_heat_ceilometer_scenario.yaml
+++ b/deploy/scenario/ha_odl-l3_heat_ceilometer_scenario.yaml
@@ -44,6 +44,11 @@ stack-extensions:
value: true
metadata:
plugin_version: 0.9.0
+
+ - module: fuel-plugin-ovs
+ module-config-name: fuel-nshovs
+ module-config-version: 0.9.0
+
# - module: opendaylight
# module-config-name: fuel-odl
# module-config-version: 0.0.2
diff --git a/deploy/scenario/no-ha_odl-l2_sfc_heat_ceilometer_scenario.yaml b/deploy/scenario/no-ha_odl-l2_sfc_heat_ceilometer_scenario.yaml
index a8d9ed848..90a45d577 100644
--- a/deploy/scenario/no-ha_odl-l2_sfc_heat_ceilometer_scenario.yaml
+++ b/deploy/scenario/no-ha_odl-l2_sfc_heat_ceilometer_scenario.yaml
@@ -1,5 +1,5 @@
##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
+# Copyright (c) 2015,2016 Ericsson AB and others.
# jonas.bjurel@ericsson.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
@@ -22,7 +22,7 @@
# deployment configuration meta-data
deployment-scenario-metadata:
title: ODL-L2-SFC No-HA deployment
- version: 0.0.2
+ version: 0.0.3
created: Feb 10 2016
comment: Fuel ODL-L2 SFC No HA with Ceph, Ceilometer and Heat Rebased for Fuel9
@@ -52,6 +52,9 @@ stack-extensions:
value: true
metadata:
plugin_version: 0.9.0
+ - module: tacker
+ module-config-name: fuel-tacker
+ module-config-version: 0.9.0
# Note that the module substitionion does not support arrays
# This is a quick fix
@@ -76,7 +79,7 @@ dea-override-config:
nodes:
- id: 1
interfaces: interfaces_1
- role: mongo,controller
+ role: mongo,controller,tacker
- id: 2
interfaces: interfaces_1
role: ceph-osd,opendaylight
diff --git a/deploy/scenario/no-ha_odl-l3_heat_ceilometer_scenario.yaml b/deploy/scenario/no-ha_odl-l3_heat_ceilometer_scenario.yaml
index 5f800abad..0c8415f78 100644
--- a/deploy/scenario/no-ha_odl-l3_heat_ceilometer_scenario.yaml
+++ b/deploy/scenario/no-ha_odl-l3_heat_ceilometer_scenario.yaml
@@ -44,6 +44,11 @@ stack-extensions:
value: true
metadata:
plugin_version: 0.9.0
+
+ - module: fuel-plugin-ovs
+ module-config-name: fuel-nshovs
+ module-config-version: 0.9.0
+
# - module: opendaylight
# module-config-name: fuel-odl
# module-config-version: 0.0.2
diff --git a/deploy/templater.py b/deploy/templater.py
index 6b41e1f3c..bda60c7fe 100755
--- a/deploy/templater.py
+++ b/deploy/templater.py
@@ -12,6 +12,7 @@
import io
import re
import yaml
+import urllib2
from common import(
err,
ArgParser,
@@ -29,10 +30,29 @@ class Templater(object):
self.output_file = output_file
self.base = self.load_yaml(base_file)
- def load_yaml(self, filename):
+ def is_url(self, filespec):
+ regex = re.compile('^([^/:]+)://')
+ return re.search(regex, filespec)
+
+ def load_template(self, filespec):
+ try:
+ if(self.is_url(filespec)):
+ response = urllib2.urlopen(filespec)
+ return response.read()
+ else:
+ with io.open(filespec) as f:
+ return f.readlines()
+ except Exception as error:
+ err('Error opening template file: %s' % error)
+
+ def load_yaml(self, filespec):
try:
- with io.open(filename) as yaml_file:
- return yaml.load(yaml_file)
+ if(self.is_url(filespec)):
+ response = urllib2.urlopen(filespec)
+ return yaml.load(response)
+ else:
+ with io.open(filespec) as f:
+ return yaml.load(f)
except Exception as error:
err('Error opening YAML file: %s' % error)
@@ -147,12 +167,11 @@ class Templater(object):
regex = re.compile(re.escape(TAG_START) + r'([a-z].+)' + re.escape(TAG_END),
flags=re.IGNORECASE)
- with io.open(self.template_file) as f:
- for line in f:
- indent = self.get_indent(line)
- result += re.sub(regex,
- lambda match: self.parse_tag(match.group(1), indent),
- line)
+ for line in self.load_template(self.template_file):
+ indent = self.get_indent(line)
+ result += re.sub(regex,
+ lambda match: self.parse_tag(match.group(1), indent),
+ line)
self.save_yaml(self.output_file, result)
@@ -164,9 +183,9 @@ template variable substitution and write the results to 'output_file'.'''
parser = ArgParser(prog='python %s' % __file__,
description=description)
parser.add_argument('base_file',
- help='Base YAML filename')
+ help='Base YAML file or URL')
parser.add_argument('template_file',
- help='Fragment filename')
+ help='Template file or URL')
parser.add_argument('output_file',
help='Output filename')
diff --git a/deploy/templates/hardware_environment/vms/enea_lab/fuel.xml b/deploy/templates/hardware_environment/vms/enea_lab/fuel.xml
new file mode 100644
index 000000000..15617f577
--- /dev/null
+++ b/deploy/templates/hardware_environment/vms/enea_lab/fuel.xml
@@ -0,0 +1,88 @@
+<domain type='kvm' id='1'>
+ <name>fuel</name>
+ <memory unit='KiB'>8290304</memory>
+ <currentMemory unit='KiB'>8290304</currentMemory>
+ <vcpu placement='static'>4</vcpu>
+ <resource>
+ <partition>/machine</partition>
+ </resource>
+ <os>
+ <type arch='x86_64' machine='pc-i440fx-rhel7.0.0'>hvm</type>
+ <boot dev='cdrom'/>
+ <boot dev='hd'/>
+ <bootmenu enable='no'/>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <cpu mode='host-model'>
+ <model fallback='allow'/>
+ </cpu>
+ <clock offset='utc'>
+ <timer name='rtc' tickpolicy='catchup'/>
+ <timer name='pit' tickpolicy='delay'/>
+ <timer name='hpet' present='no'/>
+ </clock>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>restart</on_crash>
+ <pm>
+ <suspend-to-mem enabled='no'/>
+ <suspend-to-disk enabled='no'/>
+ </pm>
+ <devices>
+ <emulator>/usr/libexec/qemu-kvm</emulator>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2' cache='writeback'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ <disk type='block' device='cdrom'>
+ <driver name='qemu' type='raw'/>
+ <target dev='hdb' bus='ide'/>
+ <readonly/>
+ </disk>
+ <controller type='usb' index='0' model='ich9-ehci1'>
+ </controller>
+ <controller type='usb' index='0' model='ich9-uhci1'>
+ <master startport='0'/>
+ </controller>
+ <controller type='usb' index='0' model='ich9-uhci2'>
+ <master startport='2'/>
+ </controller>
+ <controller type='usb' index='0' model='ich9-uhci3'>
+ <master startport='4'/>
+ </controller>
+ <controller type='pci' index='0' model='pci-root'>
+ </controller>
+ <controller type='ide' index='0'>
+ </controller>
+ <controller type='virtio-serial' index='0'>
+ </controller>
+ <interface type='bridge'>
+ <model type='virtio'/>
+ </interface>
+ <interface type='bridge'>
+ <model type='virtio'/>
+ </interface>
+ <serial type='pty'>
+ <source path='/dev/pts/0'/>
+ <target port='0'/>
+ </serial>
+ <console type='pty' tty='/dev/pts/0'>
+ <source path='/dev/pts/0'/>
+ <target type='serial' port='0'/>
+ </console>
+ <input type='mouse' bus='ps2'/>
+ <input type='keyboard' bus='ps2'/>
+ <graphics type='vnc' port='5906' autoport='yes' listen='127.0.0.1'>
+ <listen type='address' address='127.0.0.1'/>
+ </graphics>
+ <video>
+ <model type='vga' vram='16384' heads='1'/>
+ </video>
+ <memballoon model='virtio'>
+ </memballoon>
+ </devices>
+</domain>
diff --git a/docs/installationprocedure/installation.instruction.rst b/docs/installationprocedure/installation.instruction.rst
index 715159910..2d6889bf8 100644
--- a/docs/installationprocedure/installation.instruction.rst
+++ b/docs/installationprocedure/installation.instruction.rst
@@ -186,7 +186,7 @@ VLANs needs to be manually configured.
Manual configuration of the Colorado hardware platform should
be carried out according to the OPNFV Pharos specification:
-<https://wiki.opnfv.org/pharos/pharos_specification>
+<https://wiki.opnfv.org/display/pharos/Pharos+Specification>
==========================================
OPNFV Software installation and deployment
diff --git a/docs/releasenotes/release-notes.rst b/docs/releasenotes/release-notes.rst
index a748e41f9..c3398df19 100644
--- a/docs/releasenotes/release-notes.rst
+++ b/docs/releasenotes/release-notes.rst
@@ -234,27 +234,27 @@ OpenDaylight
Fuel
====
-6) `The Fuel OpenStack project <https://wiki.openstack.org/wiki/Fuel>`_ 'https://wiki.openstack.org/wiki/Fuel'
+6) `The Fuel OpenStack project <https://wiki.openstack.org/wiki/Fuel>`_: https://wiki.openstack.org/wiki/Fuel
-7) `Fuel documentation overview <https://docs.fuel-infra.org/openstack/fuel/fuel-9.0/>`_ 'https://docs.fuel-infra.org/openstack/fuel/fuel-9.0/'
+7) `Fuel documentation overview <http://docs.openstack.org/developer/fuel-docs>`_: http://docs.openstack.org/developer/fuel-docs
-8) `Fuel planning guide <https://docs.fuel-infra.org/openstack/fuel/fuel-9.0/mos-planning-guide.html>`_ 'https://docs.fuel-infra.org/openstack/fuel/fuel-9.0/mos-planning-guide.html'
+8) `Fuel Installation Guide <http://docs.openstack.org/developer/fuel-docs/userdocs/fuel-install-guide.html>`_: http://docs.openstack.org/developer/fuel-docs/userdocs/fuel-install-guide.html
-9) `Fuel quick start guide <https://docs.mirantis.com/openstack/fuel/fuel-9.0/quickstart-guide.html>`_ 'https://docs.mirantis.com/openstack/fuel/fuel-9.0/quickstart-guide.html'
+9) `Fuel User Guide <http://docs.openstack.org/developer/fuel-docs/userdocs/fuel-user-guide.html>`_: http://docs.openstack.org/developer/fuel-docs/userdocs/fuel-user-guide.html
-10) `Fuel reference architecture <https://docs.mirantis.com/openstack/fuel/fuel-9.0/reference-architecture.html>`_ 'https://docs.mirantis.com/openstack/fuel/fuel-9.0/reference-architecture.html'
+10) `Fuel Developer Guide <http://docs.openstack.org/developer/fuel-docs/devdocs/develop.html>`_: http://docs.openstack.org/developer/fuel-docs/devdocs/develop.html
-11) `Fuel Plugin Developers Guide <https://wiki.openstack.org/wiki/Fuel/Plugins>`_ 'https://wiki.openstack.org/wiki/Fuel/Plugins'
+11) `Fuel Plugin Developers Guide <http://docs.openstack.org/developer/fuel-docs/plugindocs/fuel-plugin-sdk-guide.html>`_: http://docs.openstack.org/developer/fuel-docs/plugindocs/fuel-plugin-sdk-guide.html
-12) `Fuel OpenStack Hardware Compatibility List <https://www.mirantis.com/products/openstack-drivers-and-plugins/hardware-compatibility-list>`_ 'https://www.mirantis.com/products/openstack-drivers-and-plugins/hardware-compatibility-list'
+12) `Fuel OpenStack Hardware Compatibility List <https://www.mirantis.com/products/openstack-drivers-and-plugins/hardware-compatibility-list>`_: https://www.mirantis.com/products/openstack-drivers-and-plugins/hardware-compatibility-list
Fuel in OPNFV
=============
-13) `OPNFV Installation instruction for the Colorado release of OPNFV when using Fuel as a deployment tool <http://artifacts.opnfv.org/fuel/colorado/docs/installation-instruction.html>`_ 'http://artifacts.opnfv.org/fuel/colorado/docs/installation-instruction.html'
+13) `OPNFV Installation instruction for the Colorado release of OPNFV when using Fuel as a deployment tool <http://artifacts.opnfv.org/fuel/colorado/docs/installationprocedure/index.html>`_ 'http://artifacts.opnfv.org/fuel/colorado/docs/installationprocedure/index.html'
-14) `OPNFV Build instruction for the Colorado release of OPNFV when using Fuel as a deployment tool <http://artifacts.opnfv.org/fuel/colorado/docs/build-instruction.html>`_ 'http://artifacts.opnfv.org/fuel/colorado/docs/build-instruction.html'
+14) `OPNFV Build instruction for the Colorado release of OPNFV when using Fuel as a deployment tool <http://artifacts.opnfv.org/fuel/colorado/docs/buildprocedure/index.html>`_ 'http://artifacts.opnfv.org/fuel/colorado/docs/buildprocedure/index.html'
-15) `OPNFV Release Note for the Colorado release of OPNFV when using Fuel as a deployment tool <http://artifacts.opnfv.org/fuel/colorado/docs/release-notes.html>`_ 'http://artifacts.opnfv.org/fuel/colorado/docs/release-notes.html'
+15) `OPNFV Release Note for the Colorado release of OPNFV when using Fuel as a deployment tool <http://artifacts.opnfv.org/fuel/colorado/docs/releasenotes/index.html>`_ 'http://artifacts.opnfv.org/fuel/colorado/docs/releasenotes/index.html'
.. [1] OpenDaylight Boron RC3.5 is used when Service Function Chaining is enabled in Fuel plugin.