From a90fc16a988cd5eb53de383d0830648f758edaff Mon Sep 17 00:00:00 2001 From: Dan Radez Date: Mon, 4 Jan 2016 15:49:26 -0500 Subject: updating vm creation for virt deployment - replacing brbm with logical names br-netname - replacing instack-virt-setup with tripleo scripts JIRA: APEX-90, APEX-80 Change-Id: I58a15dee8de882e034c8af8a3368ca0647741b13 Signed-off-by: Dan Radez --- build/Makefile | 332 ++++++++-------- build/cache.mk | 80 ---- build/cache.sh | 59 +++ build/config.mk | 0 build/instack.sh | 441 --------------------- build/opnfv-apex-common.spec | 8 +- build/opnfv-apex-onos.spec | 32 ++ build/opnfv-apex-opendaylight-sfc.spec | 6 +- build/opnfv-apex-undercloud.spec | 24 +- build/opnfv-apex.spec | 9 +- build/opnfv-tripleo-heat-templates.patch | 2 +- build/overcloud-full.sh | 64 +++ build/overcloud-onos.sh | 40 ++ build/overcloud-opendaylight-sfc.sh | 50 +++ build/overcloud-opendaylight.sh | 57 +++ build/undercloud.sh | 57 +++ build/variables.sh | 16 + ci/build.sh | 458 ++++------------------ ci/clean.sh | 12 +- ci/deploy.sh | 251 +++++++----- config/deploy/network/network_settings.yaml | 100 ----- config/network/network_settings.yaml | 100 +++++ docs/installation-instructions/virtualinstall.rst | 19 - include/build.sh.debug | 0 lib/installer/onos/onos_gw_mac_update.sh | 2 +- 25 files changed, 901 insertions(+), 1318 deletions(-) delete mode 100644 build/cache.mk create mode 100644 build/cache.sh delete mode 100644 build/config.mk delete mode 100755 build/instack.sh create mode 100644 build/opnfv-apex-onos.spec create mode 100755 build/overcloud-full.sh create mode 100755 build/overcloud-onos.sh create mode 100755 build/overcloud-opendaylight-sfc.sh create mode 100755 build/overcloud-opendaylight.sh create mode 100755 build/undercloud.sh create mode 100644 build/variables.sh delete mode 100644 config/deploy/network/network_settings.yaml create mode 100644 config/network/network_settings.yaml delete mode 100644 include/build.sh.debug diff --git a/build/Makefile b/build/Makefile index f5a51ebb..53f8a488 100644 --- a/build/Makefile +++ b/build/Makefile @@ -1,7 +1,5 @@ ############################################################################## -# Copyright (c) 2015 Ericsson AB and others. -# stefan.k.berg@ericsson.com -# jonas.bjurel@ericsson.com +# Copyright (c) 2016 Red Hat Inc. # dradez@redhat.com # All rights reserved. 
This program and the accompanying materials # are made available under the terms of the Apache License, Version 2.0 @@ -9,65 +7,183 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## -SHELL = /bin/bash -############################################################################ -# BEGIN of variables to customize -# -#Input args -export UNIT_TEST = FALSE export USE_MASTER = "" -export INTERACTIVE = TRUE export CENTDNLD = http://mirrors.cat.pdx.edu/centos/7.2.1511/isos/x86_64/CentOS-7-x86_64-DVD-1511.iso -export ISOSRC = file:$(shell pwd)/$(shell basename $(CENTDNLD)) -export ISOCACHE = $(shell pwd)/$(shell basename $(ISOSRC)) -export PRODNO = "OPNFV_BGS" -export REVSTATE = "0000" -export NEWISO = $(shell pwd)/release/OPNFV-CentOS-7-x86_64-${REVSTATE}.iso +export CENTISO = $(shell pwd)/$(shell basename $(CENTDNLD)) +export RELEASE = "0" +export ISO = $(shell pwd)/release/OPNFV-CentOS-7-x86_64-${RELEASE}.iso export RPMVERS = $(shell grep Version $(shell pwd)/opnfv-apex.spec | awk '{ print $$2 }') -export APEXRPMCOM = $(shell pwd)/noarch/opnfv-apex-common-$(RPMVERS)-$(shell echo ${REVSTATE} | tr -d '_-').noarch.rpm -export APEXRPMINS = $(shell pwd)/noarch/opnfv-apex-undercloud-$(RPMVERS)-$(shell echo ${REVSTATE} | tr -d '_-').noarch.rpm -export APEXRPM = $(shell pwd)/noarch/opnfv-apex-$(RPMVERS)-$(shell echo ${REVSTATE} | tr -d '_-').noarch.rpm +export RPMCOM = $(shell pwd)/noarch/opnfv-apex-common-$(RPMVERS)-$(shell echo ${RELEASE} | tr -d '_-').noarch.rpm +export RPMUDR = $(shell pwd)/noarch/opnfv-apex-undercloud-$(RPMVERS)-$(shell echo ${RELEASE} | tr -d '_-').noarch.rpm +export RPMODL = $(shell pwd)/noarch/opnfv-apex-$(RPMVERS)-$(shell echo ${RELEASE} | tr -d '_-').noarch.rpm +export RPMONO = $(shell pwd)/noarch/opnfv-apex-onos-$(RPMVERS)-$(shell echo ${RELEASE} | tr -d '_-').noarch.rpm +export RPMSFC = $(shell pwd)/noarch/opnfv-apex-opendaylight-sfc-$(RPMVERS)-$(shell echo ${RELEASE} | tr -d '_-').noarch.rpm -# Note! Invoke with "make REVSTATE=RXXXX all" to make release build! -# Invoke with ICOCACHE=/full/path/to/iso if cached ISO is in non-standard location. 
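With the variable renames above (REVSTATE becomes RELEASE, NEWISO becomes ISO, ISOCACHE becomes CENTISO), the release-build invocation from the removed note translates roughly to the sketch below; the release id is a placeholder, and only the iso target plus the exported variables defined in this Makefile are assumed. CENTDNLD can be overridden the same way to point at an already-downloaded CentOS DVD image.

    # illustrative only: release build under the new variable names
    make RELEASE=20160104 iso
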
+.PHONY: all +all: iso -#Build variables -export BUILD_BASE := $(shell pwd) -export CACHE_DIR := $(BUILD_BASE)/cache -export VERSION_FILE := $(BUILD_BASE)/.versions -export TOPDIR := $(shell pwd) +.PHONY: clean +clean: images-clean rpms-clean iso-clean -CENTDIR := $(TOPDIR)/centiso -# -# END of variables to customize -############################################################################# +.PHONY: images +images: undercloud overcloud-full overcloud-opendaylight overcloud-onos overcloud-opendaylight-sfc -SUBCLEAN = $(addsuffix .clean,$(SUBDIRS)) +.PHONY: images-clean +images-clean: undercloud-clean overcloud-full-clean overcloud-opendaylight-clean overcloud-onos-clean overcloud-opendaylight-sfc-clean + @rm -rf images/ +.PHONY: rpms +rpms: common-rpm undercloud-rpm opendaylight-rpm onos-rpm opendaylight-sfc-rpm -.PHONY: all -all: iso - @echo "Versions of cached build results built by" $(shell hostname) "at" $(shell date -u) > $(VERSION_FILE) - @echo "cache.mk" $(shell md5sum $(BUILD_BASE)/cache.mk | cut -f1 -d " ") >> $(VERSION_FILE) - @echo "config.mk" $(shell md5sum $(BUILD_BASE)/config.mk | cut -f1 -d " ") >> $(VERSION_FILE) - -############################################################################ -# BEGIN of Include definitions -# -include config.mk -include cache.mk -# -# END Include definitions -############################################################################# - -$(ISOCACHE): - sh -c "test -s $(ISOCACHE) || { wget -nv $(CENTDNLD) ; }" +.PHONY: rpms-clean +rpms-clean: common-rpm-clean undercloud-rpm-clean opendaylight-rpm-clean onos-rpm-clean opendaylight-sfc-rpm-clean + +.PHONY: common-rpm +common-rpm: $(RPMCOM) + +$(RPMCOM): + @echo "Building the Apex Common RPM" + # build the common RPM + pushd ../ && git archive --format=tar.gz --prefix=opnfv-apex-common-$(RPMVERS)/ HEAD > build/opnfv-apex-common.tar.gz + rpmbuild --clean -ba opnfv-apex-common.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)' -D "release $(shell echo $(RELEASE) | tr -d '_-')" + +############### +# UNDERCLOUD # +############### + +.PHONY: undercloud-clean +undercloud-clean: + @rm -f images/undercloud.* + +.PHONY: undercloud +undercloud: images/undercloud.qcow2 + +images/undercloud.qcow2: + @echo "Building the Apex Undercloud Image" + @./undercloud.sh + +.PHONY: undercloud-rpm +undercloud-rpm: images/undercloud.qcow2 $(RPMUDR) + +$(RPMUDR): + @echo "Building the Apex Undercloud RPM" + # build the undercloud RPM + pushd ../ && git archive --format=tar --prefix=opnfv-apex-undercloud-$(RPMVERS)/ HEAD > build/opnfv-apex-undercloud.tar + tar -rf opnfv-apex-undercloud.tar \ + --xform="s:images/undercloud.qcow2:opnfv-apex-undercloud-$(RPMVERS)/build/undercloud.qcow2:" images/undercloud.qcow2 + gzip -f opnfv-apex-undercloud.tar + rpmbuild --clean -ba opnfv-apex-undercloud.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)' -D "release $(shell echo $(RELEASE) | tr -d '_-')" + +############### +# OVERCLOUD # +############### + +.PHONY: overcloud-full-clean +overcloud-full-clean: + @rm -f images/overcloud-full.* + +.PHONY: overcloud-full +overcloud-full: images/overcloud-full.qcow2 + +images/overcloud-full.qcow2: + @echo "Building the Apex Base Overcloud Image" + @./overcloud-full.sh + +############### +# ODL # +############### + +.PHONY: 
overcloud-opendaylight-clean +overcloud-opendaylight-clean: + @rm -f images/overcloud-full-opendaylight.qcow2 + +.PHONY: overcloud-opendaylight +overcloud-opendaylight: images/overcloud-full-opendaylight.qcow2 + +images/overcloud-full-opendaylight.qcow2: images/overcloud-full.qcow2 + @echo "Building the Apex OpenDaylight Overcloud Image" + @./overcloud-opendaylight.sh + +.PHONY: opendaylight-rpm +opendaylight-rpm: overcloud-opendaylight $(RPMODL) + +$(RPMODL): + @echo "Building the Apex OpenDaylight RPM" + # build the overcloud RPM + tar -czf opnfv-apex.tar.gz --xform="s:images/overcloud-full-opendaylight.qcow2:opnfv-apex-$(RPMVERS)/build/images/overcloud-full-opendaylight.qcow2:" images/overcloud-full-opendaylight.qcow2 + rpmbuild --clean -ba opnfv-apex.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)' -D "release $(shell echo $(RELEASE) | tr -d '_-')" + +############### +# ONOS # +############### + +.PHONY: overcloud-onos-clean +overcloud-onos-clean: + @rm -f images/overcloud-full-onos.qcow2 + @rm -rf images/puppet-onos + @rm -f images/puppet-onos.tar.gz + +.PHONY: overcloud-onos +overcloud-onos: images/overcloud-full-onos.qcow2 + +images/overcloud-full-onos.qcow2: images/overcloud-full.qcow2 + @echo "Building the Apex ONOS Overcloud Image" + @./overcloud-onos.sh + +.PHONY: onos-rpm-clean +onos-rpm-clean: + @rpmbuild --clean opnfv-apex-onos.spec -D "release $(shell echo $RELEASE | tr -d '_-')" + +.PHONY: onos-rpm +onos-rpm: overcloud-onos $(RPMONO) + +$(RPMONO): + @echo "Building the Apex ONOS RPM" + # build the overcloud RPM + tar -czf opnfv-apex-onos.tar.gz --xform="s:images/overcloud-full-onos.qcow2:opnfv-apex-onos-$(RPMVERS)/build/images/overcloud-full-onos.qcow2:" images/overcloud-full-onos.qcow2 + rpmbuild --clean -ba opnfv-apex-onos.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)' -D "release $(shell echo $(RELEASE) | tr -d '_-')" + +############### +# ODL-SFC # +############### + +.PHONY: overcloud-opendaylight-sfc-clean +overcloud-opendaylight-sfc-clean: + @rm -f images/overcloud-full-opendaylight-sfc.qcow2 + +.PHONY: overcloud-opendaylight-sfc +overcloud-opendaylight-sfc: images/overcloud-full-opendaylight-sfc.qcow2 + +images/overcloud-full-opendaylight-sfc.qcow2: images/overcloud-full-opendaylight.qcow2 + @echo "Building the Apex OpenDaylight Overcloud Image" + @./overcloud-opendaylight-sfc.sh + +.PHONY: opendaylight-sfc-rpm +opendaylight-sfc-rpm: overcloud-opendaylight-sfc $(RPMSFC) + +$(RPMSFC): + @echo "Building the Apex OpenDaylight SFC RPM" + tar -czf opnfv-apex-opendaylight-sfc.tar.gz --xform="s:images/overcloud-full-opendaylight-sfc.qcow2:opnfv-apex-opendaylight-sfc-$(RPMVERS)/build/images/overcloud-full-opendaylight-sfc.qcow2:" images/overcloud-full-opendaylight-sfc.qcow2 + rpmbuild --clean -ba opnfv-apex-opendaylight-sfc.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)' -D "release $(shell echo $(RELEASE) | tr -d '_-')" + +############### +# ISO # +############### + +$(CENTISO): + curl $(CENTDNLD) -z $(CENTISO) -o $(CENTISO) --verbose --silent --location + +.PHONY: iso-clean +iso-clean: + @rm -Rf centos + @rm -Rf release + @rm -f $(ISO) .PHONY: mount-centiso umount-centiso -mount-centiso: 
$(ISOCACHE) +mount-centiso: $(CENTISO) @echo "Mounting CentOS ISO in $(CENTDIR)" @mkdir -p $(CENTDIR) - @fuseiso $(ISOCACHE) $(CENTDIR) + @fuseiso $(CENTISO) $(CENTDIR) umount-centiso: @set +e @@ -76,119 +192,25 @@ umount-centiso: @rmdir $(CENTDIR) @set -e -.PHONY: build-clean $(SUBCLEAN) -build-clean: instack-clean $(SUBCLEAN) - @rm -Rf centos - @rm -Rf release - @rm -Rf newiso - @rm -f $(NEWISO) - -.PHONY: clean $(SUBCLEAN) -clean: clean-cache build-clean $(SUBCLEAN) - @rm -f *.iso - @rm -Rf release - @rm -Rf newiso - @rm -f $(NEWISO) - @rm -f $(BUILD_BASE)/.versions - -$(SUBCLEAN): %.clean: - $(MAKE) -C $* -f Makefile clean - -.PHONY: rpm-clean -rpm-clean: - @rm -f build/opnfv-apex-common.tar.gz - @rm -f build/opnfv-apex-undercloud.tar.gz - @rm -f build/opnfv-apex.tar.gz - -.PHONY: rpm -rpm: - # build the common RPM - pushd ../ && git archive --format=tar.gz --prefix=opnfv-apex-common-$(RPMVERS)/ HEAD > build/opnfv-apex-common.tar.gz - rpmbuild -ba opnfv-apex-common.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)' -D "release $(shell echo $(REVSTATE) | tr -d '_-')" - # build the undercloud RPM - tar -czf opnfv-apex-undercloud.tar.gz \ - --xform="s:stack/instack.qcow2:opnfv-apex-undercloud-$(RPMVERS)/build/instack.qcow2:" \ - --xform="s:instack.xml:opnfv-apex-undercloud-$(RPMVERS)/build/instack.xml:" \ - --xform="s:baremetalbrbm_brbm1_brbm2_brbm3_0.xml:opnfv-apex-undercloud-$(RPMVERS)/build/baremetalbrbm_brbm1_brbm2_brbm3_0.xml:" \ - --xform="s:baremetalbrbm_brbm1_brbm2_brbm3_1.xml:opnfv-apex-undercloud-$(RPMVERS)/build/baremetalbrbm_brbm1_brbm2_brbm3_1.xml:" \ - --xform="s:baremetalbrbm_brbm1_brbm2_brbm3_2.xml:opnfv-apex-undercloud-$(RPMVERS)/build/baremetalbrbm_brbm1_brbm2_brbm3_2.xml:" \ - --xform="s:baremetalbrbm_brbm1_brbm2_brbm3_3.xml:opnfv-apex-undercloud-$(RPMVERS)/build/baremetalbrbm_brbm1_brbm2_brbm3_3.xml:" \ - --xform="s:baremetalbrbm_brbm1_brbm2_brbm3_4.xml:opnfv-apex-undercloud-$(RPMVERS)/build/baremetalbrbm_brbm1_brbm2_brbm3_4.xml:" \ - --xform="s:brbm-net.xml:opnfv-apex-undercloud-$(RPMVERS)/build/brbm-net.xml:" \ - --xform="s:brbm1-net.xml:opnfv-apex-undercloud-$(RPMVERS)/build/brbm1-net.xml:" \ - --xform="s:brbm2-net.xml:opnfv-apex-undercloud-$(RPMVERS)/build/brbm2-net.xml:" \ - --xform="s:brbm3-net.xml:opnfv-apex-undercloud-$(RPMVERS)/build/brbm3-net.xml:" \ - --xform="s:default-pool.xml:opnfv-apex-undercloud-$(RPMVERS)/build/default-pool.xml:" \ - --xform="s:network-environment.yaml:opnfv-apex-undercloud-$(RPMVERS)/build/network-environment.yaml:" \ - --xform="s:nics/controller.yaml:opnfv-apex-undercloud-$(RPMVERS)/build/nics/controller.yaml:" \ - --xform="s:nics/compute.yaml:opnfv-apex-undercloud-$(RPMVERS)/build/nics/compute.yaml:" \ - --xform="s:nics/controller_private.yaml:opnfv-apex-undercloud-$(RPMVERS)/build/nics/controller_private.yaml:" \ - --xform="s:nics/compute_private.yaml:opnfv-apex-undercloud-$(RPMVERS)/build/nics/compute_private.yaml:" \ - --xform="s:nics/controller_storage.yaml:opnfv-apex-undercloud-$(RPMVERS)/build/nics/controller_storage.yaml:" \ - --xform="s:nics/compute_storage.yaml:opnfv-apex-undercloud-$(RPMVERS)/build/nics/compute_storage.yaml:" \ - --xform="s:nics/controller_private_storage.yaml:opnfv-apex-undercloud-$(RPMVERS)/build/nics/controller_private_storage.yaml:" \ - --xform="s:nics/compute_private_storage.yaml:opnfv-apex-undercloud-$(RPMVERS)/build/nics/compute_private_storage.yaml:" \ - 
--xform="s:nics/compute_br-ex.yaml:opnfv-apex-undercloud-$(RPMVERS)/build/nics/compute_br-ex.yaml:" \ - --xform="s:nics/compute_private_br-ex.yaml:opnfv-apex-undercloud-$(RPMVERS)/build/nics/compute_private_br-ex.yaml:" \ - --xform="s:nics/compute_storage_br-ex.yaml:opnfv-apex-undercloud-$(RPMVERS)/build/nics/compute_storage_br-ex.yaml:" \ - --xform="s:nics/compute_private_storage_br-ex.yaml:opnfv-apex-undercloud-$(RPMVERS)/build/nics/compute_private_storage_br-ex.yaml:" \ - --xform="s:nics/compute_no-public-ip.yaml:opnfv-apex-undercloud-$(RPMVERS)/build/nics/compute_no-public-ip.yaml:" \ - --xform="s:nics/compute_private_no-public-ip.yaml:opnfv-apex-undercloud-$(RPMVERS)/build/nics/compute_private_no-public-ip.yaml:" \ - --xform="s:nics/compute_storage_no-public-ip.yaml:opnfv-apex-undercloud-$(RPMVERS)/build/nics/compute_storage_no-public-ip.yaml:" \ - --xform="s:nics/compute_private_storage_no-public-ip.yaml:opnfv-apex-undercloud-$(RPMVERS)/build/nics/compute_private_storage_no-public-ip.yaml:" \ - --xform="s:nics/compute_br-ex_no-public-ip.yaml:opnfv-apex-undercloud-$(RPMVERS)/build/nics/compute_br-ex_no-public-ip.yaml:" \ - --xform="s:nics/compute_private_br-ex_no-public-ip.yaml:opnfv-apex-undercloud-$(RPMVERS)/build/nics/compute_private_br-ex_no-public-ip.yaml:" \ - --xform="s:nics/compute_storage_br-ex_no-public-ip.yaml:opnfv-apex-undercloud-$(RPMVERS)/build/nics/compute_storage_br-ex_no-public-ip.yaml:" \ - --xform="s:nics/compute_private_storage_br-ex_no-public-ip.yaml:opnfv-apex-undercloud-$(RPMVERS)/build/nics/compute_private_storage_br-ex_no-public-ip.yaml:" \ - --xform="s:instackenv-virt.json:opnfv-apex-undercloud-$(RPMVERS)/build/instackenv-virt.json:" \ - --xform="s:instackenv.json.example:opnfv-apex-undercloud-$(RPMVERS)/build/instackenv.json.example:" \ - stack/instack.qcow2 instack.xml baremetalbrbm_brbm1_brbm2_brbm3_0.xml baremetalbrbm_brbm1_brbm2_brbm3_1.xml \ - baremetalbrbm_brbm1_brbm2_brbm3_2.xml baremetalbrbm_brbm1_brbm2_brbm3_3.xml baremetalbrbm_brbm1_brbm2_brbm3_4.xml \ - brbm-net.xml brbm1-net.xml brbm2-net.xml brbm3-net.xml default-pool.xml instackenv-virt.json network-environment.yaml \ - nics/controller.yaml nics/compute.yaml nics/controller_private.yaml nics/compute_private.yaml \ - nics/compute_br-ex.yaml nics/compute_private_br-ex.yaml nics/compute_storage_br-ex.yaml nics/compute_private_storage_br-ex.yaml \ - nics/compute_no-public-ip.yaml nics/compute_private_no-public-ip.yaml nics/compute_storage_no-public-ip.yaml nics/compute_private_storage_no-public-ip.yaml \ - nics/compute_br-ex_no-public-ip.yaml nics/compute_private_br-ex_no-public-ip.yaml nics/compute_storage_br-ex_no-public-ip.yaml nics/compute_private_storage_br-ex_no-public-ip.yaml \ - nics/controller_storage.yaml nics/compute_storage.yaml nics/controller_private_storage.yaml \ - nics/compute_private_storage.yaml instackenv-virt.json instackenv.json.example - rpmbuild -ba opnfv-apex-undercloud.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)' -D "release $(shell echo $(REVSTATE) | tr -d '_-')" - # build the overcloud RPM - tar -czf opnfv-apex.tar.gz --xform="s:stack/overcloud-full-opendaylight.qcow2:opnfv-apex-$(RPMVERS)/build/stack/overcloud-full-opendaylight.qcow2:" stack/overcloud-full-opendaylight.qcow2 - rpmbuild -ba opnfv-apex.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo 
`pwd`)' -D '_srcrpmdir %(echo `pwd`)' -D "release $(shell echo $(REVSTATE) | tr -d '_-')" - tar -czf opnfv-apex-opendaylight-sfc.tar.gz --xform="s:stack/overcloud-full-opendaylight-sfc.qcow2:opnfv-apex-opendaylight-sfc-$(RPMVERS)/build/stack/overcloud-full-opendaylight-sfc.qcow2:" stack/overcloud-full-opendaylight-sfc.qcow2 - rpmbuild -ba opnfv-apex-opendaylight-sfc.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)' -D "release $(shell echo $(REVSTATE) | tr -d '_-')" - -.PHONY: instack -instack: - @./instack.sh $(USE_MASTER) - -.PHONY: instack-clean -instack-clean: - rm -f instackenv-virt.json - rm -f baremetalbrbm_brbm1_brbm2_brbm3_0.xml - rm -f baremetalbrbm_brbm1_brbm2_brbm3_1.xml - rm -f baremetalbrbm_brbm1_brbm2_brbm3_2.xml - rm -f baremetalbrbm_brbm1_brbm2_brbm3_3.xml - rm -f baremetalbrbm_brbm1_brbm2_brbm3_4.xml - rm -f instack.xml - rm -rf stack/onos - .PHONY: iso -iso: build-clean instack rpm $(ISOCACHE) +iso: iso-clean images rpms $(CENTISO) + @echo "Building the Apex ISO" @mkdir centos release - cd centos && bsdtar -xf ../$(shell basename $(ISOSRC)) + cd centos && bsdtar -xf ../$(shell basename $(CENTISO)) # modify the installer iso's contents @chmod -R u+w centos @cp -f isolinux.cfg centos/isolinux/isolinux.cfg - @cp $(APEXRPMCOM) centos/Packages - @cp $(APEXRPMINS) centos/Packages - @cp $(APEXRPM) centos/Packages - cd centos/Packages && yumdownloader openvswitch + @ln $(RPMCOM) centos/Packages + @ln $(RPMUDR) centos/Packages + @ln $(RPMODL) centos/Packages + @ln $(RPMONO) centos/Packages + @ln $(RPMSFC) centos/Packages + cd centos/Packages && yumdownloader openvswitch && yumdownloader openstack-tripleo # regenerate yum repo data @echo "Generating new yum metadata" createrepo --update -g ../c7-opnfv-x86_64-comps.xml centos # build the iso @echo "Building OPNFV iso" - mkisofs -b isolinux/isolinux.bin -no-emul-boot -boot-load-size 4 -boot-info-table -V "OPNFV CentOS 7 x86_64" -R -J -v -T -o $(NEWISO) centos - isohybrid $(NEWISO) - @printf "\n\nISO is built at $(NEWISO)\n\n" + mkisofs -b isolinux/isolinux.bin -no-emul-boot -boot-load-size 4 -boot-info-table -V "OPNFV CentOS 7 x86_64" -R -J -v -T -o $(ISO) centos + isohybrid $(ISO) + @printf "\n\nISO is built at $(ISO)\n\n" diff --git a/build/cache.mk b/build/cache.mk deleted file mode 100644 index acec36ad..00000000 --- a/build/cache.mk +++ /dev/null @@ -1,80 +0,0 @@ -############################################################################# -# Copyright (c) 2015 Ericsson AB and others. -# stefan.k.berg@ericsson.com -# jonas.bjurel@ericsson.com -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - -SHELL = /bin/bash -CACHEVALIDATE := $(addsuffix .validate,$(SUBDIRS)) -CACHECLEAN := $(addsuffix .clean,$(CACHEFILES) $(CACHEDIRS)) - -############################################################################ -# BEGIN of variables to customize -# -CACHEFILES += .versions -CACHEFILES += stack/overcloud-full.tar -CACHEFILES += stack/undercloud.qcow2 -CACHEFILES += $(shell basename $(ISOSRC)) -# -# END of variables to customize -############################################################################ - -.PHONY: prepare-cache -prepare-cache: make-cache-dir $(CACHEDIRS) $(CACHEFILES) - -.PHONY: make-cache-dir -make-cache-dir: - @rm -rf ${CACHE_DIR} - @mkdir ${CACHE_DIR} - -.PHONY: clean-cache -clean-cache: $(CACHECLEAN) - @rm -rf ${CACHE_DIR} - -.PHONY: $(CACHEDIRS) -$(CACHEDIRS): - @mkdir -p $(dir $(CACHE_DIR)/$@) - @if [ ! -d $(BUILD_BASE)/$@ ]; then\ - mkdir -p $(BUILD_BASE)/$@;\ - fi - @ln -s $(BUILD_BASE)/$@ $(CACHE_DIR)/$@ - -.PHONY: $(CACHEFILES) -$(CACHEFILES): - @mkdir -p $(dir $(CACHE_DIR)/$@) - @if [ ! -d $(dir $(BUILD_BASE)/$@) ]; then\ - mkdir -p $(dir $(BUILD_BASE)/$@);\ - fi - - @if [ ! -f $(BUILD_BASE)/$@ ]; then\ - echo " " > $(BUILD_BASE)/$@;\ - ln -s $(BUILD_BASE)/$@ $(CACHE_DIR)/$@;\ - rm -f $(BUILD_BASE)/$@;\ - else\ - ln -s $(BUILD_BASE)/$@ $(CACHE_DIR)/$@;\ - fi - -.PHONY: validate-cache -validate-cache: $(CACHEVALIDATE) - @if [[ $(shell md5sum $(BUILD_BASE)/config.mk | cut -f1 -d " ") != $(shell cat $(VERSION_FILE) | grep config.mk | awk '{print $$NF}') ]]; then\ - echo "Cache does not match current config.mk definition, cache must be rebuilt";\ - exit 1;\ - fi; - - @if [[ $(shell md5sum $(BUILD_BASE)/cache.mk | cut -f1 -d " ") != $(shell cat $(VERSION_FILE) | grep cache.mk | awk '{print $$NF}') ]]; then\ - echo "Cache does not match current cache.mk definition, cache must be rebuilt";\ - exit 1;\ - fi; - -.PHONY: $(CACHEVALIDATE) -$(CACHEVALIDATE): %.validate: - @echo VALIDATE $(CACHEVALIDATE) - $(MAKE) -C $* -f Makefile validate-cache - -.PHONY: $(CACHECLEAN) -$(CACHECLEAN): %.clean: - rm -rf ${CACHE_DIR}/$* diff --git a/build/cache.sh b/build/cache.sh new file mode 100644 index 00000000..b8cd8ecc --- /dev/null +++ b/build/cache.sh @@ -0,0 +1,59 @@ +#!/bin/sh +############################################################################## +# Copyright (c) 2016 Red Hat Inc. +# Dan Radez +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +CACHE_DIR="$(pwd)/cache" + +# Make sure the cache dir exists +function cache_dir { + if [ ! -d $CACHE_DIR/ ]; then mkdir $CACHE_DIR/; fi + if [ ! 
-f $CACHE_DIR/.cache ]; then touch $CACHE_DIR/.cache; fi + echo "Cache Dir: $CACHE_DIR" +} + +function cache_git_tar { + echo "cache_git_tar git ls-remote" +} + +# $1 = download url +# $2 = filename to write to +function curl_file { + echo "Downloading $1" + echo "Cache location: $CACHE_DIR/$2" + curl -L $1 > $CACHE_DIR/$2 + sed -i "/$2/d" $CACHE_DIR/.cache + echo "$(md5sum $CACHE_DIR/$2) $2" >> $CACHE_DIR/.cache +} + +# $1 = download url +function populate_cache { + cache_dir + + # get the file name + filename="${1##*/}" + + # check if the cache file exists + # and if it has an md5 compare that + echo "Checking cache file: $1" + if [ ! -f $CACHE_DIR/${filename} ]; then + curl_file $1 $filename + else + remote_md5="$(curl -L ${1}.md5 | awk {'print $1'})" + if [ -z "$remote_md5" ]; then + echo "Got empty MD5 from remote for $filename, skipping MD5 check" + elif [ "$remote_md5" != "$(grep ${filename} $CACHE_DIR/.cache | awk {'print $1'})" ]; then + curl_file $1 $filename + fi + fi +} + +# $1 = filename to get from cache +function get_cached_file { + cp -f $CACHE_DIR/$1 . +} diff --git a/build/config.mk b/build/config.mk deleted file mode 100644 index e69de29b..00000000 diff --git a/build/instack.sh b/build/instack.sh deleted file mode 100755 index 2ba18b5a..00000000 --- a/build/instack.sh +++ /dev/null @@ -1,441 +0,0 @@ -#!/bin/sh -############################################################################## -# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others. -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## -set -e -declare -i CNT - -#rdo_images_uri=https://repos.fedorapeople.org/repos/openstack-m/rdo-images-centos-liberty-opnfv -rdo_images_uri=file:///stable-images -rdo_images_cache=/stable-images -onos_artifacts_uri=file:///stable-images/onos -odl_artifacts_cache=/stable-images/odl - -vm_index=4 -RDO_RELEASE=liberty -SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null) -OPNFV_NETWORK_TYPES="admin_network private_network public_network storage_network" - -# check for dependency packages -for i in rpm-build createrepo libguestfs-tools python-docutils bsdtar; do - if ! rpm -q $i > /dev/null; then - sudo yum install -y $i - fi -done - -# RDO Manager expects a stack user to exist, this checks for one -# and creates it if you are root -if ! id stack > /dev/null; then - sudo useradd stack; - sudo echo 'stack ALL=(root) NOPASSWD:ALL' | sudo tee -a /etc/sudoers.d/stack - sudo echo 'Defaults:stack !requiretty' | sudo tee -a /etc/sudoers.d/stack - sudo chmod 0440 /etc/sudoers.d/stack - echo 'Added user stack' -fi - -# ensure that I can ssh as the stack user -if ! sudo grep "$(cat ~/.ssh/id_rsa.pub)" /home/stack/.ssh/authorized_keys; then - if ! sudo ls -d /home/stack/.ssh/ ; then - sudo mkdir /home/stack/.ssh - sudo chown stack:stack /home/stack/.ssh - sudo chmod 700 /home/stack/.ssh - fi - USER=$(whoami) sudo sh -c "cat ~$USER/.ssh/id_rsa.pub >> /home/stack/.ssh/authorized_keys" - sudo chown stack:stack /home/stack/.ssh/authorized_keys -fi - -# clean up stack user previously build instack disk images -ssh -T ${SSH_OPTIONS[@]} stack@localhost "rm -f instack*.qcow2" - -# Yum repo setup for building the undercloud -if ! 
rpm -q rdo-release > /dev/null && [ "$1" != "-master" ]; then - #pulling from current-passed-ci instead of release repos - #sudo yum install -y https://rdoproject.org/repos/openstack-${RDO_RELEASE}/rdo-release-${RDO_RELEASE}.rpm - sudo yum -y install yum-plugin-priorities - sudo yum-config-manager --disable openstack-${RDO_RELEASE} - sudo curl -o /etc/yum.repos.d/delorean.repo http://trunk.rdoproject.org/centos7-liberty/current-passed-ci/delorean.repo - sudo curl -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos7-liberty/delorean-deps.repo - sudo rm -f /etc/yum.repos.d/delorean-current.repo -elif [ "$1" == "-master" ]; then - sudo yum -y install yum-plugin-priorities - sudo yum-config-manager --disable openstack-${RDO_RELEASE} - sudo curl -o /etc/yum.repos.d/delorean.repo http://trunk.rdoproject.org/centos7/current-passed-ci/delorean.repo - sudo curl -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos7-liberty/delorean-deps.repo - sudo rm -f /etc/yum.repos.d/delorean-current.repo -fi - -# ensure the undercloud package is installed so we can build the undercloud -if ! rpm -q instack-undercloud > /dev/null; then - sudo yum install -y python-tripleoclient -fi - -# ensure openvswitch is installed -if ! rpm -q openvswitch > /dev/null; then - sudo yum install -y openvswitch -fi - -# ensure libvirt is installed -if ! rpm -q libvirt-daemon-kvm > /dev/null; then - sudo yum install -y libvirt-daemon-kvm -fi - -# clean this up incase it's there -sudo rm -f /tmp/instack.answers - -# ensure that no previous undercloud VMs are running -sudo ../ci/clean.sh -# and rebuild the bare undercloud VMs -ssh -T ${SSH_OPTIONS[@]} stack@localhost < /dev/null && [ $CNT -gt 0 ]; do - echo -n "." - sleep 3 - CNT=CNT-1 -done -# TODO fail if CNT=0 - -# yum repo, triple-o package and ssh key setup for the undercloud -ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" < /dev/null; then - yum install http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm -fi - -yum -y install yum-plugin-priorities -curl -o /etc/yum.repos.d/delorean.repo http://trunk.rdoproject.org/centos7-liberty/current-passed-ci/delorean.repo -curl -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos7-liberty/delorean-deps.repo - -cp /root/.ssh/authorized_keys /home/stack/.ssh/authorized_keys -chown stack:stack /home/stack/.ssh/authorized_keys -EOI - -# copy instackenv file for future virt deployments -if [ ! -d stack ]; then mkdir stack; fi -scp ${SSH_OPTIONS[@]} stack@$UNDERCLOUD:instackenv.json stack/instackenv.json - -# make a copy of instack VM's definitions, and disk image -# it must be stopped to make a copy of its disk image -ssh -T ${SSH_OPTIONS[@]} stack@localhost < /dev/null && [ $CNT -gt 0 ]; do - echo -n "." 
- sleep 5 - CNT=CNT-1 -done -if virsh list | grep instack > /dev/null; then - echo "instack failed to shutdown for copy" - exit 1 -fi - -echo $'\nGenerating libvirt configuration' -for i in \$(seq 0 $vm_index); do - virsh dumpxml baremetalbrbm_brbm1_brbm2_brbm3_\$i | awk '/model type='\''virtio'\''/{c++;if(c==2){sub("model type='\''virtio'\''","model type='\''rtl8139'\''");c=0}}1' > baremetalbrbm_brbm1_brbm2_brbm3_\$i.xml -done - -virsh dumpxml instack > instack.xml -virsh net-dumpxml brbm > brbm-net.xml -virsh net-dumpxml brbm1 > brbm1-net.xml -virsh net-dumpxml brbm2> brbm2-net.xml -virsh net-dumpxml brbm3 > brbm3-net.xml -virsh pool-dumpxml default > default-pool.xml -EOI - -# copy off the instack artifacts -echo "Copying instack files to build directory" -for i in $(seq 0 $vm_index); do - scp ${SSH_OPTIONS[@]} stack@localhost:baremetalbrbm_brbm1_brbm2_brbm3_${i}.xml . -done - -scp ${SSH_OPTIONS[@]} stack@localhost:instack.xml . -scp ${SSH_OPTIONS[@]} stack@localhost:brbm-net.xml . -scp ${SSH_OPTIONS[@]} stack@localhost:brbm1-net.xml . -scp ${SSH_OPTIONS[@]} stack@localhost:brbm2-net.xml . -scp ${SSH_OPTIONS[@]} stack@localhost:brbm3-net.xml . -scp ${SSH_OPTIONS[@]} stack@localhost:default-pool.xml . - -# pull down the the built images -echo "Copying overcloud resources" -IMAGES="overcloud-full.tar" -IMAGES+=" undercloud.qcow2" - -for i in $IMAGES; do - # download prebuilt images from RDO Project - if [ ! -f stack/$i ] || [ "$(curl -L $rdo_images_uri/${i}.md5 | awk {'print $1'})" != "$(md5sum stack/$i | awk {'print $1'})" ] ; then - #if [ $i == "undercloud.qcow2" ]; then - ### there's a problem with the Content-Length reported by the centos artifacts - ### server so using wget for it until a resolution is figured out. - #wget -nv -O stack/$i $rdo_images_uri/$i - #else - curl $rdo_images_uri/$i -o stack/$i - #fi - fi - # only untar the tar files - if [ "${i##*.}" == "tar" ]; then tar -xf stack/$i -C stack/; fi -done - -#Adding OpenStack packages to undercloud -pushd stack -cp undercloud.qcow2 instack.qcow2 -LIBGUESTFS_BACKEND=direct virt-customize --install yum-priorities -a instack.qcow2 -PACKAGES="qemu-kvm-common,qemu-kvm,libvirt-daemon-kvm,libguestfs,python-libguestfs,openstack-nova-compute" -PACKAGES+=",openstack-swift,openstack-ceilometer-api,openstack-neutron-ml2,openstack-ceilometer-alarm" -PACKAGES+=",openstack-nova-conductor,openstack-ironic-inspector,openstack-ironic-api,python-openvswitch" -PACKAGES+=",openstack-glance,python-glance,python-troveclient,openstack-puppet-modules" -PACKAGES+=",openstack-neutron,openstack-neutron-openvswitch,openstack-nova-scheduler,openstack-keystone,openstack-swift-account" -PACKAGES+=",openstack-swift-container,openstack-swift-object,openstack-swift-plugin-swift3,openstack-swift-proxy" -PACKAGES+=",openstack-nova-api,openstack-nova-cert,openstack-heat-api-cfn,openstack-heat-api," -PACKAGES+=",openstack-ceilometer-central,openstack-ceilometer-polling,openstack-ceilometer-collector," -PACKAGES+=",openstack-heat-api-cloudwatch,openstack-heat-engine,openstack-heat-common,openstack-ceilometer-notification" -PACKAGES+=",hiera,puppet,memcached,keepalived,mariadb,mariadb-server,rabbitmq-server,python-pbr,python-proliantutils" -PACKAGES+=",ceph-common" - -# install the packages above and enabling ceph to live on the controller -# OpenWSMan package update supports the AMT Ironic driver for the TealBox -LIBGUESTFS_BACKEND=direct virt-customize --install $PACKAGES \ - --run-command "sed -i '/ControllerEnableCephStorage/c\\ ControllerEnableCephStorage: true' 
/usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml" \ - --run-command "sed -i '/ \$enable_ceph = /c\\ \$enable_ceph = true' /usr/share/openstack-tripleo-heat-templates/puppet/manifests/overcloud_controller_pacemaker.pp" \ - --run-command "sed -i '/ \$enable_ceph = /c\\ \$enable_ceph = true' /usr/share/openstack-tripleo-heat-templates/puppet/manifests/overcloud_controller.pp" \ - --run-command "curl http://download.opensuse.org/repositories/Openwsman/CentOS_CentOS-7/Openwsman.repo > /etc/yum.repos.d/wsman.repo" \ - --run-command "yum update -y openwsman*" \ - --run-command "sed -i '/pxe_wol/c\\ enabled_drivers => ['pxe_ipmitool', 'pxe_ssh', 'pxe_drac', 'pxe_ilo', 'pxe_wol', 'pxe_amt'],' /usr/share/instack-undercloud/puppet-stack-config/puppet-stack-config.pp" \ - -a instack.qcow2 -popd - - -pushd stack - -########################################################## -##### Prep initial overcloud image with common deps ##### -########################################################## - -# make a copy of the cached overcloud-full image -cp overcloud-full.qcow2 overcloud-full-opendaylight.qcow2 -# Update puppet-aodh it's old -rm -rf aodh -git clone https://github.com/openstack/puppet-aodh aodh -pushd aodh -git checkout stable/liberty -popd -tar -czf puppet-aodh.tar.gz aodh - -# Add epel, aodh and ceph, remove openstack-neutron-openvswitch -AODH_PKG="openstack-aodh-api,openstack-aodh-common,openstack-aodh-compat,openstack-aodh-evaluator,openstack-aodh-expirer" -AODH_PKG+=",openstack-aodh-listener,openstack-aodh-notifier" -LIBGUESTFS_BACKEND=direct virt-customize \ - --upload puppet-aodh.tar.gz:/etc/puppet/modules/ \ - --run-command "cd /etc/puppet/modules/ && rm -rf aodh && tar xzf puppet-aodh.tar.gz" \ - --run-command "yum remove -y openstack-neutron-openvswitch" \ - --run-command "echo 'nf_conntrack_proto_sctp' > /etc/modules-load.d/nf_conntrack_proto_sctp.conf" \ - --run-command "if ! 
rpm -q epel-release > /dev/null; then yum install -y http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm; fi" \ - --install https://github.com/michaeltchapman/networking_rpm/raw/master/openstack-neutron-bgpvpn-2015.2-1.el7.centos.noarch.rpm \ - --install "$AODH_PKG,ceph" \ - -a overcloud-full-opendaylight.qcow2 - -############################################### -##### Adding OpenDaylight to overcloud ##### -############################################### - -cat > /tmp/opendaylight.repo << EOF -[opendaylight] -name=OpenDaylight \$releasever - \$basearch -baseurl=http://cbs.centos.org/repos/nfv7-opendaylight-40-release/\$basearch/os/ -enabled=1 -gpgcheck=0 -EOF - -odlrpm=opendaylight-4.0.0-1.el7.noarch.rpm -if [ -f ${rdo_images_cache}/$odlrpm ]; then - LIBGUESTFS_BACKEND=direct virt-customize --upload ${rdo_images_cache}/$odlrpm:/tmp/ - opendaylight=/tmp/$odlrpm -else - opendaylight=opendaylight -fi - -# install ODL packages -LIBGUESTFS_BACKEND=direct virt-customize \ - --upload /tmp/opendaylight.repo:/etc/yum.repos.d/opendaylight.repo \ - --install ${opendaylight},python-networking-odl \ - -a overcloud-full-opendaylight.qcow2 - -# install Jolokia for ODL HA -LIBGUESTFS_BACKEND=direct virt-customize \ - --upload ${odl_artifacts_cache}/jolokia.tar.gz:/tmp/ \ - --run-command "tar -xvf /tmp/jolokia.tar.gz -C /opt/opendaylight/system/org" \ - -a overcloud-full-opendaylight.qcow2 - -## WORK AROUND -## when OpenDaylight lands in upstream RDO manager this can be removed - -# upload the opendaylight puppet module -rm -rf puppet-opendaylight -# TMP FIX to see if this works -git clone -b odl_ha_proxy_fix https://github.com/trozet/puppet-opendaylight -pushd puppet-opendaylight -git archive --format=tar.gz --prefix=opendaylight/ HEAD > ../puppet-opendaylight.tar.gz -popd - -# grab latest puppet-neutron module -rm -rf puppet-neutron -git clone -b stable/liberty https://github.com/openstack/puppet-neutron.git -pushd puppet-neutron -git archive --format=tar.gz --prefix=neutron/ HEAD > ../puppet-neutron.tar.gz -popd - -LIBGUESTFS_BACKEND=direct virt-customize --upload puppet-opendaylight.tar.gz:/etc/puppet/modules/ \ - --run-command "cd /etc/puppet/modules/ && tar xzf puppet-opendaylight.tar.gz" \ - --run-command "rm -rf /etc/puppet/modules/neutron" \ - --upload puppet-neutron.tar.gz:/etc/puppet/modules/ \ - --run-command "cd /etc/puppet/modules/ && tar xzf puppet-neutron.tar.gz" \ - -a overcloud-full-opendaylight.qcow2 - -# Patch in OpenDaylight installation and configuration -LIBGUESTFS_BACKEND=direct virt-customize --upload ../opnfv-tripleo-heat-templates.patch:/tmp \ - --run-command "cd /usr/share/openstack-tripleo-heat-templates/ && patch -Np1 < /tmp/opnfv-tripleo-heat-templates.patch" \ - -a instack.qcow2 - -# Patch in OPNFV custom puppet-tripleO -LIBGUESTFS_BACKEND=direct virt-customize --upload ../opnfv-puppet-tripleo.patch:/tmp \ - --run-command "cd /etc/puppet/modules/tripleo && patch -Np1 < /tmp/opnfv-puppet-tripleo.patch" \ - -a overcloud-full-opendaylight.qcow2 - -# REMOVE ME AFTER Brahmaputra -LIBGUESTFS_BACKEND=direct virt-customize --upload ../puppet-cinder-quota-fix.patch:/tmp \ - --run-command "cd /etc/puppet/modules/cinder && patch -Np1 < /tmp/puppet-cinder-quota-fix.patch" \ - -a overcloud-full-opendaylight.qcow2 - -LIBGUESTFS_BACKEND=direct virt-customize --upload ../aodh-puppet-tripleo.patch:/tmp \ - --run-command "cd /etc/puppet/modules/tripleo && patch -Np1 < /tmp/aodh-puppet-tripleo.patch" \ - -a overcloud-full-opendaylight.qcow2 - -# adds tripleoclient aodh 
workaround -# for keystone -LIBGUESTFS_BACKEND=direct virt-customize --upload ../aodh-tripleoclient.patch:/tmp \ - --run-command "cd /usr/lib/python2.7/site-packages/tripleoclient && patch -Np1 < /tmp/aodh-tripleoclient.patch" \ - --upload ../aodh-os-cloud-config.patch:/tmp \ - --run-command "cd /usr/lib/python2.7/site-packages/os_cloud_config && patch -Np1 < /tmp/aodh-os-cloud-config.patch" \ - -a instack.qcow2 -# END REMOVE ME AFTER Brahmaputra - -################################################ -##### Adding SFC+OpenDaylight overcloud ##### -################################################ - -# work around for XFS grow bug -# http://xfs.org/index.php/XFS_FAQ#Q:_Why_do_I_receive_No_space_left_on_device_after_xfs_growfs.3F -cat > /tmp/xfs-grow-remount-fix.service << EOF -[Unit] -Description=XFS Grow Bug Remount -After=network.target -Before=getty@tty1.service - -[Service] -Type=oneshot -ExecStart=/bin/bash -c "echo 'XFS Grow Bug Remount Sleeping 180s' && sleep 180 && echo 'XFS Grow Bug Remounting Now' && mount -o remount,inode64 /" -RemainAfterExit=no - -[Install] -WantedBy=multi-user.target -EOF - - -#copy opendaylight overcloud full to isolate odl-sfc -cp overcloud-full-opendaylight.qcow2 overcloud-full-opendaylight-sfc.qcow2 - -LIBGUESTFS_BACKEND=direct virt-customize \ - --upload "/tmp/xfs-grow-remount-fix.service:/etc/systemd/system/xfs-grow-remount-fix.service" \ - --run-command "chmod 664 /etc/systemd/system/xfs-grow-remount-fix.service" \ - --run-command "systemctl enable xfs-grow-remount-fix.service" \ - --install 'https://radez.fedorapeople.org/kernel-ml-3.13.7-1.el7.centos.x86_64.rpm' \ - --run-command 'grub2-set-default "\$(grep -P \"submenu|^menuentry\" /boot/grub2/grub.cfg | cut -d \"\\x27\" | head -n 1)"' \ - --install 'https://radez.fedorapeople.org/openvswitch-kmod-2.3.90-1.el7.centos.x86_64.rpm' \ - --run-command 'yum downgrade -y https://radez.fedorapeople.org/openvswitch-2.3.90-1.x86_64.rpm' \ - --run-command 'rm -f /lib/modules/3.13.7-1.el7.centos.x86_64/kernel/net/openvswitch/openvswitch.ko' \ - --run-command 'ln -s /lib/modules/3.13.7-1.el7.centos.x86_64/kernel/extra/openvswitch/openvswitch.ko /lib/modules/3.13.7-1.el7.centos.x86_64/kernel/net/openvswitch/openvswitch.ko' \ - -a overcloud-full-opendaylight-sfc.qcow2 - - - -############################################### -##### Adding ONOS to overcloud ##### -############################################### - -## WORK AROUND -## when ONOS lands in upstream OPNFV artifacts this can be removed - -# upload the onos puppet module - -rm -rf puppet-onos -git clone https://github.com/bobzhouHW/puppet-onos.git -pushd puppet-onos -# download jdk, onos and maven dependancy packages. 
-pushd files -curl ${onos_artifacts_uri}/jdk-8u51-linux-x64.tar.gz -o ./jdk-8u51-linux-x64.tar.gz -curl ${onos_artifacts_uri}/onos-1.3.0.tar.gz -o ./onos-1.3.0.tar.gz -curl ${onos_artifacts_uri}/repository.tar -o ./repository.tar -popd -popd -mv puppet-onos onos -tar -czf puppet-onos.tar.gz onos -LIBGUESTFS_BACKEND=direct virt-customize --upload puppet-onos.tar.gz:/etc/puppet/modules/ \ - --run-command "cd /etc/puppet/modules/ && tar xzf puppet-onos.tar.gz" -a overcloud-full-opendaylight.qcow2 - -## END WORK AROUND - -popd - -# move and Sanitize private keys from instack.json file -mv stack/instackenv.json instackenv-virt.json -sed -i '/pm_password/c\ "pm_password": "INSERT_STACK_USER_PRIV_KEY",' instackenv-virt.json -sed -i '/ssh-key/c\ "ssh-key": "INSERT_STACK_USER_PRIV_KEY",' instackenv-virt.json - -# clean up the VMs -ssh -T ${SSH_OPTIONS[@]} stack@localhost < /dev/null || echo -n '' -virsh undefine instack --remove-all-storage 2> /dev/null || echo -n '' -for i in \$(seq 0 $vm_index); do - virsh destroy baremetalbrbm_brbm1_brbm2_brbm3_\$i 2> /dev/null || echo -n '' - virsh undefine baremetalbrbm_brbm1_brbm2_brbm3_\$i --remove-all-storage 2> /dev/null || echo -n '' -done -EOI - diff --git a/build/opnfv-apex-common.spec b/build/opnfv-apex-common.spec index e0fd475a..a635a52d 100644 --- a/build/opnfv-apex-common.spec +++ b/build/opnfv-apex-common.spec @@ -9,8 +9,8 @@ URL: https://gerrit.opnfv.org/gerrit/apex.git Source0: opnfv-apex-common.tar.gz BuildArch: noarch -BuildRequires: openvswitch qemu-kvm python-docutils -Requires: opnfv-apex-sdn opnfv-apex-undercloud openvswitch qemu-kvm bridge-utils libguestfs-tools +BuildRequires: python-docutils +Requires: openstack-tripleo opnfv-apex-sdn opnfv-apex-undercloud openvswitch qemu-kvm bridge-utils libguestfs-tools %description Scripts for OPNFV deployment using RDO Manager @@ -35,7 +35,7 @@ install config/deploy/os-odl_l2-sfc-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-a install config/deploy/os-odl_l3-nofeature-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl_l3-nofeature-ha.yaml install config/deploy/os-onos-nofeature-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-onos-nofeature-ha.yaml install config/deploy/os-opencontrail-nofeature-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-opencontrail-nofeature-ha.yaml -install config/deploy/network/network_settings.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/network_settings.yaml +install config/network/network_settings.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/network_settings.yaml mkdir -p %{buildroot}%{_var}/opt/opnfv/lib/ install lib/common-functions.sh %{buildroot}%{_var}/opt/opnfv/lib/ @@ -48,7 +48,7 @@ install docs/installation-instructions.html %{buildroot}%{_docdir}/opnfv/ install docs/release-notes/index.rst %{buildroot}%{_docdir}/opnfv/release-notes.rst install docs/release-notes.html %{buildroot}%{_docdir}/opnfv/ install config/deploy/deploy_settings.yaml %{buildroot}%{_docdir}/opnfv/deploy_settings.yaml.example -install config/deploy/network/network_settings.yaml %{buildroot}%{_docdir}/opnfv/network_settings.yaml.example +install config/network/network_settings.yaml %{buildroot}%{_docdir}/opnfv/network_settings.yaml.example install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/inventory.yaml.example %files diff --git a/build/opnfv-apex-onos.spec b/build/opnfv-apex-onos.spec new file mode 100644 index 00000000..a5db6c57 --- /dev/null +++ b/build/opnfv-apex-onos.spec @@ -0,0 +1,32 @@ +Name: opnfv-apex-onos +Version: 2.1 +Release: %{release} +Summary: 
Overcloud Disk images for OPNFV Apex ONOS deployment + +Group: System Environment +License: Apache 2.0 +URL: https://gerrit.opnfv.org/gerrit/apex.git +Source0: opnfv-apex-onos.tar.gz + +Provides: opnfv-apex-sdn +BuildArch: noarch +Requires: opnfv-apex-common opnfv-apex-undercloud + +%description +Overcloud Disk images for OPNFV Apex ONOS deployment +https://wiki.opnfv.org/apex + +%prep +%setup -q + +%install +mkdir -p %{buildroot}%{_var}/opt/opnfv/images/ +install build/images/overcloud-full-onos.qcow2 %{buildroot}%{_var}/opt/opnfv/images/ + +%files +%defattr(644, root, root, -) +%{_var}/opt/opnfv/images/overcloud-full-onos.qcow2 + +%changelog +* Mon Mar 07 2016 Dan Radez - 2.1-1 +- Initial Packaging diff --git a/build/opnfv-apex-opendaylight-sfc.spec b/build/opnfv-apex-opendaylight-sfc.spec index 6d980f21..a3f8d465 100644 --- a/build/opnfv-apex-opendaylight-sfc.spec +++ b/build/opnfv-apex-opendaylight-sfc.spec @@ -20,12 +20,12 @@ https://wiki.opnfv.org/apex %setup -q %install -mkdir -p %{buildroot}%{_var}/opt/opnfv/stack/ -install build/stack/overcloud-full-opendaylight-sfc.qcow2 %{buildroot}%{_var}/opt/opnfv/stack/ +mkdir -p %{buildroot}%{_var}/opt/opnfv/images/ +install build/images/overcloud-full-opendaylight-sfc.qcow2 %{buildroot}%{_var}/opt/opnfv/images/ %files %defattr(644, root, root, -) -%{_var}/opt/opnfv/stack/overcloud-full-opendaylight-sfc.qcow2 +%{_var}/opt/opnfv/images/overcloud-full-opendaylight-sfc.qcow2 %changelog * Tue Jan 19 2016 Dan Radez - 2.1-1 diff --git a/build/opnfv-apex-undercloud.spec b/build/opnfv-apex-undercloud.spec index 8d585db2..5f093c63 100644 --- a/build/opnfv-apex-undercloud.spec +++ b/build/opnfv-apex-undercloud.spec @@ -20,17 +20,10 @@ https://wiki.opnfv.org/apex %setup -q %install -mkdir -p %{buildroot}%{_var}/opt/opnfv/stack/ +mkdir -p %{buildroot}%{_var}/opt/opnfv/images/ mkdir -p %{buildroot}%{_var}/opt/opnfv/nics/ -install build/instack.qcow2 %{buildroot}%{_var}/opt/opnfv/stack/ -install build/instack.xml %{buildroot}%{_var}/opt/opnfv/ -install build/baremetalbrbm_brbm1_brbm2_brbm3_*.xml %{buildroot}%{_var}/opt/opnfv/ -install build/brbm-net.xml %{buildroot}%{_var}/opt/opnfv/ -install build/brbm1-net.xml %{buildroot}%{_var}/opt/opnfv/ -install build/brbm2-net.xml %{buildroot}%{_var}/opt/opnfv/ -install build/brbm3-net.xml %{buildroot}%{_var}/opt/opnfv/ -install build/default-pool.xml %{buildroot}%{_var}/opt/opnfv/ +install build/undercloud.qcow2 %{buildroot}%{_var}/opt/opnfv/images/ install build/network-environment.yaml %{buildroot}%{_var}/opt/opnfv/ install build/nics/controller.yaml %{buildroot}%{_var}/opt/opnfv/nics/ install build/nics/compute.yaml %{buildroot}%{_var}/opt/opnfv/nics/ @@ -52,19 +45,10 @@ install build/nics/compute_br-ex_no-public-ip.yaml %{buildroot}%{_var}/opt/opnfv install build/nics/compute_private_br-ex_no-public-ip.yaml %{buildroot}%{_var}/opt/opnfv/nics/ install build/nics/compute_storage_br-ex_no-public-ip.yaml %{buildroot}%{_var}/opt/opnfv/nics/ install build/nics/compute_private_storage_br-ex_no-public-ip.yaml %{buildroot}%{_var}/opt/opnfv/nics/ -install build/instackenv-virt.json %{buildroot}%{_var}/opt/opnfv/ -install build/instackenv.json.example %{buildroot}%{_var}/opt/opnfv/ %files %defattr(644, root, root, -) -%{_var}/opt/opnfv/stack/instack.qcow2 -%{_var}/opt/opnfv/instack.xml -%{_var}/opt/opnfv/baremetalbrbm_brbm1_brbm2_brbm3_*.xml -%{_var}/opt/opnfv/brbm-net.xml -%{_var}/opt/opnfv/brbm1-net.xml -%{_var}/opt/opnfv/brbm2-net.xml -%{_var}/opt/opnfv/brbm3-net.xml -%{_var}/opt/opnfv/default-pool.xml 
+%{_var}/opt/opnfv/images/undercloud.qcow2 %{_var}/opt/opnfv/network-environment.yaml %{_var}/opt/opnfv/nics/controller.yaml %{_var}/opt/opnfv/nics/compute.yaml @@ -86,8 +70,6 @@ install build/instackenv.json.example %{buildroot}%{_var}/opt/opnfv/ %{_var}/opt/opnfv/nics/compute_private_br-ex_no-public-ip.yaml %{_var}/opt/opnfv/nics/compute_storage_br-ex_no-public-ip.yaml %{_var}/opt/opnfv/nics/compute_private_storage_br-ex_no-public-ip.yaml -%{_var}/opt/opnfv/instackenv-virt.json -%{_var}/opt/opnfv/instackenv.json.example %changelog * Thu Jan 14 2016 Dan Radez - 2.1-1 diff --git a/build/opnfv-apex.spec b/build/opnfv-apex.spec index 81b8d656..011a1847 100644 --- a/build/opnfv-apex.spec +++ b/build/opnfv-apex.spec @@ -20,14 +20,17 @@ https://wiki.opnfv.org/apex %setup -q %install -mkdir -p %{buildroot}%{_var}/opt/opnfv/stack/ -install build/stack/overcloud-full-opendaylight.qcow2 %{buildroot}%{_var}/opt/opnfv/stack/ +mkdir -p %{buildroot}%{_var}/opt/opnfv/images/ +install build/images/overcloud-full-opendaylight.qcow2 %{buildroot}%{_var}/opt/opnfv/images/ %files %defattr(644, root, root, -) -%{_var}/opt/opnfv/stack/overcloud-full-opendaylight.qcow2 +%{_var}/opt/opnfv/images/overcloud-full-opendaylight.qcow2 %changelog +* Wed Jan 20 2016 Dan Radez - 2.1-4 +- cleaning out libvirt config files +- replacing instack-virt-setup with direct tripleo calls * Tue Jan 19 2016 Dan Radez - 2.1-3 - Remove conflicts with other SDN controllers, they can co-exist now - update overcloud image name to specify opendaylight diff --git a/build/opnfv-tripleo-heat-templates.patch b/build/opnfv-tripleo-heat-templates.patch index 019424b1..ba59e22d 100644 --- a/build/opnfv-tripleo-heat-templates.patch +++ b/build/opnfv-tripleo-heat-templates.patch @@ -340,7 +340,7 @@ index a532c2f..9c6e3cd 100644 NeutronTenantNetwork: tenant CeilometerApiNetwork: internal_api + AodhApiNetwork: internal_api -+ OpenDaylightNetwork: internal_api ++ OpenDaylightApiNetwork: internal_api MongoDbNetwork: internal_api CinderApiNetwork: internal_api CinderIscsiNetwork: storage diff --git a/build/overcloud-full.sh b/build/overcloud-full.sh new file mode 100755 index 00000000..5e11b778 --- /dev/null +++ b/build/overcloud-full.sh @@ -0,0 +1,64 @@ +#!/bin/sh +############################################################################## +# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +set -e +source ./cache.sh +source ./variables.sh + +populate_cache "$rdo_images_uri/overcloud-full.tar" + +if [ ! 
-d images/ ]; then mkdir images; fi +tar -xf cache/overcloud-full.tar -C images/ +mv -f images/overcloud-full.qcow2 images/overcloud-full_build.qcow2 + +########################################################## +##### Prep initial overcloud image with common deps ##### +########################################################## + +pushd images > /dev/null +# Update puppet-aodh it's old +rm -rf aodh +git clone https://github.com/openstack/puppet-aodh aodh +pushd aodh > /dev/null +git checkout stable/liberty +popd > /dev/null +tar -czf puppet-aodh.tar.gz aodh + +# grab latest puppet-neutron module +rm -rf puppet-neutron +git clone -b stable/liberty https://github.com/openstack/puppet-neutron.git +pushd puppet-neutron +git archive --format=tar.gz --prefix=neutron/ HEAD > ../puppet-neutron.tar.gz +popd + + +# Add epel to install ceph +# update aodh +# puppet-neutron-force-metadata.patch +# puppet-cinder-quota-fix +#aodh-puppet-tripleo +AODH_PKG="openstack-aodh-api,openstack-aodh-common,openstack-aodh-compat,openstack-aodh-evaluator,openstack-aodh-expirer" +AODH_PKG+=",openstack-aodh-listener,openstack-aodh-notifier" +LIBGUESTFS_BACKEND=direct virt-customize \ + --upload puppet-aodh.tar.gz:/etc/puppet/modules/ \ + --run-command "cd /etc/puppet/modules/ && rm -rf aodh && tar xzf puppet-aodh.tar.gz" \ + --run-command "echo 'nf_conntrack_proto_sctp' > /etc/modules-load.d/nf_conntrack_proto_sctp.conf" \ + --install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm \ + --install "$AODH_PKG,ceph" \ + --run-command "rm -rf /etc/puppet/modules/neutron" \ + --upload puppet-neutron.tar.gz:/etc/puppet/modules/ \ + --run-command "cd /etc/puppet/modules/ && tar xzf puppet-neutron.tar.gz" \ + --upload ../puppet-cinder-quota-fix.patch:/tmp \ + --run-command "cd /etc/puppet/modules/cinder && patch -Np1 < /tmp/puppet-cinder-quota-fix.patch" \ + --upload ../aodh-puppet-tripleo.patch:/tmp \ + --run-command "cd /etc/puppet/modules/tripleo && patch -Np1 < /tmp/aodh-puppet-tripleo.patch" \ + -a overcloud-full_build.qcow2 + +mv -f overcloud-full_build.qcow2 overcloud-full.qcow2 +popd > /dev/null diff --git a/build/overcloud-onos.sh b/build/overcloud-onos.sh new file mode 100755 index 00000000..88f0b166 --- /dev/null +++ b/build/overcloud-onos.sh @@ -0,0 +1,40 @@ +#!/bin/sh +############################################################################## +# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +set -e +source ./cache.sh +source ./variables.sh + +pushd images > /dev/null +cp -f overcloud-full.qcow2 overcloud-full-onos_build.qcow2 + +####################################### +##### Adding ONOS to overcloud ##### +####################################### + +# upload the onos puppet module +rm -rf puppet-onos +git clone https://github.com/bobzhouHW/puppet-onos.git +pushd puppet-onos > /dev/null + +# download jdk, onos and maven dependancy packages. 
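# A rough sketch of what the cache helpers used in the loop below do: populate_cache
# (from cache.sh, sourced above) downloads each artifact into the local cache/ directory,
# re-fetching only when the remote .md5 no longer matches the cached checksum, and
# get_cached_file copies it into the working directory. onos_artifacts_uri is expected
# to come from variables.sh. For a single artifact this amounts to roughly:
#
#   curl -L ${onos_artifacts_uri}/onos-1.3.0.tar.gz > cache/onos-1.3.0.tar.gz
#   cp -f cache/onos-1.3.0.tar.gz .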
+pushd files +for i in jdk-8u51-linux-x64.tar.gz onos-1.3.0.tar.gz repository.tar; do + populate_cache ${onos_artifacts_uri}/$i + get_cached_file $i +done +popd > /dev/null + +popd > /dev/null +tar --xform="s:puppet-onos/:onos/:" -czf puppet-onos.tar.gz puppet-onos + +LIBGUESTFS_BACKEND=direct virt-customize --upload puppet-onos.tar.gz:/etc/puppet/modules/ \ + --run-command "cd /etc/puppet/modules/ && tar xzf puppet-onos.tar.gz" -a overcloud-full-onos_build.qcow2 +mv overcloud-full-onos_build.qcow2 overcloud-full-onos.qcow2 +popd > /dev/null diff --git a/build/overcloud-opendaylight-sfc.sh b/build/overcloud-opendaylight-sfc.sh new file mode 100755 index 00000000..9b38ca29 --- /dev/null +++ b/build/overcloud-opendaylight-sfc.sh @@ -0,0 +1,50 @@ +#!/bin/sh +############################################################################## +# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +set -e + +################################################ +##### Adding SFC+OpenDaylight overcloud ##### +################################################ + +#copy opendaylight overcloud full to isolate odl-sfc +cp -f images/overcloud-full-opendaylight.qcow2 images/overcloud-full-opendaylight-sfc_build.qcow2 + +# work around for XFS grow bug +# http://xfs.org/index.php/XFS_FAQ#Q:_Why_do_I_receive_No_space_left_on_device_after_xfs_growfs.3F +cat > /tmp/xfs-grow-remount-fix.service << EOF +[Unit] +Description=XFS Grow Bug Remount +After=network.target +Before=getty@tty1.service + +[Service] +Type=oneshot +ExecStart=/bin/bash -c "echo 'XFS Grow Bug Remount Sleeping 180s' && sleep 180 && echo 'XFS Grow Bug Remounting Now' && mount -o remount,inode64 /" +RemainAfterExit=no + +[Install] +WantedBy=multi-user.target +EOF + + +# kernel is patched with patch from this post +# http://xfs.org/index.php/XFS_FAQ#Q:_Why_do_I_receive_No_space_left_on_device_after_xfs_growfs.3F +LIBGUESTFS_BACKEND=direct virt-customize \ + --upload "/tmp/xfs-grow-remount-fix.service:/etc/systemd/system/xfs-grow-remount-fix.service" \ + --run-command "chmod 664 /etc/systemd/system/xfs-grow-remount-fix.service" \ + --run-command "systemctl enable xfs-grow-remount-fix.service" \ + --install 'https://radez.fedorapeople.org/kernel-ml-3.13.7-1.el7.centos.x86_64.rpm' \ + --run-command 'grub2-set-default "\$(grep -P \"submenu|^menuentry\" /boot/grub2/grub.cfg | cut -d \"\\x27\" | head -n 1)"' \ + --install 'https://radez.fedorapeople.org/openvswitch-kmod-2.3.90-1.el7.centos.x86_64.rpm' \ + --run-command 'yum downgrade -y https://radez.fedorapeople.org/openvswitch-2.3.90-1.x86_64.rpm' \ + --run-command 'rm -f /lib/modules/3.13.7-1.el7.centos.x86_64/kernel/net/openvswitch/openvswitch.ko' \ + --run-command 'ln -s /lib/modules/3.13.7-1.el7.centos.x86_64/kernel/extra/openvswitch/openvswitch.ko /lib/modules/3.13.7-1.el7.centos.x86_64/kernel/net/openvswitch/openvswitch.ko' \ + -a images/overcloud-full-opendaylight-sfc_build.qcow2 +mv images/overcloud-full-opendaylight-sfc_build.qcow2 images/overcloud-full-opendaylight-sfc.qcow2 diff --git a/build/overcloud-opendaylight.sh b/build/overcloud-opendaylight.sh new file mode 100755 index 00000000..7e6c820c --- /dev/null +++ b/build/overcloud-opendaylight.sh @@ -0,0 
+1,57 @@ +#!/bin/sh +############################################################################## +# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +set -e +source ./variables.sh + +pushd images > /dev/null + +cp -f overcloud-full.qcow2 overcloud-full-opendaylight_build.qcow2 + +############################################### +##### Adding OpenDaylight to overcloud ##### +############################################### + +cat > /tmp/opendaylight.repo << EOF +[opendaylight] +name=OpenDaylight \$releasever - \$basearch +baseurl=http://cbs.centos.org/repos/nfv7-opendaylight-40-release/\$basearch/os/ +enabled=1 +gpgcheck=0 +EOF + +# install ODL packages +# patch puppet-neutron: ODL Bug, Url check reports ODL is up but it's not quite up +LIBGUESTFS_BACKEND=direct virt-customize \ + --upload /tmp/opendaylight.repo:/etc/yum.repos.d/opendaylight.repo \ + --install opendaylight,python-networking-odl \ + --install https://github.com/michaeltchapman/networking_rpm/raw/master/openstack-neutron-bgpvpn-2015.2-1.el7.centos.noarch.rpm \ + -a overcloud-full-opendaylight_build.qcow2 + +# install Jolokia for ODL HA +LIBGUESTFS_BACKEND=direct virt-customize \ + --upload ${odl_artifacts_cache}/jolokia.tar.gz:/tmp/ \ + --run-command "tar -xvf /tmp/jolokia.tar.gz -C /opt/opendaylight/system/org" \ + -a overcloud-full-opendaylight_build.qcow2 + +## WORK AROUND +## when OpenDaylight lands in upstream RDO manager this can be removed + +# upload the opendaylight puppet module +rm -rf puppet-opendaylight +git clone -b opnfv_integration https://github.com/dfarrell07/puppet-opendaylight +pushd puppet-opendaylight > /dev/null +git archive --format=tar.gz --prefix=opendaylight/ HEAD > ../puppet-opendaylight.tar.gz +popd > /dev/null +LIBGUESTFS_BACKEND=direct virt-customize --upload puppet-opendaylight.tar.gz:/etc/puppet/modules/ \ + --run-command "cd /etc/puppet/modules/ && tar xzf puppet-opendaylight.tar.gz" \ + -a overcloud-full-opendaylight_build.qcow2 + +mv overcloud-full-opendaylight_build.qcow2 overcloud-full-opendaylight.qcow2 +popd > /dev/null diff --git a/build/undercloud.sh b/build/undercloud.sh new file mode 100755 index 00000000..351eaf3a --- /dev/null +++ b/build/undercloud.sh @@ -0,0 +1,57 @@ +#!/bin/sh +############################################################################## +# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red Hat) and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +set -e +source ./cache.sh +source ./variables.sh + +populate_cache "$rdo_images_uri/undercloud.qcow2" +if [ ! 
-d images ]; then mkdir images/; fi +cp -f cache/undercloud.qcow2 images/ + +#Adding OpenStack packages to undercloud +pushd images > /dev/null + +LIBGUESTFS_BACKEND=direct virt-customize --install yum-priorities -a undercloud.qcow2 +PACKAGES="qemu-kvm-common,qemu-kvm,libvirt-daemon-kvm,libguestfs,python-libguestfs,openstack-nova-compute" +PACKAGES+=",openstack-swift,openstack-ceilometer-api,openstack-neutron-ml2,openstack-ceilometer-alarm" +PACKAGES+=",openstack-nova-conductor,openstack-ironic-inspector,openstack-ironic-api,python-openvswitch" +PACKAGES+=",openstack-glance,python-glance,python-troveclient,openstack-puppet-modules" +PACKAGES+=",openstack-neutron,openstack-neutron-openvswitch,openstack-nova-scheduler,openstack-keystone,openstack-swift-account" +PACKAGES+=",openstack-swift-container,openstack-swift-object,openstack-swift-plugin-swift3,openstack-swift-proxy" +PACKAGES+=",openstack-nova-api,openstack-nova-cert,openstack-heat-api-cfn,openstack-heat-api," +PACKAGES+=",openstack-ceilometer-central,openstack-ceilometer-polling,openstack-ceilometer-collector," +PACKAGES+=",openstack-heat-api-cloudwatch,openstack-heat-engine,openstack-heat-common,openstack-ceilometer-notification" +PACKAGES+=",hiera,puppet,memcached,keepalived,mariadb,mariadb-server,rabbitmq-server,python-pbr,python-proliantutils" +PACKAGES+=",ceph-common" + +# install the packages above and enabling ceph to live on the controller +# OpenWSMan package update supports the AMT Ironic driver for the TealBox +LIBGUESTFS_BACKEND=direct virt-customize --install $PACKAGES \ + --run-command "sed -i '/ControllerEnableCephStorage/c\\ ControllerEnableCephStorage: true' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml" \ + --run-command "sed -i '/ \$enable_ceph = /c\\ \$enable_ceph = true' /usr/share/openstack-tripleo-heat-templates/puppet/manifests/overcloud_controller_pacemaker.pp" \ + --run-command "sed -i '/ \$enable_ceph = /c\\ \$enable_ceph = true' /usr/share/openstack-tripleo-heat-templates/puppet/manifests/overcloud_controller.pp" \ + --run-command "curl http://download.opensuse.org/repositories/Openwsman/CentOS_CentOS-7/Openwsman.repo > /etc/yum.repos.d/wsman.repo" \ + --run-command "yum update -y openwsman*" \ + --run-command "sed -i '/pxe_wol/c\\ enabled_drivers => ['pxe_ipmitool', 'pxe_ssh', 'pxe_drac', 'pxe_ilo', 'pxe_wol', 'pxe_amt'],' /usr/share/instack-undercloud/puppet-stack-config/puppet-stack-config.pp" \ + -a undercloud.qcow2 + +# Patch in OpenDaylight installation and configuration +LIBGUESTFS_BACKEND=direct virt-customize --upload ../opnfv-tripleo-heat-templates.patch:/tmp \ + --run-command "cd /usr/share/openstack-tripleo-heat-templates/ && patch -Np1 < /tmp/opnfv-tripleo-heat-templates.patch" \ + -a undercloud.qcow2 + +# adds tripleoclient aodh workaround +# for keystone +LIBGUESTFS_BACKEND=direct virt-customize --upload ../aodh-tripleoclient.patch:/tmp \ + --run-command "cd /usr/lib/python2.7/site-packages/tripleoclient && patch -Np1 < /tmp/aodh-tripleoclient.patch" \ + --upload ../aodh-os-cloud-config.patch:/tmp \ + --run-command "cd /usr/lib/python2.7/site-packages/os_cloud_config && patch -Np1 < /tmp/aodh-os-cloud-config.patch" \ + -a undercloud.qcow2 +popd > /dev/null diff --git a/build/variables.sh b/build/variables.sh new file mode 100644 index 00000000..9342ddf9 --- /dev/null +++ b/build/variables.sh @@ -0,0 +1,16 @@ +#!/bin/sh +############################################################################## +# Copyright (c) 2015 Tim Rozet (Red Hat), Dan Radez (Red 
Hat) and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +rdo_images_cache=/stable-images +#rdo_images_uri=https://ci.centos.org/artifacts/rdo/images/mitaka/delorean/stable/ +rdo_images_uri=file://$rdo_images_cache +#onos_artifacts_uri=http://205.177.226.237:9999/onosfw +onos_artifacts_uri=file:///stable-images/onos/ +odl_artifacts_cache=/stable-images/odl diff --git a/ci/build.sh b/ci/build.sh index 82d5b637..b5bfc8ca 100755 --- a/ci/build.sh +++ b/ci/build.sh @@ -1,411 +1,113 @@ -#!/bin/bash -set -e +#!/bin/sh ############################################################################## -# Copyright (c) 2015 Ericsson AB and others. -# stefan.k.berg@ericsson.com -# jonas.bjurel@ericsson.com -# dradez@redhat.com +# Copyright (c) 2016 Dan Radez (Red Hat) and others. +# # All rights reserved. This program and the accompanying materials # are made available under the terms of the Apache License, Version 2.0 # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## -trap 'echo "Exiting ..."; \ -if [ -f ${LOCK_FILE} ]; then \ - if [ $(cat ${LOCK_FILE}) -eq $$ ]; then \ - rm -f ${LOCK_FILE}; \ - fi; \ -fi;' EXIT +set -e -############################################################################ -# BEGIN of usage description -# -usage () +display_usage () { cat << EOF $0 Builds the Apex OPNFV Deployment Toolchain -usage: $0 [-s spec-file] [-c cache-URI] [-l log-file] [-f Flags] build-directory +usage: $0 [ -c cache_dir ] -r release_name [ --iso | --rpms ] OPTIONS: - -s spec-file ($BUILD_SPEC), define the build-spec file, default ../build/config.mk - -c cache base URI ($BUILD_CACHE_URI), specifies the base URI to a build cache to be used/updated - the name is automatically generated from the md5sum of the spec-file, http://, ftp://, file://[absolute path] suported. - - -l log-file ($BUILD_LOG), specifies the output log-file (stdout and stderr), if not specified logs are output to console as normal - -v version tag to be applied to the build result - -r alternative remote access method script/program. curl is default. - -t run small build-script unit test. - -T run large build-script unit test. - -f build flags ($BUILD_FLAGS): - o s: Do nothing, succeed - o f: Do nothing, fail - o t: run build unit tests - o M: Use master branch code - o i: run interactive (-t flag to docker run) - o P: Populate a new local cache and push it to the (-c cache-URI) cache artifactory if -c option is present, currently file://, http:// and ftp:// are supported - o d: Detatch - NOT YET SUPPORTED - - build-directory ($BUILD_DIR), specifies the directory for the output artifacts (.iso file). - + -c cache destination - directory of cached files, defaults to ./cache + -r release name/version of the build result + --iso build the iso (implies RPMs too) + --rpms build the rpms + --debug enable debug -h help, prints this help text -Description: -build.sh builds opnfv .iso artifact. -To reduce build time it uses build cache on a local or remote location. 
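The removed help text around this point refers to an md5-keyed cache object (described a few lines further down). As a rough sketch of that old naming scheme — using the variable names from the removed script and an illustrative spec path — the remote archive name was derived roughly like this:

    # old-style cache key: the archive name embeds the md5sum of the build spec,
    # so any change to the spec simply misses the previously uploaded cache object
    BUILD_SPEC=../build/config.mk
    REMOTE_CACHE_ARCH_NAME="apex_cache-$(md5sum ${BUILD_SPEC} | cut -f1 -d ' ')"
    echo "cache object: ${REMOTE_CACHE_ARCH_NAME}.tgz"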
The cache is rebuilt and uploaded if either of the below conditions are met: -1) The P(opulate) flag is set and the -c cache-base-URI is provided, if -c is not provided the cache will stay local. -2) If the cache is invalidated by one of the following conditions: - - The config spec md5sum does not compare to the md5sum for the spec which the cache was built. - - The git Commit-Id on the remote repos/HEAD defined in the spec file does not correspont with the Commit-Id for what the cache was built with. -3) A valid cache does not exist on the specified -c cache-base-URI. - -The cache URI object name is apex_cache-"md5sum(spec file)" - -Logging by default to console, but can be directed elsewhere with the -l option in which case both stdout and stderr is redirected to that destination. - -Built in unit testing of components is enabled by adding the t(est) flag. - -Return codes: - - 0 Success! - - 1-99 Unspecified build error - - 100-199 Build system internal error (not build it self) - o 101 Build system instance busy - - 200 Build failure - -Examples: -build -c http://opnfv.org/artifactory/apex/cache -d ~/jenkins/genesis/apex/ci/output -f ti -NOTE: At current the build scope is set to the git root of the repository, -d destination locations outside that scope will not work +Example: +build -c file:///tmp/cache -r dev123 EOF } -# -# END of usage description -############################################################################ -############################################################################ -# BEGIN of variables to customize -# BUILD_BASE=$(readlink -e ../build/) -RESULT_DIR="${BUILD_BASE}/release" -BUILD_SPEC="${BUILD_BASE}/config.mk" +CACHE_DEST="" CACHE_DIR="cache" -LOCAL_CACHE_ARCH_NAME="apex-cache" -REMOTE_CACHE_ARCH_NAME="apex_cache-$(md5sum ${BUILD_SPEC}| cut -f1 -d " ")" -REMOTE_ACCESS_METHD=curl -INCLUDE_DIR=../include -# -# END of variables to customize -############################################################################ - -############################################################################ -# BEGIN of script assigned variables -# -SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) -LOCK_FILE="${SCRIPT_DIR}/.build.lck" -CACHE_TMP="${SCRIPT_DIR}/tmp" -TEST_SUCCEED=0 -TEST_FAIL=0 -UNIT_TEST=0 -USE_MASTER=0 -UPDATE_CACHE=0 -POPULATE_CACHE=0 -RECURSIV=0 -DETACH=0 -DEBUG=0 -INTEGRATION_TEST=0 -FULL_INTEGRATION_TEST=0 -INTERACTIVE=0 -BUILD_CACHE_URI= -BUILD_SPEC= -BUILD_DIR= -BUILD_LOG= -BUILD_VERSION= -MAKE_ARGS= -# -# END of script assigned variables -############################################################################ - -############################################################################ -# BEGIN of include pragmas -# -source ${INCLUDE_DIR}/build.sh.debug -# -# END of include -############################################################################ - -############################################################################ -# BEGIN of main -# -while getopts "s:c:d:v:f:l:r:RtTh" OPTION -do - case $OPTION in - h) - usage - rc=0 - exit $rc - ;; - - s) - BUILD_SPEC=${OPTARG} - ;; - - c) - BUILD_CACHE_URI=${OPTARG} - ;; - - d) - BUILD_DIR=${OPTARG} - ;; - - l) - BUILD_LOG=${OPTARG} - ;; - - v) - BUILD_VERSION=${OPTARG} - ;; - - f) - BUILD_FLAGS=${OPTARG} - ;; - - r) REMOTE_ACCESS_METHD=${OPTARG} - ;; - - R) - RECURSIVE=1 - ;; - - t) - INTEGRATION_TEST=1 - ;; - - T) - INTEGRATION_TEST=1 - FULL_INTEGRATION_TEST=1 - ;; - - *) - echo "${OPTION} is not a valid argument" - rc=100 - exit $rc - ;; +CACHE_NAME="apex-cache" 
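Going by the display_usage text above, a typical invocation of the rewritten ci/build.sh looks roughly like the following; the cache directory and release tag are illustrative values, not project defaults:

    # RPM-only build, keeping the build cache in a local directory
    ./ci/build.sh -c /tmp/apex-cache -r dev123 --rpms

    # full ISO build; per the usage text, --iso implies building the RPMs as well
    ./ci/build.sh -c /tmp/apex-cache -r dev123 --iso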
+MAKE_TARGET="images" + +parse_cmdline() { + while [ "${1:0:1}" = "-" ] + do + case "$1" in + -h|--help) + display_usage + exit 0 + ;; + -c|--cache-dir) + CACHE_DEST=${2} + shift 2 + ;; + -r|--release) + RELEASE=${2} + shift 2 + ;; + --iso ) + MAKE_TARGET="iso" + echo "Building opnfv-apex RPMs and ISO" + shift 1 + ;; + --rpms ) + MAKE_TARGET="rpms" + echo "Buiding opnfv-apex RPMs" + shift 1 + ;; + --debug ) + debug="TRUE" + echo "Enable debug output" + shift 1 + ;; + *) + display_usage + exit 1 + ;; esac -done - -if [ -z $BUILD_DIR ]; then - BUILD_DIR=$(echo $@ | cut -d ' ' -f ${OPTIND}) -fi - -for ((i=0; i<${#BUILD_FLAGS};i++)); do - case ${BUILD_FLAGS:$i:1} in - s) - rc=0 - exit $rc - ;; - - f) - rc=1 - exit $rc - ;; - - t) - UNIT_TEST=1 - ;; - - M) - USE_MASTER=1 - ;; + done - i) - INTERACTIVE=1 - ;; - - P) - POPULATE_CACHE=1 - ;; - - d) - DETACH=1 - echo "Detach is not yet supported - exiting ...." - rc=100 - exit $rc - ;; - - D) - DEBUG=1 - ;; - - *) - echo "${BUILD_FLAGS:$i:1} is not a valid build flag - exiting ...." - rc=100 - exit $rc - ;; - esac -done - -shift $((OPTIND-1)) - -if [ ${INTEGRATION_TEST} -eq 1 ]; then - integration-test - rc=0 - exit $rc -fi - -if [ ! -f ${BUILD_SPEC} ]; then - echo "spec file does not exist: $BUILD_SPEC - exiting ...." - rc=100 - exit $rc -fi - -if [ -z ${BUILD_DIR} ]; then - echo "Missing build directory - exiting ...." - rc=100 - exit $rc -fi - -if [ ! -z ${BUILD_LOG} ]; then - if [[ ${RECURSIVE} -ne 1 ]]; then - set +e - eval $0 -R $@ > ${BUILD_LOG} 2>&1 - rc=$? - set -e - if [ $rc -ne 0]; then - exit $rc - fi - fi -fi - -if [ ${TEST_SUCCEED} -eq 1 ]; then - sleep 1 - rc=0 - exit $rc -fi - -if [ ${TEST_FAIL} -eq 1 ]; then - sleep 1 - rc=1 - exit $rc -fi - -if [ -e ${LOCK_FILE} ]; then - echo "A build job is already running, exiting....." - rc=101 - exit $rc -fi - -echo $$ > ${LOCK_FILE} +} -if [ ! -z ${BUILD_CACHE_URI} ]; then - if [ ${POPULATE_CACHE} -ne 1 ]; then - rm -rf ${CACHE_TMP}/cache - mkdir -p ${CACHE_TMP}/cache - echo "Downloading cach file ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME} ..." - set +e - ${REMOTE_ACCESS_METHD} -o ${CACHE_TMP}/cache/${LOCAL_CACHE_ARCH_NAME}.tgz ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}.tgz - rc=$? - set -e - if [ $rc -ne 0 ]; then - echo "Remote cache does not exist, or is not accessible - a new cache will be built ..." - POPULATE_CACHE=1 - else - echo "Unpacking cache file ..." - tar --atime-preserve -C ${CACHE_TMP}/cache -xvf ${CACHE_TMP}/cache/${LOCAL_CACHE_ARCH_NAME}.tgz - cp ${CACHE_TMP}/cache/cache/.versions ${BUILD_BASE}/. - set +e - make -C ${BUILD_BASE} validate-cache; - rc=$? - set -e +parse_cmdline "$@" - if [ $rc -ne 0 ]; then - echo "Cache invalid - a new cache will be built " - POPULATE_CACHE=1 - else - cp -rf ${CACHE_TMP}/cache/cache/. ${BUILD_BASE} - fi - rm -rf ${CACHE_TMP}/cache - fi - fi -fi +if [ -n "$RELEASE" ]; then MAKE_ARGS+="RELEASE=$RELEASE "; fi -if [ ${POPULATE_CACHE} -eq 1 ]; then - if [ ${DEBUG} -eq 0 ]; then - set +e - cd ${BUILD_BASE} && make clean - rc=$? - set -e - if [ $rc -ne 0 ]; then - echo "Build - make clean failed, exiting ..." - rc=100 - exit $rc - fi +# Get the Old Cache +if [ -n "$CACHE_DEST" ]; then + echo "Retrieving Cache" + if [ -f $CACHE_DEST/${CACHE_NAME}.tgz ]; then + rm -rf $BUILD_BASE/$CACHE_DIR + cp -f $CACHE_DEST/${CACHE_NAME}.tgz $BUILD_BASE/${CACHE_NAME}.tgz + tar xzf $BUILD_BASE/${CACHE_NAME}.tgz + elif [ ! -d $BUILD_BASE/$CACHE_DIR ]; then + mkdir $BUILD_BASE/$CACHE_DIR fi fi -if [ ! 
-z ${BUILD_VERSION} ]; then - MAKE_ARGS+="REVSTATE=${BUILD_VERSION} " +#create build_output for legecy functionality compatibiltiy in jenkins +if [[ ! -d ../build_output ]]; then + rm -f ../build_output + ln -s build/noarch/ ../build_output fi -if [ ${UNIT_TEST} -eq 1 ]; then - MAKE_ARGS+="UNIT_TEST=TRUE " -else - MAKE_ARGS+="UNIT_TEST=FALSE " -fi - -if [ ${USE_MASTER} -eq 1 ]; then - MAKE_ARGS+="USE_MASTER=-master " -fi +# Execute Make +make $MAKE_ARGS -C ${BUILD_BASE} $MAKE_TARGET +echo "Build Complete" -if [ ${INTERACTIVE} -eq 1 ]; then - MAKE_ARGS+="INTERACTIVE=TRUE " -else - MAKE_ARGS+="INTERACTIVE=FALSE " +# Build new Cache +if [ -n "$CACHE_DEST" ]; then + echo "Building Cache" + tar --atime-preserve --dereference -C $BUILD_BASE -caf $BUILD_BASE/${CACHE_NAME}.tgz $CACHE_DIR + echo "Copying Cache" + if [ ! -d $CACHE_DEST ]; then mkdir -p $CACHE_DEST; fi + cp $BUILD_BASE/${CACHE_NAME}.tgz $CACHE_DEST/${CACHE_NAME}.tgz fi - -MAKE_ARGS+=all - -if [ ${DEBUG} -eq 0 ]; then - set +e - cd ${BUILD_BASE} && make ${MAKE_ARGS} - rc=$? - set -e - if [ $rc -gt 0 ]; then - echo "Build: make all failed, exiting ..." - rc=200 - exit $rc - fi -else -debug_make -fi -set +e -make -C ${BUILD_BASE} prepare-cache -rc=$? -set -e - -if [ $rc -gt 0 ]; then - echo "Build: make prepare-cache failed - exiting ..." - rc=100 - exit $rc -fi -echo "Linking built OPNFV .iso file to target directory ${BUILD_DIR} ..." -rm -rf ${BUILD_DIR} -mkdir -p ${BUILD_DIR} -ln -s ${BUILD_BASE}/.versions ${BUILD_DIR} -ln -s ${RESULT_DIR}/*.iso* ${BUILD_DIR} -echo "Linking built OPNFV .rpm files to target directory ${BUILD_DIR} ..." -ln -s ${BUILD_BASE}/*.rpm ${BUILD_DIR} -ln -s ${BUILD_BASE}/noarch/*.rpm ${BUILD_DIR} - -if [ $POPULATE_CACHE -eq 1 ]; then - if [ ! -z ${BUILD_CACHE_URI} ]; then - echo "Building cache ..." - tar --atime-preserve --dereference -C ${BUILD_BASE} -caf ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz ${CACHE_DIR} - echo "Uploading cache ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}" - ${REMOTE_ACCESS_METHD} -T ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}.tgz - rm ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz - fi -fi -echo "Success!!!" 
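The new cache handling above amounts to a tarball round-trip: the cache directory is archived as apex-cache.tgz, copied to the -c destination after the build, and restored from there on the next run. If you need to inspect or pre-seed that cache by hand, something along these lines should work; the paths are illustrative, and whether populate_cache() can then skip its download depends on build/cache.sh checking the cache directory first:

    # list the contents of a previously published cache archive
    tar tzf /tmp/apex-cache/apex-cache.tgz | head

    # pre-seed the local cache with an already-downloaded undercloud image
    # (assumes build/cache.sh looks in build/cache before fetching)
    mkdir -p build/cache
    cp ~/Downloads/undercloud.qcow2 build/cache/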
-exit 0 -# -# END of main -############################################################################ +echo "Complete" diff --git a/ci/clean.sh b/ci/clean.sh index f05b9136..58239cc6 100755 --- a/ci/clean.sh +++ b/ci/clean.sh @@ -17,7 +17,7 @@ CONFIG=/var/opt/opnfv source $CONFIG/lib/common-functions.sh vm_index=4 -ovs_bridges="brbm brbm1 brbm2 brbm3" +ovs_bridges="br-admin br-private br-public br-storage" # Clean off instack VM virsh destroy instack 2> /dev/null || echo -n '' virsh undefine instack --remove-all-storage 2> /dev/null || echo -n '' @@ -32,11 +32,11 @@ rm -f /var/lib/libvirt/images/instack.qcow2 2> /dev/null # Clean off baremetal VMs in case they exist for i in $(seq 0 $vm_index); do - virsh destroy baremetalbrbm_brbm1_brbm2_brbm3_$i 2> /dev/null || echo -n '' - virsh undefine baremetalbrbm_brbm1_brbm2_brbm3_$i --remove-all-storage 2> /dev/null || echo -n '' - /usr/bin/touch /var/lib/libvirt/images/baremetalbrbm_brbm1_brbm2_brbm3_${i}.qcow2 - virsh vol-delete baremetalbrbm_brbm1_brbm2_brbm3_${i}.qcow2 --pool default 2> /dev/null - rm -f /var/lib/libvirt/images/baremetalbrbm_brbm1_brbm2_brbm3_${i}.qcow2 2> /dev/null + virsh destroy baremetal$i 2> /dev/null || echo -n '' + virsh undefine baremetal$i --remove-all-storage 2> /dev/null || echo -n '' + /usr/bin/touch /var/lib/libvirt/images/baremetal${i}.qcow2 + virsh vol-delete baremetal${i}.qcow2 --pool default 2> /dev/null + rm -f /var/lib/libvirt/images/baremetal${i}.qcow2 2> /dev/null done # Clean off created bridges diff --git a/ci/deploy.sh b/ci/deploy.sh index 8589226b..504fd507 100755 --- a/ci/deploy.sh +++ b/ci/deploy.sh @@ -17,17 +17,10 @@ set -e ##VARIABLES -if [ "$TERM" != "unknown" ]; then - reset=$(tput sgr0) - blue=$(tput setaf 4) - red=$(tput setaf 1) - green=$(tput setaf 2) -else - reset="" - blue="" - red="" - green="" -fi +reset=$(tput sgr0 || echo "") +blue=$(tput setaf 4 || echo "") +red=$(tput setaf 1 || echo "") +green=$(tput setaf 2 || echo "") vm_index=4 interactive="FALSE" @@ -44,14 +37,14 @@ declare -A NET_MAP SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null -o LogLevel=error) DEPLOY_OPTIONS="" -RESOURCES=/var/opt/opnfv/stack +RESOURCES=/var/opt/opnfv/images CONFIG=/var/opt/opnfv OPNFV_NETWORK_TYPES="admin_network private_network public_network storage_network" # Netmap used to map networks to OVS bridge names -NET_MAP['admin_network']="brbm" -NET_MAP['private_network']="brbm1" -NET_MAP['public_network']="brbm2" -NET_MAP['storage_network']="brbm3" +NET_MAP['admin_network']="br-admin" +NET_MAP['private_network']="br-private" +NET_MAP['public_network']="br-public" +NET_MAP['storage_network']="br-storage" ##FUNCTIONS ##translates yaml into variables @@ -402,15 +395,30 @@ function configure_deps { virsh_enabled_networks=$enabled_network_list fi - virsh net-list | grep default || virsh net-define /usr/share/libvirt/networks/default.xml - virsh net-list | grep -E "default\s+active" > /dev/null || virsh net-start default - virsh net-list | grep -E "default\s+active\s+yes" > /dev/null || virsh net-autostart --network default + # ensure default network is configured correctly + libvirt_dir="/usr/share/libvirt/networks" + virsh net-list --all | grep default || virsh net-define ${libvirt_dir}/default.xml + virsh net-list --all | grep -E "default\s+active" > /dev/null || virsh net-start default + virsh net-list --all | grep -E "default\s+active\s+yes" > /dev/null || virsh net-autostart --network default for network in ${OPNFV_NETWORK_TYPES}; do + echo 
"${blue}INFO: Creating Virsh Network: $network & OVS Bridge: ${NET_MAP[$network]}${reset}" ovs-vsctl list-br | grep ${NET_MAP[$network]} > /dev/null || ovs-vsctl add-br ${NET_MAP[$network]} - virsh net-list --all | grep ${NET_MAP[$network]} > /dev/null || virsh net-define $CONFIG/${NET_MAP[$network]}-net.xml - virsh net-list | grep -E "${NET_MAP[$network]}\s+active" > /dev/null || virsh net-start ${NET_MAP[$network]} - virsh net-list | grep -E "${NET_MAP[$network]}\s+active\s+yes" > /dev/null || virsh net-autostart --network ${NET_MAP[$network]} + virsh net-list --all | grep $network > /dev/null || (cat > ${libvirt_dir}/apex-virsh-net.xml && virsh net-define ${libvirt_dir}/apex-virsh-net.xml) << EOF + + $network + + + + +EOF + if ! (virsh net-list --all | grep $network > /dev/null); then + echo "${red}ERROR: unable to create network: ${network}${reset}" + exit 1; + fi + rm -f ${libvirt_dir}/apex-virsh-net.xml &> /dev/null; + virsh net-list | grep -E "$network\s+active" > /dev/null || virsh net-start $network + virsh net-list | grep -E "$network\s+active\s+yes" > /dev/null || virsh net-autostart --network $network done echo -e "${blue}INFO: Bridges set: ${reset}" @@ -441,21 +449,8 @@ function configure_deps { fi # ensure storage pool exists and is started - virsh pool-list --all | grep default > /dev/null || virsh pool-create $CONFIG/default-pool.xml - virsh pool-list | grep -Eo "default\s+active" > /dev/null || virsh pool-start default - - if virsh net-list | grep default > /dev/null; then - num_ints_same_subnet=$(ip addr show | grep "inet 192.168.122" | wc -l) - if [ "$num_ints_same_subnet" -gt 1 ]; then - virsh net-destroy default - ##go edit /etc/libvirt/qemu/networks/default.xml - sed -i 's/192.168.122/192.168.123/g' /etc/libvirt/qemu/networks/default.xml - sed -i 's/192.168.122/192.168.123/g' instackenv-virt.json - sleep 5 - virsh net-start default - virsh net-autostart default - fi - fi + virsh pool-list --all | grep default > /dev/null || virsh pool-define-as --name default dir --target /var/lib/libvirt/images + virsh pool-list | grep -Eo "default\s+active" > /dev/null || (virsh pool-autostart default; virsh pool-start default) if ! egrep '^flags.*(vmx|svm)' /proc/cpuinfo > /dev/null; then echo "${red}virtualization extensions not found, kvm kernel module insertion may fail.\n \ @@ -482,31 +477,30 @@ Are you sure you have enabled vmx in your bios or hypervisor?${reset}" ##params: none function setup_instack_vm { if ! 
virsh list --all | grep instack > /dev/null; then - #virsh vol-create default instack.qcow2.xml - virsh define $CONFIG/instack.xml - - #Upload instack image - #virsh vol-create default --file instack.qcow2.xml - virsh vol-create-as default instack.qcow2 30G --format qcow2 + undercloud_nets="default admin_network" + if [[ $enabled_network_list =~ "public_network" ]]; then + undercloud_nets+=" public_network" + fi + define_vm instack hd 30 "$undercloud_nets" ### this doesn't work for some reason I was getting hangup events so using cp instead - #virsh vol-upload --pool default --vol instack.qcow2 --file $CONFIG/stack/instack.qcow2 + #virsh vol-upload --pool default --vol undercloud.qcow2 --file $CONFIG/stack/undercloud.qcow2 #2015-12-05 12:57:20.569+0000: 8755: info : libvirt version: 1.2.8, package: 16.el7_1.5 (CentOS BuildSystem , 2015-11-03-13:56:46, worker1.bsys.centos.org) #2015-12-05 12:57:20.569+0000: 8755: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds #2015-12-05 12:57:20.569+0000: 8756: warning : virKeepAliveTimerInternal:143 : No response from client 0x7ff1e231e630 after 6 keepalive messages in 35 seconds - #error: cannot close volume instack.qcow2 + #error: cannot close volume undercloud.qcow2 #error: internal error: received hangup / error event on socket #error: Reconnected to the hypervisor local instack_dst=/var/lib/libvirt/images/instack.qcow2 - cp -f $RESOURCES/instack.qcow2 $instack_dst + cp -f $RESOURCES/undercloud.qcow2 $instack_dst # resize instack machine echo "Checking if instack needs to be resized..." instack_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $instack_dst |grep device | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p') if [ "$instack_size" -lt 30 ]; then qemu-img resize /var/lib/libvirt/images/instack.qcow2 +25G - LIBGUESTFS_BACKEND=direct virt-resize --expand /dev/sda1 $RESOURCES/instack.qcow2 $instack_dst + LIBGUESTFS_BACKEND=direct virt-resize --expand /dev/sda1 $RESOURCES/undercloud.qcow2 $instack_dst LIBGUESTFS_BACKEND=direct virt-customize -a $instack_dst --run-command 'xfs_growfs -d /dev/sda1 || true' new_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $instack_dst |grep filesystem | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p') if [ "$new_size" -lt 30 ]; then @@ -534,30 +528,22 @@ function setup_instack_vm { virsh start instack fi - sleep 3 # let DHCP happen + sleep 10 # let instack get started up + # get the instack VM IP CNT=10 echo -n "${blue}Waiting for instack's dhcp address${reset}" - while ! grep instack /var/lib/libvirt/dnsmasq/default.leases > /dev/null && [ $CNT -gt 0 ]; do + instack_mac=$(virsh domiflist instack | grep default | awk '{ print $5 }') + while ! $(arp -e | grep ${instack_mac} > /dev/null) && [ $CNT -gt 0 ]; do echo -n "." - sleep 3 - CNT=CNT-1 + sleep 10 + CNT=$((CNT-1)) done + UNDERCLOUD=$(arp -e | grep ${instack_mac} | awk {'print $1'}) - # get the instack VM IP - UNDERCLOUD=$(grep instack /var/lib/libvirt/dnsmasq/default.leases | awk '{print $3}' | head -n 1) if [ -z "$UNDERCLOUD" ]; then - #if not found then dnsmasq may be using leasefile-ro - instack_mac=$(virsh domiflist instack | grep default | \ - grep -Eo "[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+") - UNDERCLOUD=$(/usr/sbin/arp -e | grep ${instack_mac} | awk {'print $1'}) - - if [ -z "$UNDERCLOUD" ]; then - echo "\n\nNever got IP for Instack. Can Not Continue." 
- exit 1 - else - echo -e "${blue}\rInstack VM has IP $UNDERCLOUD${reset}" - fi + echo "\n\nCan't get IP for Instack. Can Not Continue." + exit 1 else echo -e "${blue}\rInstack VM has IP $UNDERCLOUD${reset}" fi @@ -567,7 +553,7 @@ function setup_instack_vm { while ! ping -c 1 $UNDERCLOUD > /dev/null && [ $CNT -gt 0 ]; do echo -n "." sleep 3 - CNT=$CNT-1 + CNT=$((CNT-1)) done if [ "$CNT" -eq 0 ]; then echo "Failed to contact Instack. Can Not Continue" @@ -577,7 +563,7 @@ function setup_instack_vm { while ! ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "echo ''" 2>&1> /dev/null && [ $CNT -gt 0 ]; do echo -n "." sleep 3 - CNT=$CNT-1 + CNT=$((CNT-1)) done if [ "$CNT" -eq 0 ]; then echo "Failed to connect to Instack. Can Not Continue" @@ -586,13 +572,9 @@ function setup_instack_vm { # extra space to overwrite the previous connectivity output echo -e "${blue}\r ${reset}" + sleep 1 + ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "if ! ip a s eth2 | grep ${public_network_provisioner_ip} > /dev/null; then ip a a ${public_network_provisioner_ip}/${public_network_cidr##*/} dev eth2; ip link set up dev eth2; fi" - #add the instack public interface if net isolation is enabled (more than just admin network) - if [[ "$net_isolation_enabled" == "TRUE" ]]; then - virsh attach-interface --domain instack --type network --source ${NET_MAP['public_network']} --model rtl8139 --config --live - sleep 1 - ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "if ! ip a s eth2 | grep ${public_network_provisioner_ip} > /dev/null; then ip a a ${public_network_provisioner_ip}/${public_network_cidr##*/} dev eth2; ip link set up dev eth2; fi" - fi # ssh key fix for stack user ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "restorecon -r /home/stack" } @@ -600,24 +582,93 @@ function setup_instack_vm { ##Create virtual nodes in virsh ##params: none function setup_virtual_baremetal { + #start by generating the opening json for instackenv.json + cat > $CONFIG/instackenv-virt.json << EOF +{ + "nodes": [ +EOF + + # next create the virtual machines and add their definitions to the file for i in $(seq 0 $vm_index); do - if ! virsh list --all | grep baremetalbrbm_brbm1_brbm2_brbm3_${i} > /dev/null; then - if [ ! -e $CONFIG/baremetalbrbm_brbm1_brbm2_brbm3_${i}.xml ]; then - define_virtual_node baremetalbrbm_brbm1_brbm2_brbm3_${i} - fi - # Fix for ramdisk using wrong pxeboot interface - # TODO: revisit this and see if there's a more proper fix - sed -i "/^\s*/{ - N - s/^\(.*\)virtio\(.*\)$/\1rtl8139\2/ - }" $CONFIG/baremetalbrbm_brbm1_brbm2_brbm3_${i}.xml - virsh define $CONFIG/baremetalbrbm_brbm1_brbm2_brbm3_${i}.xml + if ! 
virsh list --all | grep baremetal${i} > /dev/null; then + define_vm baremetal${i} network 41 'admin_network' + for n in private_network public_network storage_network; do + if [[ $enabled_network_list =~ $n ]]; then + echo -n "$n " + virsh attach-interface --domain baremetal${i} --type network --source $n --model rtl8139 --config + fi + done else echo "Found Baremetal ${i} VM, using existing VM" fi - virsh vol-list default | grep baremetalbrbm_brbm1_brbm2_brbm3_${i} 2>&1> /dev/null || virsh vol-create-as default baremetalbrbm_brbm1_brbm2_brbm3_${i}.qcow2 40G --format qcow2 + #virsh vol-list default | grep baremetal${i} 2>&1> /dev/null || virsh vol-create-as default baremetal${i}.qcow2 41G --format qcow2 + mac=$(virsh domiflist baremetal${i} | grep admin_network | awk '{ print $5 }') + + cat >> $CONFIG/instackenv-virt.json << EOF + { + "pm_addr": "192.168.122.1", + "pm_user": "root", + "pm_password": "INSERT_STACK_USER_PRIV_KEY", + "pm_type": "pxe_ssh", + "mac": [ + "$mac" + ], + "cpu": "2", + "memory": "8192", + "disk": "41", + "arch": "x86_64" + }, +EOF done + #truncate the last line to remove the comma behind the bracket + tail -n 1 $CONFIG/instackenv-virt.json | wc -c | xargs -I {} truncate $CONFIG/instackenv-virt.json -s -{} + + #finally reclose the bracket and close the instackenv.json file + cat >> $CONFIG/instackenv-virt.json << EOF + } + ], + "arch": "x86_64", + "host-ip": "192.168.122.1", + "power_manager": "nova.virt.baremetal.virtual_power_driver.VirtualPowerManager", + "seed-ip": "", + "ssh-key": "INSERT_STACK_USER_PRIV_KEY", + "ssh-user": "root" +} +EOF +} + +##Create virtual nodes in virsh +##params: name - String: libvirt name for VM +## bootdev - String: boot device for the VM +## disksize - Number: size of the disk in Gig +## ovs_bridges: - List: list of ovs bridges +function define_vm () { + # Create the libvirt storage volume + if virsh vol-list default | grep ${1}.qcow2 2>&1> /dev/null; then + volume_path=$(virsh vol-path --pool default ${1}.qcow2 || echo "/var/lib/libvirt/images/${1}.qcow2") + echo "Volume ${1} exists. Deleting Existing Volume $volume_path" + virsh vol-dumpxml ${1}.qcow2 --pool default + touch $volume_path + virsh vol-delete ${1}.qcow2 --pool default + fi + virsh vol-create-as default ${1}.qcow2 ${3}G --format qcow2 + volume_path=$(virsh vol-path --pool default ${1}.qcow2) + if [ ! -f $volume_path ]; then + echo "$volume_path Not created successfully... 
Aborting" + exit 1 + fi + + # create the VM + /usr/libexec/openstack-tripleo/configure-vm --name $1 \ + --bootdev $2 \ + --image "$volume_path" \ + --diskbus sata \ + --arch x86_64 \ + --cpus 2 \ + --memory 8388608 \ + --libvirt-nic-driver virtio \ + --baremetal-interface $4 } ##Set network-environment settings @@ -666,11 +717,7 @@ function configure_network_environment { # check for ODL L3 if [ "${deploy_options_array['sdn_l3']}" == 'true' ]; then - nic_ext+=_br-ex - fi - - if [ "${deploy_options_array['sdn_controller']}" == 'onos' ]; then - nic_ext+=_no-public-ip + nic_ext+=_br-ex_no-public-ip fi # set nics appropriately @@ -701,21 +748,6 @@ function configure_undercloud { # vm power on the hypervisor ssh ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "cat /home/stack/.ssh/id_rsa.pub" >> /root/.ssh/authorized_keys - # fix MACs to match new setup - for i in $(seq 0 $vm_index); do - pyscript="import json -data = json.load(open('$CONFIG/instackenv-virt.json')) -print data['nodes'][$i]['mac'][0]" - - old_mac=$(python -c "$pyscript") - new_mac=$(virsh dumpxml baremetalbrbm_brbm1_brbm2_brbm3_$i | grep "mac address" | cut -d = -f2 | grep -Eo "[0-9a-f:]+") - # this doesn't work with multiple vnics on the vms - #if [ "$old_mac" != "$new_mac" ]; then - # echo "${blue}Modifying MAC for node from $old_mac to ${new_mac}${reset}" - # sed -i 's/'"$old_mac"'/'"$new_mac"'/' $CONFIG/instackenv-virt.json - #fi - done - DEPLOY_OPTIONS+=" --libvirt-type qemu" INSTACKENV=$CONFIG/instackenv-virt.json @@ -777,7 +809,13 @@ cat << 'EOF' | sudo tee /usr/share/diskimage-builder/elements/yum/bin/install-pa exit 0 EOF -openstack undercloud install &> apex-undercloud-install.log +openstack undercloud install &> apex-undercloud-install.log || { + # cat the undercloud install log incase it fails + echo "ERROR: openstack undercloud install has failed. Dumping Log:" + cat apex-undercloud-install.log + exit 1 +} + sleep 30 sudo systemctl restart openstack-glance-api sudo systemctl restart openstack-nova-conductor @@ -818,7 +856,7 @@ function undercloud_prep_overcloud_deploy { SDN_IMAGE=opendaylight elif [ "${deploy_options_array['sdn_controller']}" == 'onos' ]; then DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/onos.yaml" - SDN_IMAGE=opendaylight + SDN_IMAGE=onos elif [ "${deploy_options_array['sdn_controller']}" == 'opencontrail' ]; then echo -e "${red}ERROR: OpenContrail is currently unsupported...exiting${reset}" exit 1 @@ -1110,6 +1148,7 @@ parse_cmdline() { ;; --no-ha ) ha_enabled="FALSE" + vm_index=1 echo "HA Deployment Disabled" shift 1 ;; diff --git a/config/deploy/network/network_settings.yaml b/config/deploy/network/network_settings.yaml deleted file mode 100644 index 88bb3b58..00000000 --- a/config/deploy/network/network_settings.yaml +++ /dev/null @@ -1,100 +0,0 @@ -# This configuration file defines Network Environment for a -# Baremetal Deployment of OPNFV. It contains default values -# for 4 following networks: -# -# - admin -# - private* -# - public -# - storage* -# -# *) optional networks -# -# Any values missing from this configuration file will be -# auto-detected by deployment script from the existing network -# configuration of the jumphost. -# -# Optional networks will be consolidated with the admin network -# if not explicitely configured. -# -# See short description of the networks in the comments below. -# - -# "admin" is the short name for Control Plane Network. 
-# During OPNFV deployment it is used for node provisioning so -# PXE boot should be enabled for the related interfaces on all -# the nodes in the OPNFV cluster. After the deployment this -# network is used as the OpenStack management network which -# carries e.g. communication between its internal components. -# -admin_network: - enabled: true - network_type: bridged - bridged_interface: '' - bond_interfaces: '' - vlan: native - usable_ip_range: 192.0.2.11,192.0.2.99 - gateway: 192.0.2.1 - provisioner_ip: 192.0.2.1 - cidr: 192.0.2.0/24 - dhcp_range: 192.0.2.2,192.0.2.10 - introspection_range: 192.0.2.100,192.0.2.120 - -# "private" is an optional network used as underlying physical -# network for virtual provider and tenant networks created by -# users. Traffic between virtual machines is carried by this -# network. -# -private_network: - enabled: true - cidr: 11.0.0.0/24 - -# "public" network is used for external connectivity. -# The external network provides Internet access for virtual -# machines. If floating IP range is defined for this network, -# floating IP addresses can be used for accessing virtual -# machines from outside of OPNFV cluster. Also external REST -# API calls use this network. -# -public_network: - enabled: true - network_type: '' - bridged_interface: '' - cidr: 192.168.37.0/24 - gateway: 192.168.37.1 - floating_ip_range: 192.168.37.200,192.168.37.220 - usable_ip_range: 192.168.37.10,192.168.37.199 - provisioner_ip: 192.168.37.1 - -# "storage" is an optional network used by storage backends. -# You can configure this network in order to reduce load on -# Control Plane Network. -# -storage_network: - enabled: true - cidr: 12.0.0.0/24 - -#admin_network: -# enabled: true -# network_type: bridged #Indicates if this network will be bridged to an interface, or to a bond -# bridged_interface: '' #Interface to bridge to for installer VM -# bond_interfaces: '' #Interfaces to create bond with for installer VM -# vlan: native #VLAN tag to use, native means none -# usable_ip_range: 192.0.2.11,192.0.2.99 #Usable ip range, if empty entire range is usable, ex. 
192.168.1.10,192.168.1.20 -# gateway: 192.0.2.1 #Gateway (only needed when public_network is disabled), if empty it is auto-detected -# provisioner_ip: 192.0.2.1 #installer VM IP, if empty it is the next available IP in the admin subnet -# cidr: 192.0.2.0/24 #subnet in CIDR format 192.168.1.0/24, if empty it will be auto-detected -# dhcp_range: 192.0.2.2,192.0.2.10 #dhcp range for the admin network, if empty it will be automatically provisioned -# introspection_range: 192.0.2.100,192.0.2.120 #Range used for introspection phase (examining nodes) -#private_network: -# enabled: false #If disabled, internal api traffic will collapse to admin_network -#public_network: -# enabled: true #If disabled, public_network traffic will collapse to admin network -# network_type: '' -# bridged_interface: '' -# cidr: 192.168.37.0/24 -# gateway: 192.168.37.1 -# floating_ip_range: 192.168.37.200,192.168.37.220 #Range to allocate to floating IPs for the public network with Neutron -# usable_ip_range: 192.168.37.10,192.168.37.199 #Usable IP range on the public network, usually this is a shared subnet -# provisioner_ip: 192.168.37.1 -#storage_network: -# enabled: false #If disabled, storage_network traffic will collapse to admin network diff --git a/config/network/network_settings.yaml b/config/network/network_settings.yaml new file mode 100644 index 00000000..88bb3b58 --- /dev/null +++ b/config/network/network_settings.yaml @@ -0,0 +1,100 @@ +# This configuration file defines Network Environment for a +# Baremetal Deployment of OPNFV. It contains default values +# for 4 following networks: +# +# - admin +# - private* +# - public +# - storage* +# +# *) optional networks +# +# Any values missing from this configuration file will be +# auto-detected by deployment script from the existing network +# configuration of the jumphost. +# +# Optional networks will be consolidated with the admin network +# if not explicitely configured. +# +# See short description of the networks in the comments below. +# + +# "admin" is the short name for Control Plane Network. +# During OPNFV deployment it is used for node provisioning so +# PXE boot should be enabled for the related interfaces on all +# the nodes in the OPNFV cluster. After the deployment this +# network is used as the OpenStack management network which +# carries e.g. communication between its internal components. +# +admin_network: + enabled: true + network_type: bridged + bridged_interface: '' + bond_interfaces: '' + vlan: native + usable_ip_range: 192.0.2.11,192.0.2.99 + gateway: 192.0.2.1 + provisioner_ip: 192.0.2.1 + cidr: 192.0.2.0/24 + dhcp_range: 192.0.2.2,192.0.2.10 + introspection_range: 192.0.2.100,192.0.2.120 + +# "private" is an optional network used as underlying physical +# network for virtual provider and tenant networks created by +# users. Traffic between virtual machines is carried by this +# network. +# +private_network: + enabled: true + cidr: 11.0.0.0/24 + +# "public" network is used for external connectivity. +# The external network provides Internet access for virtual +# machines. If floating IP range is defined for this network, +# floating IP addresses can be used for accessing virtual +# machines from outside of OPNFV cluster. Also external REST +# API calls use this network. 
+# +public_network: + enabled: true + network_type: '' + bridged_interface: '' + cidr: 192.168.37.0/24 + gateway: 192.168.37.1 + floating_ip_range: 192.168.37.200,192.168.37.220 + usable_ip_range: 192.168.37.10,192.168.37.199 + provisioner_ip: 192.168.37.1 + +# "storage" is an optional network used by storage backends. +# You can configure this network in order to reduce load on +# Control Plane Network. +# +storage_network: + enabled: true + cidr: 12.0.0.0/24 + +#admin_network: +# enabled: true +# network_type: bridged #Indicates if this network will be bridged to an interface, or to a bond +# bridged_interface: '' #Interface to bridge to for installer VM +# bond_interfaces: '' #Interfaces to create bond with for installer VM +# vlan: native #VLAN tag to use, native means none +# usable_ip_range: 192.0.2.11,192.0.2.99 #Usable ip range, if empty entire range is usable, ex. 192.168.1.10,192.168.1.20 +# gateway: 192.0.2.1 #Gateway (only needed when public_network is disabled), if empty it is auto-detected +# provisioner_ip: 192.0.2.1 #installer VM IP, if empty it is the next available IP in the admin subnet +# cidr: 192.0.2.0/24 #subnet in CIDR format 192.168.1.0/24, if empty it will be auto-detected +# dhcp_range: 192.0.2.2,192.0.2.10 #dhcp range for the admin network, if empty it will be automatically provisioned +# introspection_range: 192.0.2.100,192.0.2.120 #Range used for introspection phase (examining nodes) +#private_network: +# enabled: false #If disabled, internal api traffic will collapse to admin_network +#public_network: +# enabled: true #If disabled, public_network traffic will collapse to admin network +# network_type: '' +# bridged_interface: '' +# cidr: 192.168.37.0/24 +# gateway: 192.168.37.1 +# floating_ip_range: 192.168.37.200,192.168.37.220 #Range to allocate to floating IPs for the public network with Neutron +# usable_ip_range: 192.168.37.10,192.168.37.199 #Usable IP range on the public network, usually this is a shared subnet +# provisioner_ip: 192.168.37.1 +#storage_network: +# enabled: false #If disabled, storage_network traffic will collapse to admin network diff --git a/docs/installation-instructions/virtualinstall.rst b/docs/installation-instructions/virtualinstall.rst index b31a6af0..7232952d 100644 --- a/docs/installation-instructions/virtualinstall.rst +++ b/docs/installation-instructions/virtualinstall.rst @@ -63,24 +63,5 @@ Verifying the Setup - VMs To verify the set you can follow the instructions in the `Verifying the Setup`_ section. -Before you get started following these instructions you will need to add IP addresses on the networks that have been -created for the External and provisioning networks. By default the External network is 192.168.37.0/24 and the -provisioning network is 192.0.2.0/24. To access these networks simply add an IP to brbm and brbm1 and set their link to -up. This will provide a route from the hypervisor into the virtual networks acting as OpenStack's underlay network in -the virtual deployment. - -| ``ip addr add 192.0.2.252/24 dev brbm`` -| ``ip link set up dev brbm`` -| ``ip addr add 192.168.37.252/24 dev brbm1`` -| ``ip link set up dev brbm1`` - -Once these IP addresses are assigned and the links are up the gateways on the overcloud's networks should be pingable -and read to be SSHed to. - -| ``ping 192.0.2.1`` -| ``ping 192.168.37.1`` - -Now continue with the `Verifying the Setup`_ section. - .. _`Install Bare Metal Jumphost`: baremetal.html .. 
_`Verifying the Setup`: verification.html
diff --git a/include/build.sh.debug
deleted file mode 100644
index e69de29b..00000000
diff --git a/lib/installer/onos/onos_gw_mac_update.sh b/lib/installer/onos/onos_gw_mac_update.sh
index d003cc01..323021db 100644
--- a/lib/installer/onos/onos_gw_mac_update.sh
+++ b/lib/installer/onos/onos_gw_mac_update.sh
@@ -38,7 +38,7 @@ EOI
     fi

     # get gateway mac
-    GW_MAC=$(arping ${GW_IP} -c 1 -I brbm2 | grep -Eo '([0-9a-fA-F]{2})(([/\s:-][0-9a-fA-F]{2}){5})')
+    GW_MAC=$(arping ${GW_IP} -c 1 -I br-public | grep -Eo '([0-9a-fA-F]{2})(([/\s:-][0-9a-fA-F]{2}){5})')

     if [ -z "$GW_MAC" ]; then
       echo "ERROR: Failed to find gateway mac for ${GW_IP}"
--
cgit 1.2.3-korg


From 4b5e79294eecf7e10bfb1459c55f9186312c32bf Mon Sep 17 00:00:00 2001
From: randyl
Date: Tue, 22 Mar 2016 16:02:04 -0600
Subject: Allow 2+ compute nodes for non-HA installs

APEX-117

The number of compute nodes is no longer hard-coded to 1; for non-HA
deployments it is now calculated as total_nodes - 1.

Rebased to new build.

Change-Id: I8dc135876a8b436714806b79d4193d225761534d
Signed-off-by: randyl
---
 ci/deploy.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ci/deploy.sh b/ci/deploy.sh
index 504fd507..f08ce524 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -892,7 +892,7 @@ function undercloud_prep_overcloud_deploy {
     compute_nodes=$((total_nodes - 3))
     DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/puppet-pacemaker.yaml"
   else
-    compute_nodes=1
+    compute_nodes=$((total_nodes - 1))
   fi

   if [ "$compute_nodes" -le 0 ]; then
--
cgit 1.2.3-korg
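To make the effect of this last hunk concrete, the overcloud sizing in deploy.sh now works out as in the standalone sketch below; the inventory size and messages are illustrative, only the arithmetic mirrors the code above:

    #!/bin/sh
    # illustrative only: how deploy.sh derives the compute node count
    total_nodes=5          # e.g. the number of entries in instackenv.json
    ha_enabled="FALSE"

    if [ "$ha_enabled" = "TRUE" ]; then
        compute_nodes=$((total_nodes - 3))   # three nodes reserved for HA controllers
    else
        compute_nodes=$((total_nodes - 1))   # a single controller, the rest become computes
    fi

    if [ "$compute_nodes" -le 0 ]; then
        echo "ERROR: not enough nodes to deploy at least one compute node"
        exit 1
    fi
    echo "Deploying with $compute_nodes compute node(s)"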