-rw-r--r--  common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/Dockerfile  | 82
-rw-r--r-- [-rwxr-xr-x]  common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/check_feature.sh (renamed from fuel/deploy/install-ubuntu-packages.sh)  | 20
-rw-r--r--  common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/speak.sh  | 20
-rw-r--r--  common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/start_odl_docker_container.sh  | 48
-rw-r--r--  fuel/build/Makefile  | 2
-rwxr-xr-x  fuel/build/f_lith_odl_docker/Makefile  | 52
-rwxr-xr-x  fuel/build/f_lith_odl_docker/dockerfile/Dockerfile  | 72
-rwxr-xr-x  fuel/build/f_lith_odl_docker/dockerfile/container_scripts/check_feature.sh  | 8
-rwxr-xr-x  fuel/build/f_lith_odl_docker/dockerfile/container_scripts/speak.sh  | 17
-rwxr-xr-x  fuel/build/f_lith_odl_docker/dockerfile/container_scripts/start_odl_docker.sh  | 38
-rw-r--r--  fuel/build/f_lith_odl_docker/puppet/modules/opnfv/manifests/odl_lith_docker.pp  | 81
-rw-r--r--  fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/check_feature.sh  | 18
-rw-r--r--  fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/speak.sh  | 20
-rw-r--r--  fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/start_odl_docker_container.sh  | 48
-rw-r--r--  fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/change.sh  | 219
-rwxr-xr-x  fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/config_net_odl.sh  | 192
-rwxr-xr-x  fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/stage_odl.sh  | 54
-rwxr-xr-x  fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh  | 95
-rw-r--r--  fuel/build/f_lith_odl_docker/scripts/config_net_odl.sh  | 164
-rw-r--r--  fuel/build/f_lith_odl_docker/scripts/config_neutron_for_odl.sh  | 146
-rwxr-xr-x  fuel/build/f_lith_odl_docker/scripts/prep_nets_for_odl.sh  | 90
-rw-r--r--  fuel/build/f_lith_odl_docker/scripts/setup_ovs_for_odl.sh  | 23
-rw-r--r--  fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/init.pp  | 2
-rw-r--r--  fuel/deploy/README.txt  | 106
-rw-r--r--  fuel/deploy/baremetal/conf/ericsson_montreal_lab/ha/dea.yaml  | 993
-rw-r--r--  fuel/deploy/baremetal/conf/ericsson_montreal_lab/ha/dha.yaml  | 54
-rw-r--r--  fuel/deploy/baremetal/conf/ericsson_montreal_lab/multinode/dea.yaml (renamed from fuel/deploy/baremetal/dea.yaml)  | 91
-rw-r--r--  fuel/deploy/baremetal/conf/ericsson_montreal_lab/multinode/dha.yaml  | 54
-rw-r--r--  fuel/deploy/baremetal/conf/linux_foundation_lab/ha/dea.yaml  | 950
-rw-r--r--  fuel/deploy/baremetal/conf/linux_foundation_lab/ha/dha.yaml  | 49
-rw-r--r--  fuel/deploy/baremetal/conf/linux_foundation_lab/multinode/dea.yaml  | 950
-rw-r--r--  fuel/deploy/baremetal/conf/linux_foundation_lab/multinode/dha.yaml  | 49
-rw-r--r--  fuel/deploy/baremetal/dha.yaml  | 53
-rw-r--r--  fuel/deploy/baremetal/vms/fuel.xml (renamed from fuel/deploy/baremetal/vm/vFuel)  | 2
-rw-r--r--  fuel/deploy/baremetal/vms/fuel_lf.xml  | 93
-rw-r--r--  fuel/deploy/cloud/configure_nodes.py  | 6
-rw-r--r--  fuel/deploy/cloud/deploy.py  | 44
-rw-r--r--  fuel/deploy/cloud/deployment.py  | 12
-rw-r--r--  fuel/deploy/common.py  | 21
-rw-r--r--  fuel/deploy/dea.py  | 2
-rw-r--r--  fuel/deploy/deploy.py  | 150
-rw-r--r--  fuel/deploy/deploy_env.py  | 6
-rw-r--r--  fuel/deploy/dha_adapters/hardware_adapter.py  | 9
-rw-r--r--  fuel/deploy/dha_adapters/hp_adapter.py  | 4
-rw-r--r--  fuel/deploy/dha_adapters/ipmi_adapter.py  | 50
-rw-r--r--  fuel/deploy/dha_adapters/libvirt_adapter.py  | 6
-rw-r--r--  fuel/deploy/environments/__init__.py  | 1
-rw-r--r--  fuel/deploy/environments/execution_environment.py  | 67
-rw-r--r--  fuel/deploy/environments/libvirt_environment.py  | 93
-rw-r--r--  fuel/deploy/environments/virtual_fuel.py  | 60
-rw-r--r--  fuel/deploy/install_fuel_master.py  | 33
-rw-r--r--  fuel/deploy/libvirt/conf/ha/dea.yaml  | 976
-rw-r--r--  fuel/deploy/libvirt/conf/ha/dha.yaml  | 42
-rw-r--r--  fuel/deploy/libvirt/conf/multinode/dea.yaml (renamed from fuel/deploy/libvirt/dea.yaml)  | 56
-rw-r--r--  fuel/deploy/libvirt/conf/multinode/dha.yaml  | 42
-rw-r--r--  fuel/deploy/libvirt/dha.yaml  | 80
-rw-r--r--  fuel/deploy/libvirt/networks/fuel1.xml (renamed from fuel/deploy/libvirt/networks/fuel1)  | 0
-rw-r--r--  fuel/deploy/libvirt/networks/fuel2.xml (renamed from fuel/deploy/libvirt/networks/fuel2)  | 0
-rw-r--r--  fuel/deploy/libvirt/networks/fuel3.xml (renamed from fuel/deploy/libvirt/networks/fuel3)  | 0
-rw-r--r--  fuel/deploy/libvirt/networks/fuel4.xml (renamed from fuel/deploy/libvirt/networks/fuel4)  | 0
-rw-r--r--  fuel/deploy/libvirt/vms/compute.xml (renamed from fuel/deploy/libvirt/vms/compute)  | 2
-rw-r--r--  fuel/deploy/libvirt/vms/controller.xml (renamed from fuel/deploy/libvirt/vms/controller)  | 2
-rw-r--r--  fuel/deploy/libvirt/vms/fuel.xml (renamed from fuel/deploy/libvirt/vms/fuel-master)  | 2
-rw-r--r--  fuel/deploy/reap.py  | 330
-rw-r--r--  fuel/deploy/setup_environment.py  | 165
-rw-r--r--  fuel/deploy/setup_execution_environment.py  | 36
-rw-r--r--  fuel/deploy/setup_vfuel.py  | 143
-rw-r--r--  fuel/deploy/ssh_client.py  | 10
68 files changed, 6692 insertions, 713 deletions
diff --git a/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/Dockerfile b/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/Dockerfile
new file mode 100644
index 0000000..6d7535d
--- /dev/null
+++ b/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/Dockerfile
@@ -0,0 +1,82 @@
+####################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# daniel.smith@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+#
+# DOCKER FILE FOR LITHIUM ODL RC0 Testing
+#
+#############################################################################
+
+
+#Set the base image - note: the current Karaf release targets JDK 7 and assumes Ubuntu 12.04, so we use 12.04 rather than 14.04 and avoid backporting a ton of packages
+FROM ubuntu:12.04
+
+# Maintainer Info
+MAINTAINER Daniel Smith
+
+
+#Run apt-get update once at the start to refresh package lists when building
+RUN echo "Updating APT"
+RUN apt-get update
+RUN echo "Adding wget"
+RUN apt-get install -y wget
+RUN apt-get install -y net-tools
+RUN apt-get install -y openjdk-7-jre
+RUN apt-get install -y openjdk-7-jdk
+RUN apt-get install -y openssh-server
+RUN apt-get install -y vim
+RUN apt-get install -y expect
+RUN apt-get install -y daemontools
+RUN mkdir -p /opt/odl_source/lithium
+RUN bash -c 'echo "export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64" >> ~/.bashrc'
+
+
+
+#Now let's go and fetch the ODL distribution
+RUN echo "Fetching Lithium Rc0"
+RUN wget https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/org/opendaylight/integration/distribution-karaf/0.3.0-SNAPSHOT/distribution-karaf-0.3.0-20150612.144348-2492.tar.gz -O /opt/odl_source/lithium/distribution-karaf-0.3.0-Lithium-RC0.tar.gz
+
+RUN echo "Untarring ODL in place"
+RUN mkdir -p /opt/odl/lithium
+RUN tar zxvf /opt/odl_source/lithium/distribution-karaf-0.3.0-Lithium-RC0.tar.gz -C /opt/odl/lithium
+
+RUN echo "Installing DLUX and other features into ODL"
+#COPY dockerfile/container_scripts/start_odl_docker.sh /etc/init.d/start_odl_docker.sh
+COPY container_scripts/start_odl_docker_container.sh /etc/init.d/
+COPY container_scripts/speak.sh /etc/init.d/
+#COPY dockerfile/container_scripts/speak.sh /etc/init.d/speak.sh
+RUN chmod 777 /etc/init.d/start_odl_docker_container.sh
+RUN chmod 777 /etc/init.d/speak.sh
+
+
+
+# Expose the ports
+
+# PORTS FOR BASE SYSTEM AND DLUX
+EXPOSE 8101
+EXPOSE 6633
+EXPOSE 1099
+EXPOSE 43506
+EXPOSE 8181
+EXPOSE 8185
+EXPOSE 9000
+EXPOSE 39378
+EXPOSE 33714
+EXPOSE 44444
+EXPOSE 6653
+
+# PORTS FOR OVSDB AND ODL CONTROL
+EXPOSE 12001
+EXPOSE 6640
+EXPOSE 8080
+EXPOSE 7800
+EXPOSE 55130
+EXPOSE 52150
+EXPOSE 36826
+
+# set the default CMD - this allows us to run this container as an executable
+CMD ["/etc/init.d/start_odl_docker_container.sh"]
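For reference, a rough sketch of exercising an image built from this Dockerfile by hand (assuming Docker is installed on the build host; the tag and container name below are examples only - the automated build in fuel/build/f_lith_odl_docker/Makefile uses its own BUILDTAG):

    # build the image from the directory holding this Dockerfile
    docker build -t odl_lithium_rc0 .
    # run it detached, publishing a subset of the ports exposed above
    docker run -d --name odl_test -p 8181:8181 -p 6633:6633 -p 6640:6640 odl_lithium_rc0
    # watch the start-up loop; DLUX should eventually answer on http://<host>:8181/dlux/index.html
    docker logs -f odl_test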
diff --git a/fuel/deploy/install-ubuntu-packages.sh b/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/check_feature.sh
index 1ebd7c0..04d7b53 100755..100644
--- a/fuel/deploy/install-ubuntu-packages.sh
+++ b/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/check_feature.sh
@@ -1,18 +1,18 @@
-#!/bin/bash
##############################################################################
# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
+# daniel.smith@ericsson.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Tools for installation on the libvirt server/base host
-#
-apt-get install -y libvirt-bin qemu-kvm tightvncserver virt-manager \
- sshpass fuseiso genisoimage blackbox xterm python-yaml python-netaddr \
- python-paramiko python-lxml python-pip
-pip install scp
-restart libvirt-bin \ No newline at end of file
+#!/usr/bin/expect
+spawn /opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/client
+expect "root>"
+send "feature:list | grep -i odl-restconf\r"
+send "\r\r\r"
+expect "root>"
+send "logout\r"
+
+
diff --git a/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/speak.sh b/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/speak.sh
new file mode 100644
index 0000000..a7d0e6c
--- /dev/null
+++ b/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/speak.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/expect
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# daniel.smith@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+#
+# Simple expect script to start up ODL client and load feature set for DLUX and OVSDB
+# NOTE: THIS WILL BE REPLACED WITH A PROGRAMMATIC METHOD SHORTLY
+#################################################################################
+
+spawn /opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/client
+expect "root>"
+send "feature:install odl-base-all odl-aaa-authn odl-restconf odl-nsf-all odl-adsal-northbound odl-mdsal-apidocs odl-ovsdb-openstack odl-ovsdb-northbound odl-dlux-core"
+send "\r\r\r"
+expect "root>"
+send "logout\r"
diff --git a/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/start_odl_docker_container.sh b/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/start_odl_docker_container.sh
new file mode 100644
index 0000000..96a40ec
--- /dev/null
+++ b/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/start_odl_docker_container.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# daniel.smith@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+#
+# Container entry point wrapper around Karaf / ODL
+# NOTE: THIS WILL BE REPLACED WITH A PROGRAMMATIC METHOD SHORTLY
+#################################################################################
+# Start up script for calling karaf / ODL inside a docker container.
+#
+# This script will also call a couple expect scripts to load the feature set that we want
+
+
+#ENV
+export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
+
+#MAIN
+echo "Starting up the Shields..."
+/opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/karaf server &
+echo "Sleeping 10 seconds (bad hack)"
+sleep 10
+echo "should see stuff listening now"
+netstat -na
+echo " should see process running for karaf"
+ps -efa
+echo " Starting the packages we want"
+/etc/init.d/speak.sh
+echo "Print out the status - if it's right, you should see 8181 appear now"
+netstat -na
+ps -efa
+
+
+
+## This loop keeps the container running; it prints the status of karaf to the docker logs every minute
+## Cheap - but effective
+while true;
+do
+ echo "Checking status of ODL:"
+ /opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/status
+ sleep 60
+done
+
+
diff --git a/fuel/build/Makefile b/fuel/build/Makefile
index 5f63120..6c98ed9 100644
--- a/fuel/build/Makefile
+++ b/fuel/build/Makefile
@@ -43,6 +43,7 @@ SUBDIRS += f_l23network
SUBDIRS += f_resolvconf
SUBDIRS += f_ntp
SUBDIRS += f_odl_docker
+SUBDIRS += f_lith_odl_docker
#SUBDIRS += f_odl
# f_example is only an example of how to generate a .deb package and
@@ -64,6 +65,7 @@ all:
@echo "cache.mk" $(shell md5sum $(BUILD_BASE)/cache.mk | cut -f1 -d " ") >> $(VERSION_FILE)
@echo "config.mk" $(shell md5sum $(BUILD_BASE)/config.mk | cut -f1 -d " ") >> $(VERSION_FILE)
$(MAKE) -C f_odl_docker -f Makefile all
+ $(MAKE) -C f_lith_odl_docker -f Makefile all
@make -C docker
@docker/runcontext $(DOCKERIMG) $(MAKE) $(MAKEFLAGS) iso
diff --git a/fuel/build/f_lith_odl_docker/Makefile b/fuel/build/f_lith_odl_docker/Makefile
new file mode 100755
index 0000000..e89da94
--- /dev/null
+++ b/fuel/build/f_lith_odl_docker/Makefile
@@ -0,0 +1,52 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+TOP := $(shell pwd)
+BUILDTAG := robust_stefan
+RELEASE := Lithium_rc0
+
+# Edit this to match the GENESIS / OPNFV in your environment
+export OPNFV_PUPPET := $(BUILD_BASE)/../../common/puppet-opnfv
+include ../config.mk
+
+.PHONY: all
+all:
+ @mkdir -p puppet/modules/opnfv/odl_docker/${RELEASE}
+ @rm -rf tmp
+ @mkdir -p tmp
+ @cp -Rvp ${OPNFV_PUPPET}/manifests/templates/${RELEASE}/dockerfile tmp/.
+ @docker build -t ${BUILDTAG} tmp/dockerfile/.
+ @docker save ${BUILDTAG} > puppet/modules/opnfv/odl_docker/${RELEASE}/odl_docker_image.tar
+ @wget ${DOCKER_REPO}/${DOCKER_TAG} -O puppet/modules/opnfv/odl_docker/${RELEASE}/docker-latest
+	@echo "OPNFV_PUPPET is: ${OPNFV_PUPPET}"
+ @cp -Rvp ${OPNFV_PUPPET}/manifests/templates/${RELEASE}/dockerfile/container_scripts puppet/modules/opnfv/odl_docker/${RELEASE}/.
+
+.PHONY: clean
+clean:
+ @rm -rf tmp
+ @rm -rf release
+
+.PHONY: build-clean
+build-clean:
+ @rm -rf tmp
+ @rm -rf release
+ @rm -rf puppet/modules/opnfv/odl_docker/${RELEASE}/odl_docker_image.tar
+ @rm -rf puppet/modules/opnfv/odl_docker/${RELEASE}/docker-latest
+
+.PHONY: validate-cache
+validate-cache:
+ @echo "No cache validation schema available for $(shell pwd)"
+ @echo "Continuing ..."
+
+.PHONY: release
+release:
+ # Fetch PP from OPNFV Common
+ @cp -Rvp ${OPNFV_PUPPET}/manifests/odl_docker.pp ${PUPPET_DEST}
+ @cp -Rvp puppet/modules/* $(PUPPET_DEST)
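The artifacts this Makefile drops under puppet/modules/opnfv/odl_docker/Lithium_rc0 (the saved image tarball and the docker-latest binary) are consumed later by stage_odl.sh on the controller. A manual sketch of that consumption, assuming Puppet has placed the files under /opt/opnfv/odl/lithium as in the manifest below:

    chmod +x /opt/opnfv/odl/lithium/docker-latest
    /opt/opnfv/odl/lithium/docker-latest -d &          # start the docker daemon in the background
    sleep 2
    /opt/opnfv/odl/lithium/docker-latest load -i /opt/opnfv/odl/lithium/odl_docker_image.tar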
diff --git a/fuel/build/f_lith_odl_docker/dockerfile/Dockerfile b/fuel/build/f_lith_odl_docker/dockerfile/Dockerfile
new file mode 100755
index 0000000..e3c7ee5
--- /dev/null
+++ b/fuel/build/f_lith_odl_docker/dockerfile/Dockerfile
@@ -0,0 +1,72 @@
+####################################################################
+#
+# Dockerfile to build a ODL (Karaf) Docker Container
+#
+# Copyright daniel.smith@ericsson.com
+# License: Apache GPL
+#
+####################################################################
+
+
+#Set the base image - note: the current Karaf release targets JDK 7 and assumes Ubuntu 12.04, so we use 12.04 rather than 14.04 and avoid backporting a ton of packages
+FROM ubuntu:12.04
+
+# Maintainer Info
+MAINTAINER Daniel Smith
+
+#Run apt-get update once at the start to refresh package lists when building
+RUN echo "Updating APT"
+RUN apt-get update
+RUN echo "Adding wget"
+RUN apt-get install -y wget
+RUN apt-get install -y net-tools
+RUN apt-get install -y openjdk-7-jre
+RUN apt-get install -y openjdk-7-jdk
+RUN apt-get install -y openssh-server
+RUN apt-get install -y vim
+RUN apt-get install -y expect
+RUN apt-get install -y daemontools
+RUN mkdir -p /opt/odl_source
+RUN bash -c 'echo "export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64" >> ~/.bashrc'
+
+
+#Now let's go and fetch the ODL distribution
+RUN echo "Fetching ODL"
+RUN wget https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.3-Helium-SR3/distribution-karaf-0.2.3-Helium-SR3.tar.gz -O /opt/odl_source/distribution-karaf-0.2.3-Helium-SR3.tar.gz
+
+RUN echo "Untarring ODL in place"
+RUN mkdir -p /opt/odl
+RUN tar zxvf /opt/odl_source/distribution-karaf-0.2.3-Helium-SR3.tar.gz -C /opt/odl
+
+RUN echo "Installing DLUX and other features into ODL"
+COPY tmp/dockerfile/container_scripts/start_odl_docker.sh /etc/init.d/start_odl_docker.sh
+COPY tmp/dockerfile/container_scripts/speak.sh /etc/init.d/speak.sh
+RUN chmod 777 /etc/init.d/start_odl_docker.sh
+RUN chmod 777 /etc/init.d/speak.sh
+
+
+# Expose the ports
+# PORTS FOR BASE SYSTEM AND DLUX
+EXPOSE 8101
+EXPOSE 6633
+EXPOSE 1099
+EXPOSE 43506
+EXPOSE 8181
+EXPOSE 8185
+EXPOSE 9000
+EXPOSE 39378
+EXPOSE 33714
+EXPOSE 44444
+EXPOSE 6653
+
+# PORTS FOR OVSDB AND ODL CONTROL
+EXPOSE 12001
+EXPOSE 6640
+EXPOSE 8080
+EXPOSE 7800
+EXPOSE 55130
+EXPOSE 52150
+EXPOSE 36826
+
+# set the default CMD - this allows us to run this container as an executable
+CMD ["/etc/init.d/start_odl_docker.sh"]
diff --git a/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/check_feature.sh b/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/check_feature.sh
new file mode 100755
index 0000000..3e5d0b2
--- /dev/null
+++ b/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/check_feature.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/expect
+spawn /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/client
+expect "root>"
+send "feature:list | grep -i odl-restconf\r"
+send "\r\r\r"
+expect "root>"
+send "logout\r"
+
diff --git a/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/speak.sh b/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/speak.sh
new file mode 100755
index 0000000..3ba07a8
--- /dev/null
+++ b/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/speak.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/expect
+# Ericsson Research Canada
+#
+# Author: Daniel Smith <daniel.smith@ericsson.com>
+#
+# Simple expect script to start up ODL client and load feature set for DLUX and OVSDB
+#
+# NOTE: THIS WILL BE REPLACED WITH A PROGRAMMATIC METHOD SHORTLY
+# DEPRECATED AFTER ARNO
+
+spawn /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/client
+expect "root>"
+send "feature:install odl-base-all odl-aaa-authn odl-restconf odl-nsf-all odl-adsal-northbound odl-mdsal-apidocs odl-ovsdb-openstack odl-ovsdb-northbound odl-dlux-core"
+send "\r\r\r"
+expect "root>"
+send "logout\r"
+
diff --git a/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/start_odl_docker.sh b/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/start_odl_docker.sh
new file mode 100755
index 0000000..1c72dda
--- /dev/null
+++ b/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/start_odl_docker.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+# Ericsson Research Canada
+#
+# Author: Daniel Smith <daniel.smith@ericsson.com>
+#
+# Start up script for calling karaf / ODL inside a docker container.
+#
+# This script will also call a couple expect scripts to load the feature set that we want
+
+
+#ENV
+export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
+
+#MAIN
+echo "Starting up the Shields..."
+/opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/karaf server &
+echo "Sleeping 10 seconds (bad hack)"
+sleep 10
+echo "should see stuff listening now"
+netstat -na
+echo " should see process running for karaf"
+ps -efa
+echo " Starting the packages we want"
+/etc/init.d/speak.sh
+echo "Print out the status - if it's right, you should see 8181 appear now"
+netstat -na
+ps -efa
+
+
+
+## This loop keeps the container running; it prints the status of karaf to the docker logs every minute
+## Cheap - but effective
+while true;
+do
+ echo "Checking status of ODL:"
+ /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/status
+ sleep 60
+done
diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/manifests/odl_lith_docker.pp b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/manifests/odl_lith_docker.pp
new file mode 100644
index 0000000..cd243ef
--- /dev/null
+++ b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/manifests/odl_lith_docker.pp
@@ -0,0 +1,81 @@
+class opnfv::odl_lith_docker
+{
+ case $::fuel_settings['role'] {
+ /controller/: {
+
+ file { '/opt':
+ ensure => 'directory',
+ }
+
+ file { '/opt/opnfv':
+ ensure => 'directory',
+ owner => 'root',
+ group => 'root',
+ mode => 777,
+ }
+
+ file { '/opt/opnfv/odl':
+ ensure => 'directory',
+ }
+
+ file { '/opt/opnfv/odl/lithium':
+ ensure => 'directory',
+ }
+
+ file { '/opt/opnfv/odl/lithium/odl_docker_image.tar':
+ ensure => present,
+ source => '/etc/puppet/modules/opnfv/odl_docker/Lithium_rc0/odl_docker_image.tar',
+ mode => 750,
+ }
+
+ file { '/opt/opnfv/odl/lithium/docker-latest':
+ ensure => present,
+ source => '/etc/puppet/modules/opnfv/odl_docker/Lithium_rc0/docker-latest',
+ mode => 750,
+ }
+
+      file { '/opt/opnfv/odl/start_odl_container.sh':
+ ensure => present,
+ source => '/etc/puppet/modules/opnfv/scripts/start_odl_container.sh',
+ mode => 750,
+ }
+ file { '/opt/opnfv/odl/stage_odl.sh':
+ ensure => present,
+ source => '/etc/puppet/modules/opnfv/scripts/stage_odl.sh',
+ mode => 750,
+ }
+ file { '/opt/opnfv/odl/config_net_odl.sh':
+ ensure => present,
+ source => '/etc/puppet/modules/opnfv/scripts/config_net_odl.sh',
+ mode => 750,
+ }
+ file { '/opt/opnfv/odl/change.sh':
+ ensure => present,
+ source => '/etc/puppet/modules/opnfv/scripts/change.sh',
+ mode => 750,
+ }
+
+
+ # fix failed to find the cgroup root issue
+ # https://github.com/docker/docker/issues/8791
+ case $::operatingsystem {
+ 'ubuntu': {
+ package {'cgroup-lite':
+ ensure => present,
+ }
+
+ service {'cgroup-lite':
+ ensure => running,
+ enable => true,
+ require => Package['cgroup-lite'],
+ }
+ }
+ 'centos': {
+ package {'docker-io':
+ ensure => latest,
+ }
+ }
+ }
+ }
+ }
+}
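In a Fuel deployment this class is included from opnfv::init (see the init.pp change further down). For a quick standalone test on a controller one could, roughly, apply it by hand - a sketch, assuming the module tree has been copied to /etc/puppet/modules and that $::fuel_settings is provided by the Fuel node facts:

    # only does anything on a node whose fuel role matches /controller/
    puppet apply --modulepath=/etc/puppet/modules -e 'include opnfv::odl_lith_docker'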
diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/check_feature.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/check_feature.sh
new file mode 100644
index 0000000..04d7b53
--- /dev/null
+++ b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/check_feature.sh
@@ -0,0 +1,18 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# daniel.smith@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+#!/usr/bin/expect
+spawn /opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/client
+expect "root>"
+send "feature:list | grep -i odl-restconf\r"
+send "\r\r\r"
+expect "root>"
+send "logout\r"
+
+
diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/speak.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/speak.sh
new file mode 100644
index 0000000..a7d0e6c
--- /dev/null
+++ b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/speak.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/expect
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# daniel.smith@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+#
+# Simple expect script to start up ODL client and load feature set for DLUX and OVSDB
+# NOTE: THIS WILL BE REPLACED WITH A PROGRAMMATIC METHOD SHORTLY
+#################################################################################
+
+spawn /opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/client
+expect "root>"
+send "feature:install odl-base-all odl-aaa-authn odl-restconf odl-nsf-all odl-adsal-northbound odl-mdsal-apidocs odl-ovsdb-openstack odl-ovsdb-northbound odl-dlux-core"
+send "\r\r\r"
+expect "root>"
+send "logout\r"
diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/start_odl_docker_container.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/start_odl_docker_container.sh
new file mode 100644
index 0000000..96a40ec
--- /dev/null
+++ b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/start_odl_docker_container.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# daniel.smith@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+#
+# Container entry point wrapper around Karaf / ODL
+# NOTE: THIS WILL BE REPLACED WITH A PROGRAMMATIC METHOD SHORTLY
+#################################################################################
+# Start up script for calling karaf / ODL inside a docker container.
+#
+# This script will also call a couple expect scripts to load the feature set that we want
+
+
+#ENV
+export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
+
+#MAIN
+echo "Starting up the Shields..."
+/opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/karaf server &
+echo "Sleeping 10 seconds (bad hack)"
+sleep 10
+echo "should see stuff listening now"
+netstat -na
+echo " should see process running for karaf"
+ps -efa
+echo " Starting the packages we want"
+/etc/init.d/speak.sh
+echo "Print out the status - if it's right, you should see 8181 appear now"
+netstat -na
+ps -efa
+
+
+
+## This loop keeps the container running; it prints the status of karaf to the docker logs every minute
+## Cheap - but effective
+while true;
+do
+ echo "Checking status of ODL:"
+ /opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/status
+ sleep 60
+done
+
+
diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/change.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/change.sh
new file mode 100644
index 0000000..f7f3d6e
--- /dev/null
+++ b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/change.sh
@@ -0,0 +1,219 @@
+#!/bin/bash
+# script to remove bridges and reset networking for ODL
+
+
+#VARS
+MODE=0
+DNS=8.8.8.8
+
+#ENV
+source ~/openrc
+
+# GET IPS for that node
+function get_ips {
+ BR_MGMT=`grep address /etc/network/ifcfg_backup/ifcfg-br-mgmt | awk -F" " '{print $2}'`
+ BR_STORAGE=`grep address /etc/network/ifcfg_backup/ifcfg-br-storage | awk -F" " '{print $2}'`
+ BR_FW_ADMIN=`grep address /etc/network/ifcfg_backup/ifcfg-br-fw-admin | awk -F" " '{print $2}'`
+ BR_EX=`grep address /etc/network/ifcfg_backup/ifcfg-br-ex | awk -F" " '{print $2}'`
+ DEF_NETMASK=255.255.255.0
+ DEF_GW=172.30.9.1
+}
+
+function backup_ifcfg {
+ echo " backing up "
+ mkdir -p /etc/network/ifcfg_backup
+ mv /etc/network/interfaces.d/ifcfg-br-ex /etc/network/ifcfg_backup/.
+ mv /etc/network/interfaces.d/ifcfg-br-fw-admin /etc/network/ifcfg_backup/.
+ mv /etc/network/interfaces.d/ifcfg-br-mgmt /etc/network/ifcfg_backup/.
+ mv /etc/network/interfaces.d/ifcfg-br-storage /etc/network/ifcfg_backup/.
+ mv /etc/network/interfaces.d/ifcfg-br-prv /etc/network/ifcfg_backup/.
+ mv /etc/network/interfaces.d/ifcfg-eth0 /etc/network/ifcfg_backup/.
+ mv /etc/network/interfaces.d/ifcfg-eth1 /etc/network/ifcfg_backup/.
+ rm -rf /etc/network/interfaces.d/ifcfg-eth1.300
+ rm -rf /etc/network/interfaces.d/ifcfg-eth1.301
+ rm -rf /etc/network/interfaces.d/ifcfg-eth1
+ rm -rf /etc/network/interfaces.d/ifcfg-eth0
+
+}
+
+
+function create_ifcfg_br_mgmt {
+ echo "migrating br_mgmt"
+ echo "auto eth1.300" >> /etc/network/interfaces.d/ifcfg-eth1.300
+ echo "iface eth1.300 inet static" >> /etc/network/interfaces.d/ifcfg-eth1.300
+ echo " address $BR_MGMT" >> /etc/network/interfaces.d/ifcfg-eth1.300
+ echo " netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth1.300
+}
+
+function create_ifcfg_br_storage {
+    echo "migrating br_storage"
+ echo "auto eth1.301" >> /etc/network/interfaces.d/ifcfg-eth1.301
+ echo "iface eth1.301 inet static" >> /etc/network/interfaces.d/ifcfg-eth1.301
+ echo " address $BR_STORAGE" >> /etc/network/interfaces.d/ifcfg-eth1.301
+ echo " netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth1.301
+}
+
+function create_ifcfg_br_fw_admin {
+    echo " migrating br_fw_admin"
+ echo "auto eth1" >> /etc/network/interfaces.d/ifcfg-eth1
+ echo "iface eth1 inet static" >> /etc/network/interfaces.d/ifcfg-eth1
+ echo " address $BR_FW_ADMIN" >> /etc/network/interfaces.d/ifcfg-eth1
+ echo " netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth1
+}
+
+function create_ifcfg_eth0 {
+    echo "migrating br-ex to eth0 - temporarily"
+ echo "auto eth0" >> /etc/network/interfaces.d/ifcfg-eth0
+ echo "iface eth0 inet static" >> /etc/network/interfaces.d/ifcfg-eth0
+ echo " address $BR_EX" >> /etc/network/interfaces.d/ifcfg-eth0
+ echo " netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth0
+ echo " gateway $DEF_GW" >> /etc/network/interfaces.d/ifcfg-eth0
+}
+
+function set_mode {
+ if [ -d "/var/lib/glance/images" ]
+ then
+ echo " controller "
+ MODE=0
+ else
+ echo " compute "
+ MODE=1
+ fi
+}
+
+
+function stop_ovs {
+ echo "Stopping OpenVSwitch"
+ service openvswitch-switch stop
+
+}
+
+function start_ovs {
+ echo "Starting OVS"
+ service openvswitch-switch start
+ ovs-vsctl show
+}
+
+
+function clean_ovs {
+ echo "cleaning OVS DB"
+ stop_ovs
+ rm -rf /var/log/openvswitch/*
+ mkdir -p /opt/opnfv/odl/ovs_back
+ cp -pr /etc/openvswitch/* /opt/opnfv/odl/ovs_back/.
+ rm -rf /etc/openvswitch/conf.db
+ echo "restarting OVS - you should see Nothing there"
+ start_ovs
+}
+
+
+
+function reboot_me {
+ reboot
+}
+
+function allow_challenge {
+ sed -i -e 's/ChallengeResponseAuthentication no/ChallengeResponseAuthentication yes/g' /etc/ssh/sshd_config
+ service ssh restart
+}
+
+function clean_neutron {
+ subnets=( `neutron subnet-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` )
+ networks=( `neutron net-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` )
+ ports=( `neutron port-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` )
+ routers=( `neutron router-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` )
+
+ #display all elements
+ echo "SUBNETS: ${subnets[@]} "
+ echo "NETWORKS: ${networks[@]} "
+ echo "PORTS: ${ports[@]} "
+ echo "ROUTERS: ${routers[@]} "
+
+
+ # get port and subnet for each router
+ for i in "${routers[@]}"
+ do
+ routerport=( `neutron router-port-list $i | awk -F" " '{print $2}' | grep -v id | sed '/^$/d' `)
+ routersnet=( `neutron router-port-list $i | awk -F" " '{print $8}' | grep -v fixed | sed '/^$/d' | sed 's/,$//' | sed -e 's/^"//' -e 's/"$//' `)
+ done
+
+ echo "ROUTER PORTS: ${routerport[@]} "
+ echo "ROUTER SUBNET: ${routersnet[@]} "
+
+ #remove router subnets
+ echo "router-interface-delete"
+ for i in "${routersnet[@]}"
+ do
+ neutron router-interface-delete ${routers[0]} $i
+ done
+
+ #remove subnets
+ echo "subnet-delete"
+ for i in "${subnets[@]}"
+ do
+ neutron subnet-delete $i
+ done
+
+ #remove nets
+ echo "net-delete"
+ for i in "${networks[@]}"
+ do
+ neutron net-delete $i
+ done
+
+ #remove routers
+ echo "router-delete"
+ for i in "${routers[@]}"
+ do
+ neutron router-delete $i
+ done
+
+ #remove ports
+ echo "port-delete"
+ for i in "${ports[@]}"
+ do
+ neutron port-delete $i
+ done
+
+ #remove subnets
+ echo "subnet-delete second pass"
+ for i in "${subnets[@]}"
+ do
+ neutron subnet-delete $i
+ done
+
+}
+
+function set_dns {
+    sed -i -e "s/nameserver 10.20.0.2/nameserver $DNS/g" /etc/resolv.conf
+}
+
+
+#OUTPUT
+
+function check {
+ echo $BR_MGMT
+ echo $BR_STORAGE
+ echo $BR_FW_ADMIN
+ echo $BR_EX
+}
+
+### MAIN
+
+
+set_mode
+backup_ifcfg
+get_ips
+create_ifcfg_br_mgmt
+create_ifcfg_br_storage
+create_ifcfg_br_fw_admin
+if [ $MODE == "0" ]
+then
+ create_ifcfg_eth0
+fi
+allow_challenge
+clean_ovs
+check
+reboot_me
+
+
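To illustrate the ifcfg files this script generates, create_ifcfg_br_mgmt appends roughly the following to /etc/network/interfaces.d/ifcfg-eth1.300 (the address comes from the backed-up br-mgmt config; 192.168.0.2 is only a placeholder):

    auto eth1.300
    iface eth1.300 inet static
     address 192.168.0.2
     netmask 255.255.255.0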
diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/config_net_odl.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/config_net_odl.sh
new file mode 100755
index 0000000..145da80
--- /dev/null
+++ b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/config_net_odl.sh
@@ -0,0 +1,192 @@
+#!/bin/bash
+#
+# Author: Daniel Smith (Ericsson)
+#
+# Script to update neutron configuration for OVSDB/ODL integration
+#
+# Usage - Set / pass CONTROL_HOST to your needs
+#
+### SET THIS VALUE TO MATCH YOUR SYSTEM
+CONTROL_HOST=192.168.0.2
+BR_EX_IP=172.30.9.70
+
+# ENV
+source ~/openrc
+# VARS
+ML2_CONF=/etc/neutron/plugins/ml2/ml2_conf.ini
+MODE=0
+
+
+# FUNCTIONS
+# Update ml2_conf.ini
+function update_ml2conf {
+ echo "Backing up and modifying ml2_conf.ini"
+ cp $ML2_CONF $ML2_CONF.bak
+ sed -i -e 's/mechanism_drivers =openvswitch/mechanism_drivers = opendaylight/g' $ML2_CONF
+ sed -i -e 's/tenant_network_types = flat,vlan,gre,vxlan/tenant_network_types = vxlan/g' $ML2_CONF
+ sed -i -e 's/bridge_mappings=physnet2:br-prv/bridge_mappings=physnet1:br-ex/g' $ML2_CONF
+ echo "[ml2_odl]" >> $ML2_CONF
+ echo "password = admin" >> $ML2_CONF
+ echo "username = admin" >> $ML2_CONF
+ echo "url = http://${CONTROL_HOST}:8080/controller/nb/v2/neutron" >> $ML2_CONF
+}
+
+function reset_neutrondb {
+    echo "Resetting DB"
+ mysql -e "drop database if exists neutron_ml2;"
+ mysql -e "create database neutron_ml2 character set utf8;"
+ mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';"
+ neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
+}
+
+function restart_neutron {
+ echo "Restarting Neutron Server"
+ service neutron-server restart
+    echo "Should see Neutron running now"
+ service neutron-server status
+    echo "Shouldn't be any nets, but should work (return empty)"
+ neutron net-list
+}
+
+function stop_neutron {
+ echo "Stopping Neutron / OVS components"
+ service neutron-plugin-openvswitch-agent stop
+ if [ $MODE == "0" ]
+ then
+ service neutron-server stop
+ fi
+}
+
+function disable_agent {
+ echo "Disabling Neutron Plugin Agents from running"
+ service neutron-plugin-openvswitch-agent stop
+ echo 'manual' > /etc/init/neutron-plugin-openvswitch-agent.override
+}
+
+
+
+function verify_ML2_working {
+ echo "checking that we can talk via ML2 properly"
+ curl -u admin:admin http://${CONTROL_HOST}:8080/controller/nb/v2/neutron/networks > /tmp/check_ml2
+ if grep "network" /tmp/check_ml2
+ then
+ echo "Success - ML2 to ODL is working"
+ else
+ echo "im sorry Jim, but its dead"
+ fi
+
+}
+
+
+function set_mode {
+ if [ -d "/var/lib/glance/images" ]
+ then
+ echo "Controller Mode"
+ MODE=0
+ else
+ echo "Compute Mode"
+ MODE=1
+ fi
+}
+
+function stop_ovs {
+ echo "Stopping OpenVSwitch"
+ service openvswitch-switch stop
+
+}
+
+function start_ovs {
+ echo "Starting OVS"
+    service openvswitch-switch start
+ ovs-vsctl show
+}
+
+
+function control_setup {
+ echo "Modifying Controller"
+ stop_neutron
+ stop_ovs
+ disable_agent
+ rm -rf /var/log/openvswitch/*
+ mkdir -p /opt/opnfv/odl/ovs_back
+ mv /etc/openvswitch/conf.db /opt/opnfv/odl/ovs_back/.
+ mv /etc/openvswitch/.conf*lock* /opt/opnfv/odl/ovs_back/.
+ rm -rf /etc/openvswitch/conf.db
+ rm -rf /etc/openvswitch/.conf*
+ service openvswitch-switch start
+ ovs-vsctl add-br br-ex
+ ovs-vsctl add-port br-ex eth0
+ ovs-vsctl set interface br-ex type=external
+ ifconfig br-ex 172.30.9.70/24 up
+ service neutron-server restart
+
+ echo "setting up networks"
+ ip link add link eth1 name br-mgmt type vlan id 300
+ ifconfig br-mgmt `grep address /etc/network/interfaces.d/ifcfg-br-mgmt | awk -F" " '{print $2}'`/24 up arp
+ ip link add link eth1 name br-storage type vlan id 301
+ ip link add link eth1 name br-prv type vlan id 1000
+ ifconfig br-storage `grep address /etc/network/interfaces.d/ifcfg-br-storage | awk -F" " '{print $2}'`/24 up arp
+ ifconfig eth1 `grep address /etc/network/interfaces.d/ifcfg-br-fw-admin | awk -F" " '{print $2}'`/24 up arp
+
+ echo "Setting ODL Manager IP"
+ ovs-vsctl set-manager tcp:192.168.0.2:6640
+
+ echo "Verifying ODL ML2 plugin is working"
+ verify_ML2_working
+
+ # BAD HACK - Should be parameterized - this is to catch up
+ route add default gw 172.30.9.1
+
+}
+
+function clean_ovs {
+ echo "cleaning OVS DB"
+ stop_ovs
+ rm -rf /var/log/openvswitch/*
+ mkdir -p /opt/opnfv/odl/ovs_back
+ cp -pr /etc/openvswitch/* /opt/opnfv/odl/ovs_back/.
+ rm -rf /etc/openvswitch/conf.db
+ echo "restarting OVS - you should see Nothing there"
+ start_ovs
+}
+
+function compute_setup {
+ echo "Modifying Compute"
+ echo "Disabling neutron openvswitch plugin"
+ stop_neutron
+ disable_agent
+ ip link add link eth1 name br-mgmt type vlan id 300
+ ifconfig br-mgmt `grep address /etc/network/interfaces.d/ifcfg-br-mgmt | awk -F" " '{print $2}'`/24 up arp
+ ip link add link eth1 name br-storage type vlan id 301
+ ip link add link eth1 name br-prv type vlan id 1000
+ ifconfig br-storage `grep address /etc/network/interfaces.d/ifcfg-br-storage | awk -F" " '{print $2}'`/24 up arp
+ ifconfig eth1 `grep address /etc/network/interfaces.d/ifcfg-br-fw-admin | awk -F" " '{print $2}'`/24 up arp
+
+ echo "set manager, and route for ODL controller"
+ ovs-vsctl set-manager tcp:192.168.0.2:6640
+ route add 172.17.0.1 gw 192.168.0.2
+ verify_ML2_working
+}
+
+
+# MAIN
+echo "Starting to make call"
+update_ml2conf
+echo "Check Mode"
+set_mode
+
+if [ $MODE == "0" ];
+then
+ echo "Calling control setup"
+ control_setup
+elif [ $MODE == "1" ];
+then
+ echo "Calling compute setup"
+ compute_setup
+
+else
+ echo "Something is bad - call for help"
+ exit
+fi
+
+
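For clarity, after update_ml2conf has run, the section appended to /etc/neutron/plugins/ml2/ml2_conf.ini looks like this (with CONTROL_HOST left at its default of 192.168.0.2):

    [ml2_odl]
    password = admin
    username = admin
    url = http://192.168.0.2:8080/controller/nb/v2/neutron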
diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/stage_odl.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/stage_odl.sh
new file mode 100755
index 0000000..fa14b47
--- /dev/null
+++ b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/stage_odl.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+# Author: Daniel Smith (Ericsson)
+# Stages the ODL Controller
+# Inputs: odl_docker_image.tar
+# Usage: ./stage_odl.sh
+
+# ENVS
+source ~/.bashrc
+source ~/openrc
+
+LOCALPATH=/opt/opnfv/odl
+DOCKERBIN=docker-latest
+ODLIMGNAME=odl_docker_image.tar
+DNS=8.8.8.8
+HOST_IP=`ifconfig br-ex | grep -i "inet addr" | awk -F":" '{print $2}' | awk -F" " '{print $1}'`
+
+
+
+# DEBUG ECHOS
+echo $LOCALPATH
+echo $DOCKERBIN
+echo $ODLIMGNAME
+echo $DNS
+echo $HOST_IP
+
+
+# Set DNS to something external and set the default GW - ODL requires a connection to the internet
+sed -i -e 's/nameserver 10.20.0.2/nameserver 8.8.8.8/g' /etc/resolv.conf
+route delete default gw 10.20.0.2
+route add default gw 172.30.9.1
+
+# Start Docker daemon in the background
+echo "Starting Docker"
+chmod +x $LOCALPATH/$DOCKERBIN
+$LOCALPATH/$DOCKERBIN -d &
+#courtesy sleep for virtual env
+sleep 2
+
+# Import the ODL Container
+echo "Importing ODL Container"
+$LOCALPATH/$DOCKERBIN load -i $LOCALPATH/$ODLIMGNAME
+
+# Start ODL, load DLUX and OVSDB modules
+echo "Removing any old install found - file not found is ok here"
+$LOCALPATH/$DOCKERBIN rm odl_docker
+echo "Starting up ODL controller in Daemon mode - no shell possible"
+$LOCALPATH/$DOCKERBIN run --name odl_docker -p 8181:8181 -p 8185:8185 -p 9000:9000 -p 1099:1099 -p 8101:8101 -p 6633:6633 -p 43506:43506 -p 44444:44444 -p 6653:6653 -p 12001:12001 -p 6400:6400 -p 6640:6640 -p 8080:8080 -p 7800:7800 -p 55130:55130 -p 52150:52150 -p 36826:26826 -i -d -t loving_daniel
+
+# Following, you should see the docker ps listed and a port opened
+echo " you should reach ODL controller at http://$HOST_IP:8181/dlux/index.html"
+$LOCALPATH/$DOCKERBIN ps -a
+netstat -lnt
+
+
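Once the container is up, the same northbound check used by verify_ML2_working in config_net_odl.sh can be run by hand - a sketch, assuming the default admin/admin credentials and that port 8080 is published as in the docker run line above:

    curl -u admin:admin http://localhost:8080/controller/nb/v2/neutron/networks
    # DLUX should also answer on http://<br-ex address>:8181/dlux/index.html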
diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh
new file mode 100755
index 0000000..347ac74
--- /dev/null
+++ b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh
@@ -0,0 +1,95 @@
+#!/bin/bash
+# Ericsson Canada Inc.
+# Author: Daniel Smith
+#
+# A helper script to install and set up the ODL docker container on the controller
+#
+#
+# Inputs: odl_docker_image.tar
+#
+# Usage: ./start_odl_docker.sh
+echo "DEPRECATED - USE stage_odl.sh instead - this will be removed shortly once automated deployment is working - SR1"
+
+
+# ENVS
+source ~/.bashrc
+source ~/openrc
+
+# VARS
+
+# Switch for Dev mode - uses apt-get on control to cheat and get docker installed locally rather than from puppet source
+
+DEV=1
+
+# Switch for 1:1 mapping of EXPOSED Docker ports to the host; if set to 0, random ports will be used - NOTE: this doesn't work for all web services (X port on host --> Y port in container),
+# especially for SSL/HTTPS cases. Be aware.
+
+MATCH_PORT=1
+
+LOCALPATH=/opt/opnfv/odl
+DOCKERBINNAME=docker-latest
+DOCKERIMAGENAME=odl_docker_image.tar
+DNS=8.8.8.8
+HOST_IP=`ifconfig br-fw-admin | grep -i "inet addr" | awk -F":" '{print $2}' | awk -F" " '{print $1}'`
+
+
+# Set this to "1" if you want to have your docker container startup into a shell
+
+
+ENABLE_SHELL=1
+
+
+echo " Fetching Docker "
+if [ "$DEV" -eq "1" ];
+# If testing Locally (on a control node) you can set DEV=1 to enable apt-get based install on the control node (not desired target, but good for testing).
+then
+ echo "Dev Mode - Fetching from Internet";
+ echo " this wont work in production builds";
+ apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
+ mkdir -p $LOCALPATH
+ wget https://get.docker.com/builds/Linux/x86_64/docker-latest -O $LOCALPATH/$DOCKERBINNAME
+ wget http://ftp.us.debian.org/debian/pool/main/d/docker.io/docker.io_1.3.3~dfsg1-2_amd64.deb
+ chmod 777 $LOCALPATH/$DOCKERBINNAME
+ echo "done ";
+else
+ echo "Using Binaries delivered from Puppet"
+ echo "Starting Docker in Daemon mode"
+ chmod +x $LOCALPATH/$DOCKERBINNAME
+ $LOCALPATH/$DOCKERBINNAME -d &
+
+ # wait until docker will be fully initialized
+ # before any further action against just started docker
+ sleep 5
+fi
+
+
+# We need to perform some cleanup of the Openstack Environment
+echo "TODO -- This should be automated in the Fuel deployment at some point"
+echo "However, the timing should come after basic tests are running, since this "
+echo " part will remove the subnet router association that is deployed automatically"
+echo " via fuel. Refer to the ODL + Openstack Integration Page "
+
+# Import the ODL container into docker
+
+echo "Importing ODL container into docker"
+$LOCALPATH/$DOCKERBINNAME load -i $LOCALPATH/$DOCKERIMAGENAME
+
+echo " starting up ODL - DLUX and Mapping Ports"
+if [ "$MATCH_PORT" -eq "1" ]
+then
+ echo "Starting up Docker..."
+ $LOCALPATH/$DOCKERBINNAME rm odl_docker
+fi
+
+if [ "$ENABLE_SHELL" -eq "1" ];
+then
+ echo "Starting Container in Interactive Mode (/bin/bash will be provided, you will need to run ./start_odl_docker.sh inside the container yourself)"
+ $LOCALPATH/$DOCKERBINNAME run --name odl_docker -p 8181:8181 -p 8185:8185 -p 9000:9000 -p 1099:1099 -p 8101:8101 -p 6633:6633 -p 43506:43506 -p 44444:44444 -p 6653:6653 -p 12001:12001 -p 6400:6400 -p 6640:6640 -p 8080:8080 -p 7800:7800 -p 55130:55130 -p 52150:52150 -p 36826:26826 -i -t loving_daniel /bin/bash
+else
+        echo "Starting Container in Daemon mode - no shell will be provided (and docker attach will not provide a shell)"
+ $LOCALPATH/$DOCKERBINNAME run --name odl_docker -p 8181:8181 -p 8185:8185 -p 9000:9000 -p 1099:1099 -p 8101:8101 -p 6633:6633 -p 43506:43506 -p 44444:44444 -p 6653:6653 -p 12001:12001 -p 6400:6400 -p 6640:6640 -p 8080:8080 -p 7800:7800 -p 55130:55130 -p 52150:52150 -p 36826:26826 -i -d -t loving_daniel
+ echo "should see the process listed here in docker ps -a"
+ $LOCALPATH/$DOCKERBINNAME ps -a;
+ echo "Match Port enabled, you can reach the DLUX login at: "
+ echo "http://$HOST_IP:8181/dlux.index.html"
+        echo "http://$HOST_IP:8181/dlux/index.html"
diff --git a/fuel/build/f_lith_odl_docker/scripts/config_net_odl.sh b/fuel/build/f_lith_odl_docker/scripts/config_net_odl.sh
new file mode 100644
index 0000000..d292acd
--- /dev/null
+++ b/fuel/build/f_lith_odl_docker/scripts/config_net_odl.sh
@@ -0,0 +1,164 @@
+#!/bin/bash
+#
+# Author: Daniel Smith (Ericsson)
+#
+# Script to update neutron configuration for OVSDB/ODL integration
+#
+# Usage - Set / pass CONTROL_HOST to your needs
+#
+CONTROL_HOST=172.30.9.70
+
+# ENV
+source ~/openrc
+
+# VARS
+ML2_CONF=/etc/neutron/plugins/ml2/ml2_conf.ini
+MODE=0
+
+
+# FUNCTIONS
+
+# Update ml2_conf.ini
+function update_ml2conf {
+ echo "Backing up and modifying ml2_conf.ini"
+ cp $ML2_CONF $ML2_CONF.bak
+ sed -i -e 's/mechanism_drivers =openvswitch/mechanism_drivers = opendaylight/g' $ML2_CONF
+ sed -i -e 's/tenant_network_types = flat,vlan,gre,vxlan/tenant_network_types = vxlan/g' $ML2_CONF
+    echo "[ml2_odl]" >> $ML2_CONF
+    echo "password = admin" >> $ML2_CONF
+    echo "username = admin" >> $ML2_CONF
+    echo "url = http://${CONTROL_HOST}:8080/controller/nb/v2/neutron" >> $ML2_CONF
+}
+
+function reset_neutrondb {
+    echo "Resetting DB"
+ mysql -e "drop database if exists neutron_ml2;"
+ mysql -e "create database neutron_ml2 character set utf8;"
+ mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';"
+ neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
+}
+
+function restart_neutron {
+ echo "Restarting Neutron Server"
+ service neutron-server restart
+    echo "Should see Neutron running now"
+ service neutron-server status
+    echo "Shouldn't be any nets, but should work (return empty)"
+ neutron net-list
+}
+
+function stop_neutron {
+ echo "Stopping Neutron / OVS components"
+ service neutron-plugin-openvswitch-agent stop
+ if [ $MODE == "0" ]
+ then
+ service neutron-server stop
+ fi
+}
+
+
+
+function verify_ML2_working {
+ echo "checking that we can talk via ML2 properly"
+ curl -u admin:admin http://${CONTROL_HOST}:8080/controller/nb/v2/neutron/networks > /tmp/check_ml2
+ if grep "network" /tmp/check_ml2
+ then
+ echo "Success - ML2 to ODL is working"
+ else
+ echo "im sorry Jim, but its dead"
+ fi
+
+}
+
+
+function set_mode {
+ if ls -l /var/lib/glance/images
+ then
+ echo "Controller Mode"
+ MODE=0
+ else
+ echo "Compute Mode"
+ MODE=1
+ fi
+}
+
+function stop_ovs {
+ echo "Stopping OpenVSwitch"
+ service openvswitch-switch stop
+
+}
+
+function control_setup {
+ echo "Modifying Controller"
+ stop_neutron
+ stop_ovs
+ rm -rf /var/log/openvswitch/*
+ mkdir -p /opt/opnfv/odl/ovs_back
+ mv /etc/openvswitch/conf.db /opt/opnfv/odl/ovs_back/.
+ mv /etc/openvswitch/.conf*lock* /opt/opnfv/odl/ovs_back/.
+ service openvswitch-switch start
+ ovs-vsctl set-manager tcp:172.30.9.70:6640
+ ovs-vsctl add-br br-eth0
+ ovs-vsctl add-br br-ex
+ ovs-vsctl add-port br-eth0 eth0
+ ovs-vsctl add-port br-eth0 br-eth0--br-ex
+ ovs-vsctl add-port br-ex br-ex--br-eth0
+ ovs-vsctl set interface br-ex--br-eth0 type=patch
+ ovs-vsctl set interface br-eth0--br-ex type=patch
+ ovs-vsctl set interface br-ex--br-eth0 options:peer=br-eth0--br-ex
+ ovs-vsctl set interface br-eth0--br-ex options:peer=br-ex--br-eth0
+ ifconfig br-ex 172.30.9.70/24 up
+ service neutron-server restart
+
+ echo "setting up networks"
+ ip link add link eth1 name br-mgmt type vlan id 300
+ ip link add link eth1 name br-storage type vlan id 301
+ /etc/init.d/networking restart
+
+
+ echo "Reset Neutron DB"
+ #reset_neutrondb
+ echo "Restarting Neutron Components"
+ #restart_neutron
+ echo "Verifying ODL ML2 plugin is working"
+ verify_ML2_working
+
+}
+
+function compute_setup {
+ echo "do compute stuff here"
+ echo "stopping neutron openvswitch plugin"
+ stop_neutron
+ ip link add link eth1 name br-mgmt type vlan id 300
+ ifconfig br-mgmt `grep address /etc/network/interfaces.d/ifcfg-br-mgmt | awk -F" " '{print $2}'`/24
+ ip link add link eth1 name br-storage type vlan id 301
+ ifconfig br-storage `grep address /etc/network/interfaces.d/ifcfg-br-storage | awk -F" " '{print $2}'`/24
+    ifconfig eth1 `grep address /etc/network/interfaces.d/ifcfg-br-fw-admin | awk -F" " '{print $2}'`/24
+ echo "set manager, and route for ODL controller"
+ ovs-vsctl set-manager tcp:192.168.0.2:6640
+ route add 172.17.0.1 gw 192.168.0.2
+ verify_ML2_working
+}
+
+
+# MAIN
+echo "Starting to make call"
+update_ml2conf
+echo "Check Mode"
+set_mode
+
+if [ $MODE == "0" ];
+then
+ echo "Calling control setup"
+ control_setup
+elif [ $MODE == "1" ];
+then
+ echo "Calling compute setup"
+ compute_setup
+
+else
+ echo "Something is bad - call for help"
+ exit
+fi
+
+
diff --git a/fuel/build/f_lith_odl_docker/scripts/config_neutron_for_odl.sh b/fuel/build/f_lith_odl_docker/scripts/config_neutron_for_odl.sh
new file mode 100644
index 0000000..3b688ae
--- /dev/null
+++ b/fuel/build/f_lith_odl_docker/scripts/config_neutron_for_odl.sh
@@ -0,0 +1,146 @@
+#!/bin/bash
+CONTROL_HOST=172.17.0.3
+
+# ENV
+source ~/openrc
+
+
+
+# VARS
+ML2_CONF=/etc/neutron/plugins/ml2/ml2_conf.ini
+MODE=0
+
+
+# FUNCTIONS
+
+
+# Update ml2_conf.ini
+function update_ml2conf {
+ echo "Backing up and modifying ml2_conf.ini"
+ cp $ML2_CONF $ML2_CONF.bak
+ sed -i -e 's/mechanism_drivers =openvswitch/mechanism_drivers = opendaylight/g' $ML2_CONF
+#!/bin/bash
+CONTROL_HOST=172.17.0.3
+
+# ENV
+source ~/openrc
+
+
+
+# VARS
+ML2_CONF=/etc/neutron/plugins/ml2/ml2_conf.ini
+MODE=0
+
+
+# FUNCTIONS
+
+
+# Update ml2_conf.ini
+function update_ml2conf {
+ echo "Backing up and modifying ml2_conf.ini"
+ cp $ML2_CONF $ML2_CONF.bak
+ sed -i -e 's/mechanism_drivers =openvswitch/mechanism_drivers = opendaylight/g' $ML2_CONF
+ sed -i -e 's/tenant_network_types = flat,vlan,gre,vxlan/tenant_network_types = vxlan/g' $ML2_CONF
+    echo "[ml2_odl]" >> $ML2_CONF
+    echo "password = admin" >> $ML2_CONF
+    echo "username = admin" >> $ML2_CONF
+    echo "url = http://${CONTROL_HOST}:8080/controller/nb/v2/neutron" >> $ML2_CONF
+}
+
+function reset_neutrondb {
+    echo "Resetting DB"
+ mysql -e "drop database if exists neutron_ml2;"
+ mysql -e "create database neutron_ml2 character set utf8;"
+ mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';"
+ neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
+}
+
+function restart_neutron {
+ echo "Restarting Neutron Server"
+ service neutron-server restart
+    echo "Should see Neutron running now"
+ service neutron-server status
+    echo "Shouldn't be any nets, but should work (return empty)"
+ neutron net-list
+}
+
+function stop_neutron {
+ echo "Stopping Neutron / OVS components"
+ service neutron-plugin-openvswitch-agent stop
+ if [ $MODE == "0" ]
+ then
+ service neutron-server stop
+ fi
+}
+
+
+
+function verify_ML2_working {
+ echo "checking that we can talk via ML2 properly"
+ curl -u admin:admin http://${CONTROL_HOST}:8080/controller/nb/v2/neutron/networks > /tmp/check_ml2
+ if grep "network" /tmp/check_ml2
+ then
+ echo "Success - ML2 to ODL is working"
+ else
+ echo "im sorry Jim, but its dead"
+ fi
+
+}
+
+
+function set_mode {
+ if df -k | grep glance
+ then
+ echo "Controller Mode"
+ MODE=0
+ else
+ echo "Compute Mode"
+ MODE=1
+ fi
+}
+
+function stop_ovs {
+ echo "Stopping OpenVSwitch"
+ service openvswitch-switch stop
+
+}
+
+function control_setup {
+ echo "do control stuff here"
+ echo "Reset Neutron DB"
+ #reset_neutrondb
+ echo "Restarting Neutron Components"
+ #restart_neutron
+ echo "Verifying ODL ML2 plugin is working"
+ verify_ML2_working
+
+}
+
+function compute_setup {
+ echo "do compute stuff here"
+ stop_neutron
+ verify_ML2_working
+}
+
+
+# MAIN
+echo "Starting to make call"
+#update_ml2conf
+echo "Check Mode"
+set_mode
+
+if [ $MODE == "0" ];
+then
+ echo "Calling control setup"
+ control_setup
+elif [ $MODE == "1" ];
+then
+ echo "Calling compute setup"
+ compute_setup
+
+else
+ echo "Something is bad - call for help"
+ exit
+fi
+
+
diff --git a/fuel/build/f_lith_odl_docker/scripts/prep_nets_for_odl.sh b/fuel/build/f_lith_odl_docker/scripts/prep_nets_for_odl.sh
new file mode 100755
index 0000000..dd4fc9f
--- /dev/null
+++ b/fuel/build/f_lith_odl_docker/scripts/prep_nets_for_odl.sh
@@ -0,0 +1,90 @@
+#!/bin/bash
+# a "cheat" way to install docker on the controller
+# can only be used if you have a connection out to the internet
+
+# Usage: ./prep_nets_for_odl.sh <ip of default route to remove> <ip of default gw to add>
+
+OLDGW=$1
+#!/bin/bash
+# a "cheat" way to install docker on the controller
+# can only be used if you have a connection out to the internet
+
+# Usage: ./prep_nets_for_odl.sh <ip of default route to remove> <ip of default gw to add>
+
+OLDGW=$1
+NEWGW=$2
+IMAGEPATH=/opt/opnfv
+IMAGENAME=odl_docker_image.tar
+SOURCES=/etc/apt/sources.list
+
+
+if [ "$#" -ne 2 ]; then
+ echo "Two args not provided, will not touch networking"
+else
+
+ # Fix routes
+ echo "Fixing routes"
+ #DEBUG
+ netstat -rn
+
+ echo "delete old def route"
+ route delete default gw $1
+ echo "adding new def route"
+ route add default gw $2
+
+ echo " you should see a good nslookup now"
+ nslookup www.google.ca
+#!/bin/bash
+# a "cheat" way to install docker on the controller
+# can only be used if you have a connection out to the internet
+
+# Usage: ./prep_nets_for_odl.sh <ip of default route to remove> <ip of default gw to add>
+
+OLDGW=$1
+NEWGW=$2
+IMAGEPATH=/opt/opnfv
+IMAGENAME=odl_docker_image.tar
+SOURCES=/etc/apt/sources.list
+
+
+if [ "$#" -ne 2 ]; then
+ echo "Two args not provided, will not touch networking"
+else
+
+ # Fix routes
+ echo "Fixing routes"
+ #DEBUG
+ netstat -rn
+
+ echo "delete old def route"
+ route delete default gw $1
+ echo "adding new def route"
+ route add default gw $2
+
+ echo " you should see a good nslookup now"
+ nslookup www.google.ca
+fi
+
+
+if egrep "mirrors.txt" $SOURCES
+then
+ echo "Sources was already updated, not touching"
+else
+    echo "adding the closest mirrors and the docker mirror to the mix"
+ echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise main restricted universe multiverse" >> /etc/apt/sources.list
+ echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise-updates main restricted universe multiverse" >> /etc/apt/sources.list
+ echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise-backports main restricted universe multiverse" >> /etc/apt/sources.list
+ echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise-security main restricted universe multiverse" >> /etc/apt/sources.list
+ apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
+ echo "deb https://get.docker.com/ubuntu docker main " > /etc/apt/sources.list.d/docker.list
+fi
+
+echo "Updating"
+apt-get update
+echo "Installing Docker"
+apt-get install -y lxc-docker
+
+echo "Loading ODL Docker Image"
+docker load -i $IMAGEPATH/$IMAGENAME
+
+
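A usage sketch for the script above, where 10.20.0.2 is the Fuel admin gateway being removed and 172.30.9.1 the external gateway being added (these match the values hard-coded in stage_odl.sh; substitute your own):

    ./prep_nets_for_odl.sh 10.20.0.2 172.30.9.1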
diff --git a/fuel/build/f_lith_odl_docker/scripts/setup_ovs_for_odl.sh b/fuel/build/f_lith_odl_docker/scripts/setup_ovs_for_odl.sh
new file mode 100644
index 0000000..42c9451
--- /dev/null
+++ b/fuel/build/f_lith_odl_docker/scripts/setup_ovs_for_odl.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+
+
+# ok .. so br-int has already been created, leave it alone
+
+so lets add a physical nic to it
+
+
+# First - Remove all the bridges you find (except br-int and br-prv)
+
+for i in $(ovs-vsctl list-br)
+do
+ if [ "$i" == "br-int" ];
+ then
+ echo "skipped br-int"
+  elif [ "$i" == "br-prv" ];
+  then
+    echo "skipped br-prv"
+ else
+ ovs-vsctl del-br $i
+ fi
+done
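+
+# Sketch only: with the extra bridges removed, a physical NIC could be attached
+# to br-int; the interface name below is an assumption -- adjust to your host.
+# ovs-vsctl add-port br-int eth2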
diff --git a/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/init.pp b/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/init.pp
index 54f1c86..436f496 100644
--- a/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/init.pp
+++ b/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/init.pp
@@ -25,4 +25,6 @@ class opnfv {
include opnfv::add_packages
# Setup OpenDaylight
include opnfv::odl_docker
+  # Setup OpenDaylight Lithium (Docker based)
+ include opnfv::odl_lith_docker
}
diff --git a/fuel/deploy/README.txt b/fuel/deploy/README.txt
index d392f8f..6f322d0 100644
--- a/fuel/deploy/README.txt
+++ b/fuel/deploy/README.txt
@@ -1,71 +1,109 @@
-======== How to prepare and run the OPNFV Autodeployment =======
+======== PREREQUISITES ========
-in fuel/build/deploy run these:
+The following applications and Python modules are required to be installed
+(example for an Ubuntu environment):
+sudo apt-get install -y libvirt-bin qemu-kvm tightvncserver virt-manager
+sshpass fuseiso genisoimage blackbox xterm python-pip
+sudo restart libvirt-bin
+sudo pip install pyyaml netaddr paramiko lxml scp
---- Step.1 Install prerequisites
-sudo ./install-ubuntu-packages.sh
+======== PREPARE and RUN the OPNFV Autodeployment ========
+--- Step.1 Prepare the DEA and DHA configuration files and the OPNFV ISO file
+Make sure that you are using the right DEA (Deployment Environment Adapter) and
+DHA (Deployment Hardware Adapter) configuration files. The ones provided are only
+templates; you will have to modify them according to your needs.
+- If you wish to deploy the OPNFV cloud environment on top of KVM/Libvirt
+  virtualization, use the following configuration files as examples:
---- Step.2-A If wou want to deploy OPNFV cloud environment on top of KVM/Libvirt virtualization
- run the following environment setup script
+ => libvirt/conf/ha
+ dea.yaml
+ dha.yaml
-sudo python setup_environment.py <storage_directory> <path_to_dha_file>
+ => libvirt/conf/multinode
+ dea.yaml
+ dha.yaml
-Example:
- sudo python setup_environment.py /mnt/images dha.yaml
+- If you wish to deploy the OPNFV cloud environment on baremetal,
+  use the following configuration files as examples:
+ => baremetal/conf/ericsson_montreal_lab/ha
+ dea.yaml
+ dha.yaml
+ => baremetal/conf/ericsson_montreal_lab/multinode
+ dea.yaml
+ dha.yaml
+ => baremetal/conf/linux_foundation_lab/ha
+ dea.yaml
+ dha.yaml
+ => baremetal/conf/linux_foundation_lab/multinode
+ dea.yaml
+ dha.yaml
---- Step.2-B If you want to deploy OPNFV cloud environment on baremetal run the
- following environment setup script
-sudo python setup_vfuel.py <storage_directory> <path_to_dha_file>
+--- Step.2 Run Autodeployment:
-Example:
- sudo python setup_vfuel.py /mnt/images dha.yaml
+usage: python deploy.py [-h] [-nf]
+ [iso_file] dea_file dha_file [storage_dir]
+ [pxe_bridge]
+positional arguments:
+ iso_file ISO File [default: OPNFV.iso]
+ dea_file Deployment Environment Adapter: dea.yaml
+ dha_file Deployment Hardware Adapter: dha.yaml
+ storage_dir Storage Directory [default: images]
+ pxe_bridge Linux Bridge for booting up the Fuel Master VM [default: pxebr]
-WARNING!:
-setup_vfuel.py adds the following snippet into /etc/network/interfaces
-making sure to replace in setup_vfuel.py interfafe 'p1p1.20' with your actual outbound
-interface in order to provide network access to the Fuel master for DNS and NTP.
+optional arguments:
+ -h, --help show this help message and exit
+ -nf Do not install Fuel Master (and Node VMs when using libvirt)
-iface vfuelnet inet static
- bridge_ports em1
- address 10.40.0.1
- netmask 255.255.255.0
- pre-down iptables -t nat -D POSTROUTING --out-interface p1p1.20 -j MASQUERADE -m comment --comment "vfuelnet"
- pre-down iptables -D FORWARD --in-interface vfuelnet --out-interface p1p1.20 -m comment --comment "vfuelnet"
- post-up iptables -t nat -A POSTROUTING --out-interface p1p1.20 -j MASQUERADE -m comment --comment "vfuelnet"
- post-up iptables -A FORWARD --in-interface vfuelnet --out-interface p1p1.20 -m comment --comment "vfuelnet"
+* WARNING:
+If <storage_dir> is not specified, Autodeployment will use
+"<current_working_dir>/images" as default, and it will create it
+if it does not already exist.
+If <pxe_bridge> is not specified, Autodeployment will use "pxebr" as default;
+if the bridge does not exist, the application will terminate with an error message.
+If <iso_file> is not specified, Autodeployment will use "<current_working_dir>/OPNFV.iso"
+as default; if the ISO file does not exist, the application will terminate with an error message.
+<pxe_bridge> is not required for Autodeployment in a virtual environment; even if it is
+specified, it will not be used at all.
---- Step.3 Start Autodeployment
-Make sure you use the right Deployment Environment Adapter and
-Deployment Hardware Adaper configuration files:
- - for baremetal: baremetal/dea.yaml baremetal/dha.yaml
+* EXAMPLES:
- - for libvirt: libvirt/dea.yaml libvirt/dha.yaml
+- Install Fuel Master and deploy OPNFV Cloud from scratch on Baremetal Environment
+sudo python deploy.py ~/ISO/opnfv.iso ~/CONF/baremetal/dea.yaml ~/CONF/baremetal/dha.yaml /mnt/images pxebr
-sudo python deploy.py [-nf] <isofile> <deafile> <dhafile>
-Example:
- sudo python deploy.py ~/ISO/opnfv.iso baremetal/dea.yaml baremetal/dha.yaml
+- Install Fuel Master and deploy OPNFV Cloud from scratch on Virtual Environment
+
+sudo python deploy.py ~/ISO/opnfv.iso ~/CONF/virtual/dea.yaml ~/CONF/virtual/dha.yaml /mnt/images
+
+
+
+- Deploy OPNFV Cloud on an already active environment where the Fuel Master VM is running,
+  so there is no need to install Fuel again
+
+sudo python deploy.py -nf ~/CONF/baremetal/dea.yaml ~/CONF/baremetal/dha.yaml
+
+sudo python deploy.py -nf ~/CONF/virtual/dea.yaml ~/CONF/virtual/dha.yaml
diff --git a/fuel/deploy/baremetal/conf/ericsson_montreal_lab/ha/dea.yaml b/fuel/deploy/baremetal/conf/ericsson_montreal_lab/ha/dea.yaml
new file mode 100644
index 0000000..dc8014d
--- /dev/null
+++ b/fuel/deploy/baremetal/conf/ericsson_montreal_lab/ha/dea.yaml
@@ -0,0 +1,993 @@
+title: Deployment Environment Adapter (DEA)
+# DEA API version supported
+version: 1.1
+created: Tue May 5 15:33:07 UTC 2015
+comment: Test environment Ericsson Montreal
+environment_name: opnfv
+environment_mode: ha
+wanted_release: Juno on Ubuntu 12.04.4
+nodes:
+- id: 1
+ interfaces: interfaces_1
+ transformations: transformations_1
+ role: ceph-osd,controller
+- id: 2
+ interfaces: interfaces_1
+ transformations: transformations_1
+ role: ceph-osd,controller
+- id: 3
+ interfaces: interfaces_1
+ transformations: transformations_1
+ role: ceph-osd,controller
+- id: 4
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
+- id: 5
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
+- id: 6
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
+fuel:
+ ADMIN_NETWORK:
+ ipaddress: 10.40.0.2
+ netmask: 255.255.255.0
+ dhcp_pool_start: 10.40.0.3
+ dhcp_pool_end: 10.40.0.254
+ DNS_UPSTREAM: 10.118.32.193
+ DNS_DOMAIN: opnfvericsson.ca
+ DNS_SEARCH: opnfvericsson.ca
+ FUEL_ACCESS:
+ user: admin
+ password: admin
+ HOSTNAME: opnfv
+ NTP1: 10.118.34.219
+ NTP2:
+ NTP3:
+interfaces:
+ interfaces_1:
+ eth0:
+ - fuelweb_admin
+ eth2:
+ - public
+ - management
+ - storage
+ - private
+transformations:
+ transformations_1:
+ - action: add-br
+ name: br-eth0
+ - action: add-port
+ bridge: br-eth0
+ name: eth0
+ - action: add-br
+ name: br-eth1
+ - action: add-port
+ bridge: br-eth1
+ name: eth1
+ - action: add-br
+ name: br-eth2
+ - action: add-port
+ bridge: br-eth2
+ name: eth2
+ - action: add-br
+ name: br-eth3
+ - action: add-port
+ bridge: br-eth3
+ name: eth3
+ - action: add-br
+ name: br-eth4
+ - action: add-port
+ bridge: br-eth4
+ name: eth4
+ - action: add-br
+ name: br-eth5
+ - action: add-port
+ bridge: br-eth5
+ name: eth5
+ - action: add-br
+ name: br-ex
+ - action: add-br
+ name: br-mgmt
+ - action: add-br
+ name: br-storage
+ - action: add-br
+ name: br-fw-admin
+ - action: add-br
+ name: br-prv
+ - action: add-patch
+ bridges:
+ - br-eth2
+ - br-storage
+ tags:
+ - 220
+ - 0
+ vlan_ids:
+ - 220
+ - 0
+ - action: add-patch
+ bridges:
+ - br-eth2
+ - br-mgmt
+ tags:
+ - 320
+ - 0
+ vlan_ids:
+ - 320
+ - 0
+ - action: add-patch
+ bridges:
+ - br-eth0
+ - br-fw-admin
+ - action: add-patch
+ bridges:
+ - br-eth2
+ - br-ex
+ tags:
+ - 20
+ - 0
+ vlan_ids:
+ - 20
+ - 0
+ - action: add-patch
+ bridges:
+ - br-eth2
+ - br-prv
+ transformations_2:
+ - action: add-br
+ name: br-eth0
+ - action: add-port
+ bridge: br-eth0
+ name: eth0
+ - action: add-br
+ name: br-eth1
+ - action: add-port
+ bridge: br-eth1
+ name: eth1
+ - action: add-br
+ name: br-eth2
+ - action: add-port
+ bridge: br-eth2
+ name: eth2
+ - action: add-br
+ name: br-eth3
+ - action: add-port
+ bridge: br-eth3
+ name: eth3
+ - action: add-br
+ name: br-eth4
+ - action: add-port
+ bridge: br-eth4
+ name: eth4
+ - action: add-br
+ name: br-eth5
+ - action: add-port
+ bridge: br-eth5
+ name: eth5
+ - action: add-br
+ name: br-mgmt
+ - action: add-br
+ name: br-storage
+ - action: add-br
+ name: br-fw-admin
+ - action: add-br
+ name: br-prv
+ - action: add-patch
+ bridges:
+ - br-eth2
+ - br-storage
+ tags:
+ - 220
+ - 0
+ vlan_ids:
+ - 220
+ - 0
+ - action: add-patch
+ bridges:
+ - br-eth2
+ - br-mgmt
+ tags:
+ - 320
+ - 0
+ vlan_ids:
+ - 320
+ - 0
+ - action: add-patch
+ bridges:
+ - br-eth0
+ - br-fw-admin
+ - action: add-patch
+ bridges:
+ - br-eth2
+ - br-prv
+opnfv:
+ compute: {}
+ controller: {}
+network:
+ networking_parameters:
+ base_mac: fa:16:3e:00:00:00
+ dns_nameservers:
+ - 10.118.32.193
+ floating_ranges:
+ - - 10.118.34.226
+ - 10.118.34.230
+ gre_id_range:
+ - 2
+ - 65535
+ internal_cidr: 192.168.111.0/24
+ internal_gateway: 192.168.111.1
+ net_l23_provider: ovs
+ segmentation_type: vlan
+ vlan_range:
+ - 2022
+ - 2023
+ networks:
+ - cidr: 10.118.34.192/24
+ gateway: 10.118.34.193
+ ip_ranges:
+ - - 10.118.34.220
+ - 10.118.34.225
+ meta:
+ assign_vip: true
+ cidr: 10.118.34.192/24
+ configurable: true
+ floating_range_var: floating_ranges
+ ip_range:
+ - 10.118.34.220
+ - 10.118.34.225
+ map_priority: 1
+ name: public
+ notation: ip_ranges
+ render_addr_mask: public
+ render_type: null
+ use_gateway: true
+ vlan_start: null
+ name: public
+ vlan_start: null
+ - cidr: 192.168.0.0/24
+ gateway: null
+ ip_ranges:
+ - - 192.168.0.2
+ - 192.168.0.254
+ meta:
+ assign_vip: true
+ cidr: 192.168.0.0/24
+ configurable: true
+ map_priority: 2
+ name: management
+ notation: cidr
+ render_addr_mask: internal
+ render_type: cidr
+ use_gateway: false
+ vlan_start: 320
+ name: management
+ vlan_start: 320
+ - cidr: 192.168.1.0/24
+ gateway: null
+ ip_ranges:
+ - - 192.168.1.2
+ - 192.168.1.254
+ meta:
+ assign_vip: false
+ cidr: 192.168.1.0/24
+ configurable: true
+ map_priority: 2
+ name: storage
+ notation: cidr
+ render_addr_mask: storage
+ render_type: cidr
+ use_gateway: false
+ vlan_start: 220
+ name: storage
+ vlan_start: 220
+ - cidr: null
+ gateway: null
+ ip_ranges: []
+ meta:
+ assign_vip: false
+ configurable: false
+ map_priority: 2
+ name: private
+ neutron_vlan_range: true
+ notation: null
+ render_addr_mask: null
+ render_type: null
+ seg_type: vlan
+ use_gateway: false
+ vlan_start: null
+ name: private
+ vlan_start: null
+ - cidr: 10.40.0.0/24
+ gateway: null
+ ip_ranges:
+ - - 10.40.0.3
+ - 10.40.0.254
+ meta:
+ assign_vip: false
+ configurable: false
+ map_priority: 0
+ notation: ip_ranges
+ render_addr_mask: null
+ render_type: null
+ unmovable: true
+ use_gateway: true
+ name: fuelweb_admin
+ vlan_start: null
+settings:
+ editable:
+ access:
+ email:
+ description: Email address for Administrator
+ label: email
+ type: text
+ value: admin@localhost
+ weight: 40
+ metadata:
+ label: Access
+ weight: 10
+ password:
+ description: Password for Administrator
+ label: password
+ type: password
+ value: admin
+ weight: 20
+ tenant:
+ description: Tenant (project) name for Administrator
+ label: tenant
+ regex:
+ error: Invalid tenant name
+ source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
+ type: text
+ value: admin
+ weight: 30
+ user:
+ description: Username for Administrator
+ label: username
+ regex:
+ error: Invalid username
+ source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
+ type: text
+ value: admin
+ weight: 10
+ additional_components:
+ ceilometer:
+ description: If selected, Ceilometer component will be installed
+ label: Install Ceilometer
+ type: checkbox
+ value: false
+ weight: 40
+ heat:
+ description: ''
+ label: ''
+ type: hidden
+ value: true
+ weight: 30
+ metadata:
+ label: Additional Components
+ weight: 20
+ murano:
+ description: If selected, Murano component will be installed
+ label: Install Murano
+ restrictions:
+ - cluster:net_provider != 'neutron'
+ type: checkbox
+ value: false
+ weight: 20
+ sahara:
+ description: If selected, Sahara component will be installed
+ label: Install Sahara
+ type: checkbox
+ value: false
+ weight: 10
+ common:
+ auth_key:
+ description: Public key(s) to include in authorized_keys on deployed nodes
+ label: Public Key
+ type: text
+ value: ''
+ weight: 70
+ auto_assign_floating_ip:
+ description: If selected, OpenStack will automatically assign a floating IP
+ to a new instance
+ label: Auto assign floating IP
+ restrictions:
+ - cluster:net_provider == 'neutron'
+ type: checkbox
+ value: false
+ weight: 40
+ compute_scheduler_driver:
+ label: Scheduler driver
+ type: radio
+ value: nova.scheduler.filter_scheduler.FilterScheduler
+ values:
+ - data: nova.scheduler.filter_scheduler.FilterScheduler
+ description: Currently the most advanced OpenStack scheduler. See the OpenStack
+ documentation for details.
+ label: Filter scheduler
+ - data: nova.scheduler.simple.SimpleScheduler
+ description: This is 'naive' scheduler which tries to find the least loaded
+ host
+ label: Simple scheduler
+ weight: 40
+ debug:
+ description: Debug logging mode provides more information, but requires more
+ disk space.
+ label: OpenStack debug logging
+ type: checkbox
+ value: false
+ weight: 20
+ disable_offload:
+ description: If set, generic segmentation offload (gso) and generic receive
+ offload (gro) on physical nics will be disabled. See ethtool man.
+ label: Disable generic offload on physical nics
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider == 'neutron' and networking_parameters:segmentation_type
+ == 'gre'
+ type: checkbox
+ value: true
+ weight: 80
+ libvirt_type:
+ label: Hypervisor type
+ type: radio
+ value: kvm
+ values:
+ - data: kvm
+ description: Choose this type of hypervisor if you run OpenStack on hardware
+ label: KVM
+ restrictions:
+ - settings:common.libvirt_type.value == 'vcenter'
+ - data: qemu
+ description: Choose this type of hypervisor if you run OpenStack on virtual
+ hosts.
+ label: QEMU
+ restrictions:
+ - settings:common.libvirt_type.value == 'vcenter'
+ - data: vcenter
+ description: Choose this type of hypervisor if you run OpenStack in a vCenter
+ environment.
+ label: vCenter
+ restrictions:
+ - settings:common.libvirt_type.value != 'vcenter' or cluster:net_provider
+ == 'neutron'
+ weight: 30
+ metadata:
+ label: Common
+ weight: 30
+ nova_quota:
+ description: Quotas are used to limit CPU and memory usage for tenants. Enabling
+ quotas will increase load on the Nova database.
+ label: Nova quotas
+ type: checkbox
+ value: false
+ weight: 25
+ resume_guests_state_on_host_boot:
+ description: Whether to resume previous guests state when the host reboots.
+ If enabled, this option causes guests assigned to the host to resume their
+ previous state. If the guest was running a restart will be attempted when
+ nova-compute starts. If the guest was not running previously, a restart
+ will not be attempted.
+ label: Resume guests state on host boot
+ type: checkbox
+ value: true
+ weight: 60
+ use_cow_images:
+ description: For most cases you will want qcow format. If it's disabled, raw
+ image format will be used to run VMs. OpenStack with raw format currently
+ does not support snapshotting.
+ label: Use qcow format for images
+ type: checkbox
+ value: true
+ weight: 50
+ corosync:
+ group:
+ description: ''
+ label: Group
+ type: text
+ value: 226.94.1.1
+ weight: 10
+ metadata:
+ label: Corosync
+ restrictions:
+ - action: hide
+ condition: 'true'
+ weight: 50
+ port:
+ description: ''
+ label: Port
+ type: text
+ value: '12000'
+ weight: 20
+ verified:
+ description: Set True only if multicast is configured correctly on router.
+ label: Need to pass network verification.
+ type: checkbox
+ value: false
+ weight: 10
+ external_dns:
+ dns_list:
+ description: List of upstream DNS servers, separated by comma
+ label: DNS list
+ type: text
+ value: 10.118.32.193
+ weight: 10
+ metadata:
+ label: Upstream DNS
+ weight: 90
+ external_ntp:
+ metadata:
+ label: Upstream NTP
+ weight: 100
+ ntp_list:
+ description: List of upstream NTP servers, separated by comma
+ label: NTP servers list
+ type: text
+ value: 10.118.34.219
+ weight: 10
+ kernel_params:
+ kernel:
+ description: Default kernel parameters
+ label: Initial parameters
+ type: text
+ value: console=ttyS0,9600 console=tty0 rootdelay=90 nomodeset
+ weight: 45
+ metadata:
+ label: Kernel parameters
+ weight: 40
+ neutron_mellanox:
+ metadata:
+ enabled: true
+ label: Mellanox Neutron components
+ toggleable: false
+ weight: 50
+ plugin:
+ label: Mellanox drivers and SR-IOV plugin
+ type: radio
+ value: disabled
+ values:
+ - data: disabled
+ description: If selected, Mellanox drivers, Neutron and Cinder plugin will
+ not be installed.
+ label: Mellanox drivers and plugins disabled
+ restrictions:
+ - settings:storage.iser.value == true
+ - data: drivers_only
+ description: If selected, Mellanox Ethernet drivers will be installed to
+ support networking over Mellanox NIC. Mellanox Neutron plugin will not
+ be installed.
+ label: Install only Mellanox drivers
+ restrictions:
+ - settings:common.libvirt_type.value != 'kvm'
+ - data: ethernet
+ description: If selected, both Mellanox Ethernet drivers and Mellanox network
+ acceleration (Neutron) plugin will be installed.
+ label: Install Mellanox drivers and SR-IOV plugin
+ restrictions:
+ - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider
+ == 'neutron' and networking_parameters:segmentation_type == 'vlan')
+ weight: 60
+ vf_num:
+ description: Note that one virtual function will be reserved to the storage
+ network, in case of choosing iSER.
+ label: Number of virtual NICs
+ restrictions:
+ - settings:neutron_mellanox.plugin.value != 'ethernet'
+ type: text
+ value: '16'
+ weight: 70
+ nsx_plugin:
+ connector_type:
+ description: Default network transport type to use
+ label: NSX connector type
+ type: select
+ value: stt
+ values:
+ - data: gre
+ label: GRE
+ - data: ipsec_gre
+ label: GRE over IPSec
+ - data: stt
+ label: STT
+ - data: ipsec_stt
+ label: STT over IPSec
+ - data: bridge
+ label: Bridge
+ weight: 80
+ l3_gw_service_uuid:
+ description: UUID for the default L3 gateway service to use with this cluster
+ label: L3 service UUID
+ regex:
+ error: Invalid L3 gateway service UUID
+ source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
+ type: text
+ value: ''
+ weight: 50
+ metadata:
+ enabled: false
+ label: VMware NSX
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider != 'neutron' or networking_parameters:net_l23_provider
+ != 'nsx'
+ weight: 20
+ nsx_controllers:
+ description: One or more IPv4[:port] addresses of NSX controller node, separated
+ by comma (e.g. 10.30.30.2,192.168.110.254:443)
+ label: NSX controller endpoint
+ regex:
+ error: Invalid controller endpoints, specify valid IPv4[:port] pair
+ source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(,(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?)*$
+ type: text
+ value: ''
+ weight: 60
+ nsx_password:
+ description: Password for Administrator
+ label: NSX password
+ regex:
+ error: Empty password
+ source: \S
+ type: password
+ value: ''
+ weight: 30
+ nsx_username:
+ description: NSX administrator's username
+ label: NSX username
+ regex:
+ error: Empty username
+ source: \S
+ type: text
+ value: admin
+ weight: 20
+ packages_url:
+ description: URL to NSX specific packages
+ label: URL to NSX bits
+ regex:
+ error: Invalid URL, specify valid HTTP/HTTPS URL with IPv4 address (e.g.
+ http://10.20.0.2/nsx)
+ source: ^https?://(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(/.*)?$
+ type: text
+ value: ''
+ weight: 70
+ replication_mode:
+ description: ''
+ label: NSX cluster has Service nodes
+ type: checkbox
+ value: true
+ weight: 90
+ transport_zone_uuid:
+ description: UUID of the pre-existing default NSX Transport zone
+ label: Transport zone UUID
+ regex:
+ error: Invalid transport zone UUID
+ source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
+ type: text
+ value: ''
+ weight: 40
+ provision:
+ metadata:
+ label: Provision
+ restrictions:
+ - action: hide
+ condition: not ('experimental' in version:feature_groups)
+ weight: 80
+ method:
+ description: Which provision method to use for this cluster.
+ label: Provision method
+ type: radio
+ value: cobbler
+ values:
+ - data: image
+ description: Copying pre-built images on a disk.
+ label: Image
+ - data: cobbler
+ description: Install from scratch using anaconda or debian-installer.
+ label: Classic (use anaconda or debian-installer)
+ public_network_assignment:
+ assign_to_all_nodes:
+ description: When disabled, public network will be assigned to controllers
+ and zabbix-server only
+ label: Assign public network to all nodes
+ type: checkbox
+ value: false
+ weight: 10
+ metadata:
+ label: Public network assignment
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider != 'neutron'
+ weight: 50
+ storage:
+ ephemeral_ceph:
+ description: Configures Nova to store ephemeral volumes in RBD. This works
+ best if Ceph is enabled for volumes and images, too. Enables live migration
+ of all types of Ceph backed VMs (without this option, live migration will
+ only work with VMs launched from Cinder volumes).
+ label: Ceph RBD for ephemeral volumes (Nova)
+ restrictions:
+ - settings:common.libvirt_type.value == 'vcenter'
+ type: checkbox
+ value: true
+ weight: 75
+ images_ceph:
+ description: Configures Glance to use the Ceph RBD backend to store images.
+ If enabled, this option will prevent Swift from installing.
+ label: Ceph RBD for images (Glance)
+ type: checkbox
+ value: true
+ weight: 30
+ images_vcenter:
+ description: Configures Glance to use the vCenter/ESXi backend to store images.
+ If enabled, this option will prevent Swift from installing.
+ label: VMWare vCenter/ESXi datastore for images (Glance)
+ restrictions:
+ - settings:common.libvirt_type.value != 'vcenter'
+ type: checkbox
+ value: false
+ weight: 35
+ iser:
+ description: 'High performance block storage: Cinder volumes over iSER protocol
+ (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC,
+ and will use a dedicated virtual function for the storage network.'
+ label: iSER protocol for volumes (Cinder)
+ restrictions:
+ - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value
+ != 'kvm'
+ type: checkbox
+ value: false
+ weight: 11
+ metadata:
+ label: Storage
+ weight: 60
+ objects_ceph:
+ description: Configures RadosGW front end for Ceph RBD. This exposes S3 and
+ Swift API Interfaces. If enabled, this option will prevent Swift from installing.
+ label: Ceph RadosGW for objects (Swift API)
+ restrictions:
+ - settings:storage.images_ceph.value == false
+ type: checkbox
+ value: false
+ weight: 80
+ osd_pool_size:
+ description: Configures the default number of object replicas in Ceph. This
+ number must be equal to or lower than the number of deployed 'Storage -
+ Ceph OSD' nodes.
+ label: Ceph object replication factor
+ regex:
+ error: Invalid number
+ source: ^[1-9]\d*$
+ restrictions:
+ - settings:common.libvirt_type.value == 'vcenter'
+ type: text
+ value: '2'
+ weight: 85
+ vc_datacenter:
+ description: Inventory path to a datacenter. If you want to use ESXi host
+ as datastore, it should be "ha-datacenter".
+ label: Datacenter name
+ regex:
+ error: Empty datacenter
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: ''
+ weight: 65
+ vc_datastore:
+ description: Datastore associated with the datacenter.
+ label: Datastore name
+ regex:
+ error: Empty datastore
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: ''
+ weight: 60
+ vc_host:
+ description: IP Address of vCenter/ESXi
+ label: vCenter/ESXi IP
+ regex:
+ error: Specify valid IPv4 address
+ source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: ''
+ weight: 45
+ vc_image_dir:
+ description: The name of the directory where the glance images will be stored
+ in the VMware datastore.
+ label: Datastore Images directory
+ regex:
+ error: Empty images directory
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: /openstack_glance
+ weight: 70
+ vc_password:
+ description: vCenter/ESXi admin password
+ label: Password
+ regex:
+ error: Empty password
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: password
+ value: ''
+ weight: 55
+ vc_user:
+ description: vCenter/ESXi admin username
+ label: Username
+ regex:
+ error: Empty username
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: ''
+ weight: 50
+ volumes_ceph:
+ description: Configures Cinder to store volumes in Ceph RBD images.
+ label: Ceph RBD for volumes (Cinder)
+ restrictions:
+ - settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value
+ == 'vcenter'
+ type: checkbox
+ value: true
+ weight: 20
+ volumes_lvm:
+ description: Requires at least one Storage - Cinder LVM node.
+ label: Cinder LVM over iSCSI for volumes
+ restrictions:
+ - settings:storage.volumes_ceph.value == true
+ type: checkbox
+ value: false
+ weight: 10
+ volumes_vmdk:
+ description: Configures Cinder to store volumes via VMware vCenter.
+ label: VMware vCenter for volumes (Cinder)
+ restrictions:
+ - settings:common.libvirt_type.value != 'vcenter' or settings:storage.volumes_lvm.value
+ == true
+ type: checkbox
+ value: false
+ weight: 15
+ syslog:
+ metadata:
+ label: Syslog
+ weight: 50
+ syslog_port:
+ description: Remote syslog port
+ label: Port
+ regex:
+ error: Invalid Syslog port
+ source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+ type: text
+ value: '514'
+ weight: 20
+ syslog_server:
+ description: Remote syslog hostname
+ label: Hostname
+ type: text
+ value: ''
+ weight: 10
+ syslog_transport:
+ label: Syslog transport protocol
+ type: radio
+ value: tcp
+ values:
+ - data: udp
+ description: ''
+ label: UDP
+ - data: tcp
+ description: ''
+ label: TCP
+ weight: 30
+ vcenter:
+ cluster:
+ description: vCenter cluster name. If you have multiple clusters, use comma
+ to separate names
+ label: Cluster
+ regex:
+ error: Invalid cluster list
+ source: ^([^,\ ]+([\ ]*[^,\ ])*)(,[^,\ ]+([\ ]*[^,\ ])*)*$
+ type: text
+ value: ''
+ weight: 40
+ datastore_regex:
+ description: The Datastore regexp setting specifies the data stores to use
+ with Compute. For example, "nas.*". If you want to use all available datastores,
+ leave this field blank
+ label: Datastore regexp
+ regex:
+ error: Invalid datastore regexp
+ source: ^(\S.*\S|\S|)$
+ type: text
+ value: ''
+ weight: 50
+ host_ip:
+ description: IP Address of vCenter
+ label: vCenter IP
+ regex:
+ error: Specify valid IPv4 address
+ source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
+ type: text
+ value: ''
+ weight: 10
+ metadata:
+ label: vCenter
+ restrictions:
+ - action: hide
+ condition: settings:common.libvirt_type.value != 'vcenter'
+ weight: 20
+ use_vcenter:
+ description: ''
+ label: ''
+ type: hidden
+ value: true
+ weight: 5
+ vc_password:
+ description: vCenter admin password
+ label: Password
+ regex:
+ error: Empty password
+ source: \S
+ type: password
+ value: admin
+ weight: 30
+ vc_user:
+ description: vCenter admin username
+ label: Username
+ regex:
+ error: Empty username
+ source: \S
+ type: text
+ value: admin
+ weight: 20
+ vlan_interface:
+ description: Physical ESXi host ethernet adapter for VLAN networking (e.g.
+ vmnic1). If empty "vmnic0" is used by default
+ label: ESXi VLAN interface
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider != 'nova_network' or networking_parameters:net_manager
+ != 'VlanManager'
+ type: text
+ value: ''
+ weight: 60
+ zabbix:
+ metadata:
+ label: Zabbix Access
+ restrictions:
+ - action: hide
+ condition: not ('experimental' in version:feature_groups)
+ weight: 70
+ password:
+ description: Password for Zabbix Administrator
+ label: password
+ type: password
+ value: zabbix
+ weight: 20
+ username:
+ description: Username for Zabbix Administrator
+ label: username
+ type: text
+ value: admin
+ weight: 10
\ No newline at end of file
diff --git a/fuel/deploy/baremetal/conf/ericsson_montreal_lab/ha/dha.yaml b/fuel/deploy/baremetal/conf/ericsson_montreal_lab/ha/dha.yaml
new file mode 100644
index 0000000..562d6cd
--- /dev/null
+++ b/fuel/deploy/baremetal/conf/ericsson_montreal_lab/ha/dha.yaml
@@ -0,0 +1,54 @@
+title: Deployment Hardware Adapter (DHA)
+# DHA API version supported
+version: 1.1
+created: Mon May 4 09:03:46 UTC 2015
+comment: Test environment Ericsson Montreal
+
+# Adapter to use for this definition
+adapter: hp
+
+# Node list.
+# Mandatory property is id, all other properties are adapter specific.
+
+nodes:
+- id: 1
+ pxeMac: 14:58:D0:54:7A:D8
+ ipmiIp: 10.118.32.198
+ ipmiUser: <username>
+ ipmiPass: <password>
+- id: 2
+ pxeMac: 14:58:D0:55:E2:E0
+ ipmiIp: 10.118.32.202
+ ipmiUser: <username>
+ ipmiPass: <password>
+- id: 3
+ pxeMac: 9C:B6:54:8A:25:C0
+ ipmiIp: 10.118.32.213
+ ipmiUser: <username>
+ ipmiPass: <password>
+- id: 4
+ pxeMac: 14:58:D0:54:28:80
+ ipmiIp: 10.118.32.201
+ ipmiUser: <username>
+ ipmiPass: <password>
+- id: 5
+ pxeMac: 14:58:D0:54:E7:88
+ ipmiIp: 10.118.32.203
+ ipmiUser: <username>
+ ipmiPass: <password>
+- id: 6
+ pxeMac: 14:58:D0:54:7A:28
+ ipmiIp: 10.118.32.205
+ ipmiUser: <username>
+ ipmiPass: <password>
+# Adding the Fuel node as node id 7 which may not be correct - please
+# adjust as needed.
+- id: 7
+ libvirtName: vFuel
+ libvirtTemplate: baremetal/vms/fuel.xml
+ isFuel: yes
+ username: root
+ password: r00tme
+
+disks:
+ fuel: 30G
\ No newline at end of file
diff --git a/fuel/deploy/baremetal/dea.yaml b/fuel/deploy/baremetal/conf/ericsson_montreal_lab/multinode/dea.yaml
index eb3019c..328dd6b 100644
--- a/fuel/deploy/baremetal/dea.yaml
+++ b/fuel/deploy/baremetal/conf/ericsson_montreal_lab/multinode/dea.yaml
@@ -8,13 +8,29 @@ environment_mode: multinode
wanted_release: Juno on Ubuntu 12.04.4
nodes:
- id: 1
- interfaces: interface1
- transformations: controller1
- role: controller
+ interfaces: interfaces_1
+ transformations: transformations_1
+ role: ceph-osd,controller
- id: 2
- interfaces: interface1
- transformations: compute1
- role: compute
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
+- id: 3
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
+- id: 4
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
+- id: 5
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
+- id: 6
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
fuel:
ADMIN_NETWORK:
ipaddress: 10.40.0.2
@@ -28,11 +44,11 @@ fuel:
user: admin
password: admin
HOSTNAME: opnfv
- NTP1: 0.ca.pool.ntp.org
- NTP2: 1.ca.pool.ntp.org
- NTP3: 2.ca.pool.ntp.org
+ NTP1: 10.118.34.219
+ NTP2:
+ NTP3:
interfaces:
- interface1:
+ interfaces_1:
eth0:
- fuelweb_admin
eth2:
@@ -41,7 +57,7 @@ interfaces:
- storage
- private
transformations:
- controller1:
+ transformations_1:
- action: add-br
name: br-eth0
- action: add-port
@@ -80,6 +96,8 @@ transformations:
name: br-storage
- action: add-br
name: br-fw-admin
+ - action: add-br
+ name: br-prv
- action: add-patch
bridges:
- br-eth2
@@ -104,25 +122,15 @@ transformations:
bridges:
- br-eth0
- br-fw-admin
- trunks:
- - 0
- action: add-patch
bridges:
- br-eth2
- br-ex
- tags:
- - 120
- - 0
- vlan_ids:
- - 120
- - 0
- - action: add-br
- name: br-prv
- action: add-patch
bridges:
- br-eth2
- br-prv
- compute1:
+ transformations_2:
- action: add-br
name: br-eth0
- action: add-port
@@ -159,6 +167,8 @@ transformations:
name: br-storage
- action: add-br
name: br-fw-admin
+ - action: add-br
+ name: br-prv
- action: add-patch
bridges:
- br-eth2
@@ -183,10 +193,6 @@ transformations:
bridges:
- br-eth0
- br-fw-admin
- trunks:
- - 0
- - action: add-br
- name: br-prv
- action: add-patch
bridges:
- br-eth2
@@ -199,10 +205,9 @@ network:
base_mac: fa:16:3e:00:00:00
dns_nameservers:
- 10.118.32.193
- - 8.8.8.8
floating_ranges:
- - - 172.16.0.130
- - 172.16.0.254
+ - - 10.118.36.48
+ - 10.118.36.62
gre_id_range:
- 2
- 65535
@@ -214,11 +219,11 @@ network:
- 2022
- 2023
networks:
- - cidr: 172.16.0.0/24
- gateway: 172.16.0.1
+ - cidr: 10.118.36.32/27
+ gateway: 10.118.36.1
ip_ranges:
- - - 172.16.0.2
- - 172.16.0.126
+ - - 10.118.36.33
+ - 10.118.36.47
meta:
assign_vip: true
cidr: 172.16.0.0/24
@@ -235,7 +240,7 @@ network:
use_gateway: true
vlan_start: null
name: public
- vlan_start: 120
+ vlan_start: null
- cidr: 192.168.0.0/24
gateway: null
ip_ranges:
@@ -251,7 +256,7 @@ network:
render_addr_mask: internal
render_type: cidr
use_gateway: false
- vlan_start: 101
+ vlan_start: 320
name: management
vlan_start: 320
- cidr: 192.168.1.0/24
@@ -269,7 +274,7 @@ network:
render_addr_mask: storage
render_type: cidr
use_gateway: false
- vlan_start: 102
+ vlan_start: 220
name: storage
vlan_start: 220
- cidr: null
@@ -501,7 +506,7 @@ settings:
description: List of upstream DNS servers, separated by comma
label: DNS list
type: text
- value: 10.118.32.193, 8.8.8.8
+ value: 10.118.32.193
weight: 10
metadata:
label: Upstream DNS
@@ -514,7 +519,7 @@ settings:
description: List of upstream NTP servers, separated by comma
label: NTP servers list
type: text
- value: 0.pool.ntp.org, 1.pool.ntp.org
+ value: 10.118.34.219
weight: 10
kernel_params:
kernel:
@@ -604,7 +609,7 @@ settings:
weight: 20
nsx_controllers:
description: One or more IPv4[:port] addresses of NSX controller node, separated
- by comma (e.g. 10.40.30.2,192.168.110.254:443)
+ by comma (e.g. 10.30.30.2,192.168.110.254:443)
label: NSX controller endpoint
regex:
error: Invalid controller endpoints, specify valid IPv4[:port] pair
@@ -698,14 +703,14 @@ settings:
restrictions:
- settings:common.libvirt_type.value == 'vcenter'
type: checkbox
- value: false
+ value: true
weight: 75
images_ceph:
description: Configures Glance to use the Ceph RBD backend to store images.
If enabled, this option will prevent Swift from installing.
label: Ceph RBD for images (Glance)
type: checkbox
- value: false
+ value: true
weight: 30
images_vcenter:
description: Configures Glance to use the vCenter/ESXi backend to store images.
@@ -839,7 +844,7 @@ settings:
- settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value
== 'vcenter'
type: checkbox
- value: false
+ value: true
weight: 20
volumes_lvm:
description: Requires at least one Storage - Cinder LVM node.
@@ -979,4 +984,4 @@ settings:
label: username
type: text
value: admin
- weight: 10
+ weight: 10
\ No newline at end of file
diff --git a/fuel/deploy/baremetal/conf/ericsson_montreal_lab/multinode/dha.yaml b/fuel/deploy/baremetal/conf/ericsson_montreal_lab/multinode/dha.yaml
new file mode 100644
index 0000000..562d6cd
--- /dev/null
+++ b/fuel/deploy/baremetal/conf/ericsson_montreal_lab/multinode/dha.yaml
@@ -0,0 +1,54 @@
+title: Deployment Hardware Adapter (DHA)
+# DHA API version supported
+version: 1.1
+created: Mon May 4 09:03:46 UTC 2015
+comment: Test environment Ericsson Montreal
+
+# Adapter to use for this definition
+adapter: hp
+
+# Node list.
+# Mandatory property is id, all other properties are adapter specific.
+
+nodes:
+- id: 1
+ pxeMac: 14:58:D0:54:7A:D8
+ ipmiIp: 10.118.32.198
+ ipmiUser: <username>
+ ipmiPass: <password>
+- id: 2
+ pxeMac: 14:58:D0:55:E2:E0
+ ipmiIp: 10.118.32.202
+ ipmiUser: <username>
+ ipmiPass: <password>
+- id: 3
+ pxeMac: 9C:B6:54:8A:25:C0
+ ipmiIp: 10.118.32.213
+ ipmiUser: <username>
+ ipmiPass: <password>
+- id: 4
+ pxeMac: 14:58:D0:54:28:80
+ ipmiIp: 10.118.32.201
+ ipmiUser: <username>
+ ipmiPass: <password>
+- id: 5
+ pxeMac: 14:58:D0:54:E7:88
+ ipmiIp: 10.118.32.203
+ ipmiUser: <username>
+ ipmiPass: <password>
+- id: 6
+ pxeMac: 14:58:D0:54:7A:28
+ ipmiIp: 10.118.32.205
+ ipmiUser: <username>
+ ipmiPass: <password>
+# Adding the Fuel node as node id 7 which may not be correct - please
+# adjust as needed.
+- id: 7
+ libvirtName: vFuel
+ libvirtTemplate: baremetal/vms/fuel.xml
+ isFuel: yes
+ username: root
+ password: r00tme
+
+disks:
+ fuel: 30G
\ No newline at end of file
diff --git a/fuel/deploy/baremetal/conf/linux_foundation_lab/ha/dea.yaml b/fuel/deploy/baremetal/conf/linux_foundation_lab/ha/dea.yaml
new file mode 100644
index 0000000..2528229
--- /dev/null
+++ b/fuel/deploy/baremetal/conf/linux_foundation_lab/ha/dea.yaml
@@ -0,0 +1,950 @@
+title: Deployment Environment Adapter (DEA)
+# DEA API version supported
+version: 1.1
+created: Thu May 21 13:34:13 CEST 2015
+comment: HA deployment with Ceph
+environment_name: opnfv
+environment_mode: ha
+wanted_release: Juno on Ubuntu 12.04.4
+nodes:
+- id: 1
+ interfaces: interfaces_1
+ transformations: transformations_1
+ role: ceph-osd,controller
+- id: 2
+ interfaces: interfaces_1
+ transformations: transformations_1
+ role: ceph-osd,controller
+- id: 3
+ interfaces: interfaces_1
+ transformations: transformations_1
+ role: ceph-osd,controller
+- id: 4
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
+- id: 5
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
+fuel:
+ ADMIN_NETWORK:
+ ipaddress: 10.20.0.2
+ netmask: 255.255.0.0
+ dhcp_pool_start: 10.20.0.3
+ dhcp_pool_end: 10.20.0.254
+ DNS_UPSTREAM: 8.8.8.8
+ DNS_DOMAIN: domain.tld
+ DNS_SEARCH: domain.tld
+ FUEL_ACCESS:
+ user: admin
+ password: admin
+ HOSTNAME: opnfv
+ NTP1: 0.pool.ntp.org
+ NTP2: 1.pool.ntp.org
+ NTP3: 2.pool.ntp.org
+interfaces:
+ interfaces_1:
+ eth0:
+ - public
+ eth1:
+ - fuelweb_admin
+ - management
+ - storage
+ - private
+transformations:
+ transformations_1:
+ - action: add-br
+ name: br-eth0
+ - action: add-port
+ bridge: br-eth0
+ name: eth0
+ - action: add-br
+ name: br-eth1
+ - action: add-port
+ bridge: br-eth1
+ name: eth1
+ - action: add-br
+ name: br-ex
+ - action: add-br
+ name: br-mgmt
+ - action: add-br
+ name: br-storage
+ - action: add-br
+ name: br-fw-admin
+ - action: add-patch
+ bridges:
+ - br-eth1
+ - br-storage
+ tags:
+ - 301
+ - 0
+ vlan_ids:
+ - 301
+ - 0
+ - action: add-patch
+ bridges:
+ - br-eth1
+ - br-mgmt
+ tags:
+ - 300
+ - 0
+ vlan_ids:
+ - 300
+ - 0
+ - action: add-patch
+ bridges:
+ - br-eth1
+ - br-fw-admin
+ trunks:
+ - 0
+ - action: add-patch
+ bridges:
+ - br-eth0
+ - br-ex
+ trunks:
+ - 0
+ - action: add-br
+ name: br-prv
+ - action: add-patch
+ bridges:
+ - br-eth1
+ - br-prv
+ transformations_2:
+ - action: add-br
+ name: br-eth0
+ - action: add-port
+ bridge: br-eth0
+ name: eth0
+ - action: add-br
+ name: br-eth1
+ - action: add-port
+ bridge: br-eth1
+ name: eth1
+ - action: add-br
+ name: br-mgmt
+ - action: add-br
+ name: br-storage
+ - action: add-br
+ name: br-fw-admin
+ - action: add-patch
+ bridges:
+ - br-eth1
+ - br-storage
+ tags:
+ - 301
+ - 0
+ vlan_ids:
+ - 301
+ - 0
+ - action: add-patch
+ bridges:
+ - br-eth1
+ - br-mgmt
+ tags:
+ - 300
+ - 0
+ vlan_ids:
+ - 300
+ - 0
+ - action: add-patch
+ bridges:
+ - br-eth1
+ - br-fw-admin
+ trunks:
+ - 0
+ - action: add-br
+ name: br-prv
+ - action: add-patch
+ bridges:
+ - br-eth1
+ - br-prv
+opnfv:
+ compute: {}
+ controller: {}
+network:
+ networking_parameters:
+ base_mac: fa:16:3e:00:00:00
+ dns_nameservers:
+ - 8.8.4.4
+ - 8.8.8.8
+ floating_ranges:
+ - - 172.30.9.80
+ - 172.30.9.89
+ gre_id_range:
+ - 2
+ - 65535
+ internal_cidr: 192.168.111.0/24
+ internal_gateway: 192.168.111.1
+ net_l23_provider: ovs
+ segmentation_type: vlan
+ vlan_range:
+ - 1000
+ - 1010
+ networks:
+ - cidr: 172.30.9.0/24
+ gateway: 172.30.9.1
+ ip_ranges:
+ - - 172.30.9.70
+ - 172.30.9.70
+ meta:
+ assign_vip: true
+ cidr: 172.16.0.0/24
+ configurable: true
+ floating_range_var: floating_ranges
+ ip_range:
+ - 172.16.0.2
+ - 172.16.0.126
+ map_priority: 1
+ name: public
+ notation: ip_ranges
+ render_addr_mask: public
+ render_type: null
+ use_gateway: true
+ vlan_start: null
+ name: public
+ vlan_start: null
+ - cidr: 192.168.0.0/24
+ gateway: null
+ ip_ranges:
+ - - 192.168.0.2
+ - 192.168.0.254
+ meta:
+ assign_vip: true
+ cidr: 192.168.0.0/24
+ configurable: true
+ map_priority: 2
+ name: management
+ notation: cidr
+ render_addr_mask: internal
+ render_type: cidr
+ use_gateway: false
+ vlan_start: 101
+ name: management
+ vlan_start: 300
+ - cidr: 192.168.1.0/24
+ gateway: null
+ ip_ranges:
+ - - 192.168.1.2
+ - 192.168.1.254
+ meta:
+ assign_vip: false
+ cidr: 192.168.1.0/24
+ configurable: true
+ map_priority: 2
+ name: storage
+ notation: cidr
+ render_addr_mask: storage
+ render_type: cidr
+ use_gateway: false
+ vlan_start: 102
+ name: storage
+ vlan_start: 301
+ - cidr: null
+ gateway: null
+ ip_ranges: []
+ meta:
+ assign_vip: false
+ configurable: false
+ map_priority: 2
+ name: private
+ neutron_vlan_range: true
+ notation: null
+ render_addr_mask: null
+ render_type: null
+ seg_type: vlan
+ use_gateway: false
+ vlan_start: null
+ name: private
+ vlan_start: null
+ - cidr: 10.20.0.0/16
+ gateway: null
+ ip_ranges:
+ - - 10.20.0.3
+ - 10.20.255.254
+ meta:
+ assign_vip: false
+ configurable: false
+ map_priority: 0
+ notation: ip_ranges
+ render_addr_mask: null
+ render_type: null
+ unmovable: true
+ use_gateway: true
+ name: fuelweb_admin
+ vlan_start: null
+settings:
+ editable:
+ access:
+ email:
+ description: Email address for Administrator
+ label: email
+ type: text
+ value: admin@localhost
+ weight: 40
+ metadata:
+ label: Access
+ weight: 10
+ password:
+ description: Password for Administrator
+ label: password
+ type: password
+ value: admin
+ weight: 20
+ tenant:
+ description: Tenant (project) name for Administrator
+ label: tenant
+ regex:
+ error: Invalid tenant name
+ source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
+ type: text
+ value: admin
+ weight: 30
+ user:
+ description: Username for Administrator
+ label: username
+ regex:
+ error: Invalid username
+ source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
+ type: text
+ value: admin
+ weight: 10
+ additional_components:
+ ceilometer:
+ description: If selected, Ceilometer component will be installed
+ label: Install Ceilometer
+ type: checkbox
+ value: false
+ weight: 40
+ heat:
+ description: ''
+ label: ''
+ type: hidden
+ value: true
+ weight: 30
+ metadata:
+ label: Additional Components
+ weight: 20
+ murano:
+ description: If selected, Murano component will be installed
+ label: Install Murano
+ restrictions:
+ - cluster:net_provider != 'neutron'
+ type: checkbox
+ value: false
+ weight: 20
+ sahara:
+ description: If selected, Sahara component will be installed
+ label: Install Sahara
+ type: checkbox
+ value: false
+ weight: 10
+ common:
+ auth_key:
+ description: Public key(s) to include in authorized_keys on deployed nodes
+ label: Public Key
+ type: text
+ value: ''
+ weight: 70
+ auto_assign_floating_ip:
+ description: If selected, OpenStack will automatically assign a floating IP
+ to a new instance
+ label: Auto assign floating IP
+ restrictions:
+ - cluster:net_provider == 'neutron'
+ type: checkbox
+ value: false
+ weight: 40
+ compute_scheduler_driver:
+ label: Scheduler driver
+ type: radio
+ value: nova.scheduler.filter_scheduler.FilterScheduler
+ values:
+ - data: nova.scheduler.filter_scheduler.FilterScheduler
+ description: Currently the most advanced OpenStack scheduler. See the OpenStack
+ documentation for details.
+ label: Filter scheduler
+ - data: nova.scheduler.simple.SimpleScheduler
+ description: This is 'naive' scheduler which tries to find the least loaded
+ host
+ label: Simple scheduler
+ weight: 40
+ debug:
+ description: Debug logging mode provides more information, but requires more
+ disk space.
+ label: OpenStack debug logging
+ type: checkbox
+ value: false
+ weight: 20
+ disable_offload:
+ description: If set, generic segmentation offload (gso) and generic receive
+ offload (gro) on physical nics will be disabled. See ethtool man.
+ label: Disable generic offload on physical nics
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider == 'neutron' and networking_parameters:segmentation_type
+ == 'gre'
+ type: checkbox
+ value: true
+ weight: 80
+ libvirt_type:
+ label: Hypervisor type
+ type: radio
+ value: kvm
+ values:
+ - data: kvm
+ description: Choose this type of hypervisor if you run OpenStack on hardware
+ label: KVM
+ restrictions:
+ - settings:common.libvirt_type.value == 'vcenter'
+ - data: qemu
+ description: Choose this type of hypervisor if you run OpenStack on virtual
+ hosts.
+ label: QEMU
+ restrictions:
+ - settings:common.libvirt_type.value == 'vcenter'
+ - data: vcenter
+ description: Choose this type of hypervisor if you run OpenStack in a vCenter
+ environment.
+ label: vCenter
+ restrictions:
+ - settings:common.libvirt_type.value != 'vcenter' or cluster:net_provider
+ == 'neutron'
+ weight: 30
+ metadata:
+ label: Common
+ weight: 30
+ nova_quota:
+ description: Quotas are used to limit CPU and memory usage for tenants. Enabling
+ quotas will increase load on the Nova database.
+ label: Nova quotas
+ type: checkbox
+ value: false
+ weight: 25
+ resume_guests_state_on_host_boot:
+ description: Whether to resume previous guests state when the host reboots.
+ If enabled, this option causes guests assigned to the host to resume their
+ previous state. If the guest was running a restart will be attempted when
+ nova-compute starts. If the guest was not running previously, a restart
+ will not be attempted.
+ label: Resume guests state on host boot
+ type: checkbox
+ value: true
+ weight: 60
+ use_cow_images:
+ description: For most cases you will want qcow format. If it's disabled, raw
+ image format will be used to run VMs. OpenStack with raw format currently
+ does not support snapshotting.
+ label: Use qcow format for images
+ type: checkbox
+ value: true
+ weight: 50
+ corosync:
+ group:
+ description: ''
+ label: Group
+ type: text
+ value: 226.94.1.1
+ weight: 10
+ metadata:
+ label: Corosync
+ restrictions:
+ - action: hide
+ condition: 'true'
+ weight: 50
+ port:
+ description: ''
+ label: Port
+ type: text
+ value: '12000'
+ weight: 20
+ verified:
+ description: Set True only if multicast is configured correctly on router.
+ label: Need to pass network verification.
+ type: checkbox
+ value: false
+ weight: 10
+ external_dns:
+ dns_list:
+ description: List of upstream DNS servers, separated by comma
+ label: DNS list
+ type: text
+ value: 8.8.8.8, 8.8.4.4
+ weight: 10
+ metadata:
+ label: Upstream DNS
+ weight: 90
+ external_ntp:
+ metadata:
+ label: Upstream NTP
+ weight: 100
+ ntp_list:
+ description: List of upstream NTP servers, separated by comma
+ label: NTP servers list
+ type: text
+ value: 0.pool.ntp.org, 1.pool.ntp.org
+ weight: 10
+ kernel_params:
+ kernel:
+ description: Default kernel parameters
+ label: Initial parameters
+ type: text
+ value: console=ttyS0,9600 console=tty0 rootdelay=90 nomodeset
+ weight: 45
+ metadata:
+ label: Kernel parameters
+ weight: 40
+ neutron_mellanox:
+ metadata:
+ enabled: true
+ label: Mellanox Neutron components
+ toggleable: false
+ weight: 50
+ plugin:
+ label: Mellanox drivers and SR-IOV plugin
+ type: radio
+ value: disabled
+ values:
+ - data: disabled
+ description: If selected, Mellanox drivers, Neutron and Cinder plugin will
+ not be installed.
+ label: Mellanox drivers and plugins disabled
+ restrictions:
+ - settings:storage.iser.value == true
+ - data: drivers_only
+ description: If selected, Mellanox Ethernet drivers will be installed to
+ support networking over Mellanox NIC. Mellanox Neutron plugin will not
+ be installed.
+ label: Install only Mellanox drivers
+ restrictions:
+ - settings:common.libvirt_type.value != 'kvm'
+ - data: ethernet
+ description: If selected, both Mellanox Ethernet drivers and Mellanox network
+ acceleration (Neutron) plugin will be installed.
+ label: Install Mellanox drivers and SR-IOV plugin
+ restrictions:
+ - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider
+ == 'neutron' and networking_parameters:segmentation_type == 'vlan')
+ weight: 60
+ vf_num:
+ description: Note that one virtual function will be reserved to the storage
+ network, in case of choosing iSER.
+ label: Number of virtual NICs
+ restrictions:
+ - settings:neutron_mellanox.plugin.value != 'ethernet'
+ type: text
+ value: '16'
+ weight: 70
+ nsx_plugin:
+ connector_type:
+ description: Default network transport type to use
+ label: NSX connector type
+ type: select
+ value: stt
+ values:
+ - data: gre
+ label: GRE
+ - data: ipsec_gre
+ label: GRE over IPSec
+ - data: stt
+ label: STT
+ - data: ipsec_stt
+ label: STT over IPSec
+ - data: bridge
+ label: Bridge
+ weight: 80
+ l3_gw_service_uuid:
+ description: UUID for the default L3 gateway service to use with this cluster
+ label: L3 service UUID
+ regex:
+ error: Invalid L3 gateway service UUID
+ source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
+ type: text
+ value: ''
+ weight: 50
+ metadata:
+ enabled: false
+ label: VMware NSX
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider != 'neutron' or networking_parameters:net_l23_provider
+ != 'nsx'
+ weight: 20
+ nsx_controllers:
+ description: One or more IPv4[:port] addresses of NSX controller node, separated
+ by comma (e.g. 10.30.30.2,192.168.110.254:443)
+ label: NSX controller endpoint
+ regex:
+ error: Invalid controller endpoints, specify valid IPv4[:port] pair
+ source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(,(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?)*$
+ type: text
+ value: ''
+ weight: 60
+ nsx_password:
+ description: Password for Administrator
+ label: NSX password
+ regex:
+ error: Empty password
+ source: \S
+ type: password
+ value: ''
+ weight: 30
+ nsx_username:
+ description: NSX administrator's username
+ label: NSX username
+ regex:
+ error: Empty username
+ source: \S
+ type: text
+ value: admin
+ weight: 20
+ packages_url:
+ description: URL to NSX specific packages
+ label: URL to NSX bits
+ regex:
+ error: Invalid URL, specify valid HTTP/HTTPS URL with IPv4 address (e.g.
+ http://10.20.0.2/nsx)
+ source: ^https?://(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(/.*)?$
+ type: text
+ value: ''
+ weight: 70
+ replication_mode:
+ description: ''
+ label: NSX cluster has Service nodes
+ type: checkbox
+ value: true
+ weight: 90
+ transport_zone_uuid:
+ description: UUID of the pre-existing default NSX Transport zone
+ label: Transport zone UUID
+ regex:
+ error: Invalid transport zone UUID
+ source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
+ type: text
+ value: ''
+ weight: 40
+ provision:
+ metadata:
+ label: Provision
+ restrictions:
+ - action: hide
+ condition: not ('experimental' in version:feature_groups)
+ weight: 80
+ method:
+ description: Which provision method to use for this cluster.
+ label: Provision method
+ type: radio
+ value: cobbler
+ values:
+ - data: image
+ description: Copying pre-built images on a disk.
+ label: Image
+ - data: cobbler
+ description: Install from scratch using anaconda or debian-installer.
+ label: Classic (use anaconda or debian-installer)
+ public_network_assignment:
+ assign_to_all_nodes:
+ description: When disabled, public network will be assigned to controllers
+ and zabbix-server only
+ label: Assign public network to all nodes
+ type: checkbox
+ value: false
+ weight: 10
+ metadata:
+ label: Public network assignment
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider != 'neutron'
+ weight: 50
+ storage:
+ ephemeral_ceph:
+ description: Configures Nova to store ephemeral volumes in RBD. This works
+ best if Ceph is enabled for volumes and images, too. Enables live migration
+ of all types of Ceph backed VMs (without this option, live migration will
+ only work with VMs launched from Cinder volumes).
+ label: Ceph RBD for ephemeral volumes (Nova)
+ restrictions:
+ - settings:common.libvirt_type.value == 'vcenter'
+ type: checkbox
+ value: true
+ weight: 75
+ images_ceph:
+ description: Configures Glance to use the Ceph RBD backend to store images.
+ If enabled, this option will prevent Swift from installing.
+ label: Ceph RBD for images (Glance)
+ type: checkbox
+ value: true
+ weight: 30
+ images_vcenter:
+ description: Configures Glance to use the vCenter/ESXi backend to store images.
+ If enabled, this option will prevent Swift from installing.
+ label: VMWare vCenter/ESXi datastore for images (Glance)
+ restrictions:
+ - settings:common.libvirt_type.value != 'vcenter'
+ type: checkbox
+ value: false
+ weight: 35
+ iser:
+ description: 'High performance block storage: Cinder volumes over iSER protocol
+ (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC,
+ and will use a dedicated virtual function for the storage network.'
+ label: iSER protocol for volumes (Cinder)
+ restrictions:
+ - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value
+ != 'kvm'
+ type: checkbox
+ value: false
+ weight: 11
+ metadata:
+ label: Storage
+ weight: 60
+ objects_ceph:
+ description: Configures RadosGW front end for Ceph RBD. This exposes S3 and
+ Swift API Interfaces. If enabled, this option will prevent Swift from installing.
+ label: Ceph RadosGW for objects (Swift API)
+ restrictions:
+ - settings:storage.images_ceph.value == false
+ type: checkbox
+ value: false
+ weight: 80
+ osd_pool_size:
+ description: Configures the default number of object replicas in Ceph. This
+ number must be equal to or lower than the number of deployed 'Storage -
+ Ceph OSD' nodes.
+ label: Ceph object replication factor
+ regex:
+ error: Invalid number
+ source: ^[1-9]\d*$
+ restrictions:
+ - settings:common.libvirt_type.value == 'vcenter'
+ type: text
+ value: '2'
+ weight: 85
+ vc_datacenter:
+ description: Inventory path to a datacenter. If you want to use ESXi host
+ as datastore, it should be "ha-datacenter".
+ label: Datacenter name
+ regex:
+ error: Empty datacenter
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: ''
+ weight: 65
+ vc_datastore:
+ description: Datastore associated with the datacenter.
+ label: Datastore name
+ regex:
+ error: Empty datastore
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: ''
+ weight: 60
+ vc_host:
+ description: IP Address of vCenter/ESXi
+ label: vCenter/ESXi IP
+ regex:
+ error: Specify valid IPv4 address
+ source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: ''
+ weight: 45
+ vc_image_dir:
+ description: The name of the directory where the glance images will be stored
+ in the VMware datastore.
+ label: Datastore Images directory
+ regex:
+ error: Empty images directory
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: /openstack_glance
+ weight: 70
+ vc_password:
+ description: vCenter/ESXi admin password
+ label: Password
+ regex:
+ error: Empty password
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: password
+ value: ''
+ weight: 55
+ vc_user:
+ description: vCenter/ESXi admin username
+ label: Username
+ regex:
+ error: Empty username
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: ''
+ weight: 50
+ volumes_ceph:
+ description: Configures Cinder to store volumes in Ceph RBD images.
+ label: Ceph RBD for volumes (Cinder)
+ restrictions:
+ - settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value
+ == 'vcenter'
+ type: checkbox
+ value: true
+ weight: 20
+ volumes_lvm:
+ description: Requires at least one Storage - Cinder LVM node.
+ label: Cinder LVM over iSCSI for volumes
+ restrictions:
+ - settings:storage.volumes_ceph.value == true
+ type: checkbox
+ value: false
+ weight: 10
+ volumes_vmdk:
+ description: Configures Cinder to store volumes via VMware vCenter.
+ label: VMware vCenter for volumes (Cinder)
+ restrictions:
+ - settings:common.libvirt_type.value != 'vcenter' or settings:storage.volumes_lvm.value
+ == true
+ type: checkbox
+ value: false
+ weight: 15
+ syslog:
+ metadata:
+ label: Syslog
+ weight: 50
+ syslog_port:
+ description: Remote syslog port
+ label: Port
+ regex:
+ error: Invalid Syslog port
+ source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+ type: text
+ value: '514'
+ weight: 20
+ syslog_server:
+ description: Remote syslog hostname
+ label: Hostname
+ type: text
+ value: ''
+ weight: 10
+ syslog_transport:
+ label: Syslog transport protocol
+ type: radio
+ value: tcp
+ values:
+ - data: udp
+ description: ''
+ label: UDP
+ - data: tcp
+ description: ''
+ label: TCP
+ weight: 30
+ vcenter:
+ cluster:
+ description: vCenter cluster name. If you have multiple clusters, use comma
+ to separate names
+ label: Cluster
+ regex:
+ error: Invalid cluster list
+ source: ^([^,\ ]+([\ ]*[^,\ ])*)(,[^,\ ]+([\ ]*[^,\ ])*)*$
+ type: text
+ value: ''
+ weight: 40
+ datastore_regex:
+ description: The Datastore regexp setting specifies the data stores to use
+ with Compute. For example, "nas.*". If you want to use all available datastores,
+ leave this field blank
+ label: Datastore regexp
+ regex:
+ error: Invalid datastore regexp
+ source: ^(\S.*\S|\S|)$
+ type: text
+ value: ''
+ weight: 50
+ host_ip:
+ description: IP Address of vCenter
+ label: vCenter IP
+ regex:
+ error: Specify valid IPv4 address
+ source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
+ type: text
+ value: ''
+ weight: 10
+ metadata:
+ label: vCenter
+ restrictions:
+ - action: hide
+ condition: settings:common.libvirt_type.value != 'vcenter'
+ weight: 20
+ use_vcenter:
+ description: ''
+ label: ''
+ type: hidden
+ value: true
+ weight: 5
+ vc_password:
+ description: vCenter admin password
+ label: Password
+ regex:
+ error: Empty password
+ source: \S
+ type: password
+ value: admin
+ weight: 30
+ vc_user:
+ description: vCenter admin username
+ label: Username
+ regex:
+ error: Empty username
+ source: \S
+ type: text
+ value: admin
+ weight: 20
+ vlan_interface:
+ description: Physical ESXi host ethernet adapter for VLAN networking (e.g.
+ vmnic1). If empty "vmnic0" is used by default
+ label: ESXi VLAN interface
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider != 'nova_network' or networking_parameters:net_manager
+ != 'VlanManager'
+ type: text
+ value: ''
+ weight: 60
+ zabbix:
+ metadata:
+ label: Zabbix Access
+ restrictions:
+ - action: hide
+ condition: not ('experimental' in version:feature_groups)
+ weight: 70
+ password:
+ description: Password for Zabbix Administrator
+ label: password
+ type: password
+ value: zabbix
+ weight: 20
+ username:
+ description: Username for Zabbix Administrator
+ label: username
+ type: text
+ value: admin
+ weight: 10 \ No newline at end of file
diff --git a/fuel/deploy/baremetal/conf/linux_foundation_lab/ha/dha.yaml b/fuel/deploy/baremetal/conf/linux_foundation_lab/ha/dha.yaml
new file mode 100644
index 0000000..5acd389
--- /dev/null
+++ b/fuel/deploy/baremetal/conf/linux_foundation_lab/ha/dha.yaml
@@ -0,0 +1,49 @@
+title: Deployment Hardware Adapter (DHA)
+# DHA API version supported
+version: 1.1
+created: Fri May 8 08:03:49 UTC 2015
+comment: Config for LF Pod1
+
+# Adapter to use for this definition
+adapter: ipmi
+
+# Node list.
+# Mandatory property is id, all other properties are adapter specific.
+
+nodes:
+- id: 1
+ pxeMac: 00:25:b5:b0:00:ef
+ ipmiIp: 172.30.8.69
+ ipmiUser: admin
+ ipmiPass: octopus
+- id: 2
+ pxeMac: 00:25:b5:b0:00:cf
+ ipmiIp: 172.30.8.78
+ ipmiUser: admin
+ ipmiPass: octopus
+- id: 3
+ pxeMac: 00:25:b5:b0:00:8f
+ ipmiIp: 172.30.8.68
+ ipmiUser: admin
+ ipmiPass: octopus
+- id: 4
+ pxeMac: 00:25:b5:b0:00:6f
+ ipmiIp: 172.30.8.77
+ ipmiUser: admin
+ ipmiPass: octopus
+- id: 5
+ pxeMac: 00:25:b5:b0:00:4f
+ ipmiIp: 172.30.8.67
+ ipmiUser: admin
+ ipmiPass: octopus
+# Adding the Fuel node as node id 6, which may not be correct - please
+# adjust as needed.
+- id: 6
+ libvirtName: vFuel
+ libvirtTemplate: baremetal/vms/fuel_lf.xml
+ isFuel: yes
+ username: root
+ password: r00tme
+
+disks:
+ fuel: 30G \ No newline at end of file
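For adapter: ipmi, each node entry above carries the IPMI endpoint and credentials used for power control, while the isFuel entry is handled through libvirt instead. A minimal sketch of consuming such a file, assuming PyYAML; the helper name and the exact ipmitool options are illustrative (the real option set lives in the IPMI adapter further down in this change), only the host/user/password fields come from the entries above:

import yaml

def ipmi_command_prefixes(dha_path):
    # One 'ipmitool ...' command prefix per blade; the Fuel node has no IPMI fields.
    with open(dha_path) as f:
        dha = yaml.load(f)
    prefixes = {}
    for node in dha['nodes']:
        if node.get('isFuel'):
            continue
        prefixes[node['id']] = ('ipmitool -H %s -U %s -P %s'
                                % (node['ipmiIp'], node['ipmiUser'], node['ipmiPass']))
    return prefixes

# e.g. prefixes[1] + ' chassis power status', the same call pattern used by the IPMI adapter below.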
diff --git a/fuel/deploy/baremetal/conf/linux_foundation_lab/multinode/dea.yaml b/fuel/deploy/baremetal/conf/linux_foundation_lab/multinode/dea.yaml
new file mode 100644
index 0000000..2387443
--- /dev/null
+++ b/fuel/deploy/baremetal/conf/linux_foundation_lab/multinode/dea.yaml
@@ -0,0 +1,950 @@
+title: Deployment Environment Adapter (DEA)
+# DEA API version supported
+version: 1.1
+created: Tue May 5 15:33:07 UTC 2015
+comment: Config for LF Pod1
+environment_name: opnfv
+environment_mode: multinode
+wanted_release: Juno on Ubuntu 12.04.4
+nodes:
+- id: 1
+ interfaces: interfaces_1
+ transformations: transformations_1
+ role: ceph-osd,controller
+- id: 2
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
+- id: 3
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
+- id: 4
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
+- id: 5
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
+fuel:
+ ADMIN_NETWORK:
+ ipaddress: 10.20.0.2
+ netmask: 255.255.0.0
+ dhcp_pool_start: 10.20.0.3
+ dhcp_pool_end: 10.20.0.254
+ DNS_UPSTREAM: 8.8.8.8
+ DNS_DOMAIN: domain.tld
+ DNS_SEARCH: domain.tld
+ FUEL_ACCESS:
+ user: admin
+ password: admin
+ HOSTNAME: opnfv
+ NTP1: 0.pool.ntp.org
+ NTP2: 1.pool.ntp.org
+ NTP3: 2.pool.ntp.org
+interfaces:
+ interfaces_1:
+ eth0:
+ - public
+ eth1:
+ - fuelweb_admin
+ - management
+ - storage
+ - private
+transformations:
+ transformations_1:
+ - action: add-br
+ name: br-eth0
+ - action: add-port
+ bridge: br-eth0
+ name: eth0
+ - action: add-br
+ name: br-eth1
+ - action: add-port
+ bridge: br-eth1
+ name: eth1
+ - action: add-br
+ name: br-ex
+ - action: add-br
+ name: br-mgmt
+ - action: add-br
+ name: br-storage
+ - action: add-br
+ name: br-fw-admin
+ - action: add-patch
+ bridges:
+ - br-eth1
+ - br-storage
+ tags:
+ - 301
+ - 0
+ vlan_ids:
+ - 301
+ - 0
+ - action: add-patch
+ bridges:
+ - br-eth1
+ - br-mgmt
+ tags:
+ - 300
+ - 0
+ vlan_ids:
+ - 300
+ - 0
+ - action: add-patch
+ bridges:
+ - br-eth1
+ - br-fw-admin
+ trunks:
+ - 0
+ - action: add-patch
+ bridges:
+ - br-eth0
+ - br-ex
+ trunks:
+ - 0
+ - action: add-br
+ name: br-prv
+ - action: add-patch
+ bridges:
+ - br-eth1
+ - br-prv
+ transformations_2:
+ - action: add-br
+ name: br-eth0
+ - action: add-port
+ bridge: br-eth0
+ name: eth0
+ - action: add-br
+ name: br-eth1
+ - action: add-port
+ bridge: br-eth1
+ name: eth1
+ - action: add-br
+ name: br-mgmt
+ - action: add-br
+ name: br-storage
+ - action: add-br
+ name: br-fw-admin
+ - action: add-patch
+ bridges:
+ - br-eth1
+ - br-storage
+ tags:
+ - 301
+ - 0
+ vlan_ids:
+ - 301
+ - 0
+ - action: add-patch
+ bridges:
+ - br-eth1
+ - br-mgmt
+ tags:
+ - 300
+ - 0
+ vlan_ids:
+ - 300
+ - 0
+ - action: add-patch
+ bridges:
+ - br-eth1
+ - br-fw-admin
+ trunks:
+ - 0
+ - action: add-br
+ name: br-prv
+ - action: add-patch
+ bridges:
+ - br-eth1
+ - br-prv
+opnfv:
+ compute: {}
+ controller: {}
+network:
+ networking_parameters:
+ base_mac: fa:16:3e:00:00:00
+ dns_nameservers:
+ - 8.8.4.4
+ - 8.8.8.8
+ floating_ranges:
+ - - 172.30.9.80
+ - 172.30.9.89
+ gre_id_range:
+ - 2
+ - 65535
+ internal_cidr: 192.168.111.0/24
+ internal_gateway: 192.168.111.1
+ net_l23_provider: ovs
+ segmentation_type: vlan
+ vlan_range:
+ - 1000
+ - 1010
+ networks:
+ - cidr: 172.30.9.0/24
+ gateway: 172.30.9.1
+ ip_ranges:
+ - - 172.30.9.70
+ - 172.30.9.70
+ meta:
+ assign_vip: true
+ cidr: 172.16.0.0/24
+ configurable: true
+ floating_range_var: floating_ranges
+ ip_range:
+ - 172.16.0.2
+ - 172.16.0.126
+ map_priority: 1
+ name: public
+ notation: ip_ranges
+ render_addr_mask: public
+ render_type: null
+ use_gateway: true
+ vlan_start: null
+ name: public
+ vlan_start: null
+ - cidr: 192.168.0.0/24
+ gateway: null
+ ip_ranges:
+ - - 192.168.0.2
+ - 192.168.0.254
+ meta:
+ assign_vip: true
+ cidr: 192.168.0.0/24
+ configurable: true
+ map_priority: 2
+ name: management
+ notation: cidr
+ render_addr_mask: internal
+ render_type: cidr
+ use_gateway: false
+ vlan_start: 101
+ name: management
+ vlan_start: 300
+ - cidr: 192.168.1.0/24
+ gateway: null
+ ip_ranges:
+ - - 192.168.1.2
+ - 192.168.1.254
+ meta:
+ assign_vip: false
+ cidr: 192.168.1.0/24
+ configurable: true
+ map_priority: 2
+ name: storage
+ notation: cidr
+ render_addr_mask: storage
+ render_type: cidr
+ use_gateway: false
+ vlan_start: 102
+ name: storage
+ vlan_start: 301
+ - cidr: null
+ gateway: null
+ ip_ranges: []
+ meta:
+ assign_vip: false
+ configurable: false
+ map_priority: 2
+ name: private
+ neutron_vlan_range: true
+ notation: null
+ render_addr_mask: null
+ render_type: null
+ seg_type: vlan
+ use_gateway: false
+ vlan_start: null
+ name: private
+ vlan_start: null
+ - cidr: 10.20.0.0/16
+ gateway: null
+ ip_ranges:
+ - - 10.20.0.3
+ - 10.20.255.254
+ meta:
+ assign_vip: false
+ configurable: false
+ map_priority: 0
+ notation: ip_ranges
+ render_addr_mask: null
+ render_type: null
+ unmovable: true
+ use_gateway: true
+ name: fuelweb_admin
+ vlan_start: null
+settings:
+ editable:
+ access:
+ email:
+ description: Email address for Administrator
+ label: email
+ type: text
+ value: admin@localhost
+ weight: 40
+ metadata:
+ label: Access
+ weight: 10
+ password:
+ description: Password for Administrator
+ label: password
+ type: password
+ value: admin
+ weight: 20
+ tenant:
+ description: Tenant (project) name for Administrator
+ label: tenant
+ regex:
+ error: Invalid tenant name
+ source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
+ type: text
+ value: admin
+ weight: 30
+ user:
+ description: Username for Administrator
+ label: username
+ regex:
+ error: Invalid username
+ source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
+ type: text
+ value: admin
+ weight: 10
+ additional_components:
+ ceilometer:
+ description: If selected, Ceilometer component will be installed
+ label: Install Ceilometer
+ type: checkbox
+ value: false
+ weight: 40
+ heat:
+ description: ''
+ label: ''
+ type: hidden
+ value: true
+ weight: 30
+ metadata:
+ label: Additional Components
+ weight: 20
+ murano:
+ description: If selected, Murano component will be installed
+ label: Install Murano
+ restrictions:
+ - cluster:net_provider != 'neutron'
+ type: checkbox
+ value: false
+ weight: 20
+ sahara:
+ description: If selected, Sahara component will be installed
+ label: Install Sahara
+ type: checkbox
+ value: false
+ weight: 10
+ common:
+ auth_key:
+ description: Public key(s) to include in authorized_keys on deployed nodes
+ label: Public Key
+ type: text
+ value: ''
+ weight: 70
+ auto_assign_floating_ip:
+ description: If selected, OpenStack will automatically assign a floating IP
+ to a new instance
+ label: Auto assign floating IP
+ restrictions:
+ - cluster:net_provider == 'neutron'
+ type: checkbox
+ value: false
+ weight: 40
+ compute_scheduler_driver:
+ label: Scheduler driver
+ type: radio
+ value: nova.scheduler.filter_scheduler.FilterScheduler
+ values:
+ - data: nova.scheduler.filter_scheduler.FilterScheduler
+ description: Currently the most advanced OpenStack scheduler. See the OpenStack
+ documentation for details.
+ label: Filter scheduler
+ - data: nova.scheduler.simple.SimpleScheduler
+ description: This is 'naive' scheduler which tries to find the least loaded
+ host
+ label: Simple scheduler
+ weight: 40
+ debug:
+ description: Debug logging mode provides more information, but requires more
+ disk space.
+ label: OpenStack debug logging
+ type: checkbox
+ value: false
+ weight: 20
+ disable_offload:
+ description: If set, generic segmentation offload (gso) and generic receive
+ offload (gro) on physical nics will be disabled. See ethtool man.
+ label: Disable generic offload on physical nics
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider == 'neutron' and networking_parameters:segmentation_type
+ == 'gre'
+ type: checkbox
+ value: true
+ weight: 80
+ libvirt_type:
+ label: Hypervisor type
+ type: radio
+ value: kvm
+ values:
+ - data: kvm
+ description: Choose this type of hypervisor if you run OpenStack on hardware
+ label: KVM
+ restrictions:
+ - settings:common.libvirt_type.value == 'vcenter'
+ - data: qemu
+ description: Choose this type of hypervisor if you run OpenStack on virtual
+ hosts.
+ label: QEMU
+ restrictions:
+ - settings:common.libvirt_type.value == 'vcenter'
+ - data: vcenter
+ description: Choose this type of hypervisor if you run OpenStack in a vCenter
+ environment.
+ label: vCenter
+ restrictions:
+ - settings:common.libvirt_type.value != 'vcenter' or cluster:net_provider
+ == 'neutron'
+ weight: 30
+ metadata:
+ label: Common
+ weight: 30
+ nova_quota:
+ description: Quotas are used to limit CPU and memory usage for tenants. Enabling
+ quotas will increase load on the Nova database.
+ label: Nova quotas
+ type: checkbox
+ value: false
+ weight: 25
+ resume_guests_state_on_host_boot:
+ description: Whether to resume previous guests state when the host reboots.
+ If enabled, this option causes guests assigned to the host to resume their
+ previous state. If the guest was running a restart will be attempted when
+ nova-compute starts. If the guest was not running previously, a restart
+ will not be attempted.
+ label: Resume guests state on host boot
+ type: checkbox
+ value: true
+ weight: 60
+ use_cow_images:
+ description: For most cases you will want qcow format. If it's disabled, raw
+ image format will be used to run VMs. OpenStack with raw format currently
+ does not support snapshotting.
+ label: Use qcow format for images
+ type: checkbox
+ value: true
+ weight: 50
+ corosync:
+ group:
+ description: ''
+ label: Group
+ type: text
+ value: 226.94.1.1
+ weight: 10
+ metadata:
+ label: Corosync
+ restrictions:
+ - action: hide
+ condition: 'true'
+ weight: 50
+ port:
+ description: ''
+ label: Port
+ type: text
+ value: '12000'
+ weight: 20
+ verified:
+ description: Set True only if multicast is configured correctly on router.
+ label: Need to pass network verification.
+ type: checkbox
+ value: false
+ weight: 10
+ external_dns:
+ dns_list:
+ description: List of upstream DNS servers, separated by comma
+ label: DNS list
+ type: text
+ value: 8.8.8.8, 8.8.4.4
+ weight: 10
+ metadata:
+ label: Upstream DNS
+ weight: 90
+ external_ntp:
+ metadata:
+ label: Upstream NTP
+ weight: 100
+ ntp_list:
+ description: List of upstream NTP servers, separated by comma
+ label: NTP servers list
+ type: text
+ value: 0.pool.ntp.org, 1.pool.ntp.org
+ weight: 10
+ kernel_params:
+ kernel:
+ description: Default kernel parameters
+ label: Initial parameters
+ type: text
+ value: console=ttyS0,9600 console=tty0 rootdelay=90 nomodeset
+ weight: 45
+ metadata:
+ label: Kernel parameters
+ weight: 40
+ neutron_mellanox:
+ metadata:
+ enabled: true
+ label: Mellanox Neutron components
+ toggleable: false
+ weight: 50
+ plugin:
+ label: Mellanox drivers and SR-IOV plugin
+ type: radio
+ value: disabled
+ values:
+ - data: disabled
+ description: If selected, Mellanox drivers, Neutron and Cinder plugin will
+ not be installed.
+ label: Mellanox drivers and plugins disabled
+ restrictions:
+ - settings:storage.iser.value == true
+ - data: drivers_only
+ description: If selected, Mellanox Ethernet drivers will be installed to
+ support networking over Mellanox NIC. Mellanox Neutron plugin will not
+ be installed.
+ label: Install only Mellanox drivers
+ restrictions:
+ - settings:common.libvirt_type.value != 'kvm'
+ - data: ethernet
+ description: If selected, both Mellanox Ethernet drivers and Mellanox network
+ acceleration (Neutron) plugin will be installed.
+ label: Install Mellanox drivers and SR-IOV plugin
+ restrictions:
+ - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider
+ == 'neutron' and networking_parameters:segmentation_type == 'vlan')
+ weight: 60
+ vf_num:
+ description: Note that one virtual function will be reserved to the storage
+ network, in case of choosing iSER.
+ label: Number of virtual NICs
+ restrictions:
+ - settings:neutron_mellanox.plugin.value != 'ethernet'
+ type: text
+ value: '16'
+ weight: 70
+ nsx_plugin:
+ connector_type:
+ description: Default network transport type to use
+ label: NSX connector type
+ type: select
+ value: stt
+ values:
+ - data: gre
+ label: GRE
+ - data: ipsec_gre
+ label: GRE over IPSec
+ - data: stt
+ label: STT
+ - data: ipsec_stt
+ label: STT over IPSec
+ - data: bridge
+ label: Bridge
+ weight: 80
+ l3_gw_service_uuid:
+ description: UUID for the default L3 gateway service to use with this cluster
+ label: L3 service UUID
+ regex:
+ error: Invalid L3 gateway service UUID
+ source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
+ type: text
+ value: ''
+ weight: 50
+ metadata:
+ enabled: false
+ label: VMware NSX
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider != 'neutron' or networking_parameters:net_l23_provider
+ != 'nsx'
+ weight: 20
+ nsx_controllers:
+ description: One or more IPv4[:port] addresses of NSX controller node, separated
+ by comma (e.g. 10.30.30.2,192.168.110.254:443)
+ label: NSX controller endpoint
+ regex:
+ error: Invalid controller endpoints, specify valid IPv4[:port] pair
+ source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(,(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?)*$
+ type: text
+ value: ''
+ weight: 60
+ nsx_password:
+ description: Password for Administrator
+ label: NSX password
+ regex:
+ error: Empty password
+ source: \S
+ type: password
+ value: ''
+ weight: 30
+ nsx_username:
+ description: NSX administrator's username
+ label: NSX username
+ regex:
+ error: Empty username
+ source: \S
+ type: text
+ value: admin
+ weight: 20
+ packages_url:
+ description: URL to NSX specific packages
+ label: URL to NSX bits
+ regex:
+ error: Invalid URL, specify valid HTTP/HTTPS URL with IPv4 address (e.g.
+ http://10.20.0.2/nsx)
+ source: ^https?://(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(/.*)?$
+ type: text
+ value: ''
+ weight: 70
+ replication_mode:
+ description: ''
+ label: NSX cluster has Service nodes
+ type: checkbox
+ value: true
+ weight: 90
+ transport_zone_uuid:
+ description: UUID of the pre-existing default NSX Transport zone
+ label: Transport zone UUID
+ regex:
+ error: Invalid transport zone UUID
+ source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
+ type: text
+ value: ''
+ weight: 40
+ provision:
+ metadata:
+ label: Provision
+ restrictions:
+ - action: hide
+ condition: not ('experimental' in version:feature_groups)
+ weight: 80
+ method:
+ description: Which provision method to use for this cluster.
+ label: Provision method
+ type: radio
+ value: cobbler
+ values:
+ - data: image
+ description: Copying pre-built images on a disk.
+ label: Image
+ - data: cobbler
+ description: Install from scratch using anaconda or debian-installer.
+ label: Classic (use anaconda or debian-installer)
+ public_network_assignment:
+ assign_to_all_nodes:
+ description: When disabled, public network will be assigned to controllers
+ and zabbix-server only
+ label: Assign public network to all nodes
+ type: checkbox
+ value: false
+ weight: 10
+ metadata:
+ label: Public network assignment
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider != 'neutron'
+ weight: 50
+ storage:
+ ephemeral_ceph:
+ description: Configures Nova to store ephemeral volumes in RBD. This works
+ best if Ceph is enabled for volumes and images, too. Enables live migration
+ of all types of Ceph backed VMs (without this option, live migration will
+ only work with VMs launched from Cinder volumes).
+ label: Ceph RBD for ephemeral volumes (Nova)
+ restrictions:
+ - settings:common.libvirt_type.value == 'vcenter'
+ type: checkbox
+ value: true
+ weight: 75
+ images_ceph:
+ description: Configures Glance to use the Ceph RBD backend to store images.
+ If enabled, this option will prevent Swift from installing.
+ label: Ceph RBD for images (Glance)
+ type: checkbox
+ value: true
+ weight: 30
+ images_vcenter:
+ description: Configures Glance to use the vCenter/ESXi backend to store images.
+ If enabled, this option will prevent Swift from installing.
+ label: VMWare vCenter/ESXi datastore for images (Glance)
+ restrictions:
+ - settings:common.libvirt_type.value != 'vcenter'
+ type: checkbox
+ value: false
+ weight: 35
+ iser:
+ description: 'High performance block storage: Cinder volumes over iSER protocol
+ (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC,
+ and will use a dedicated virtual function for the storage network.'
+ label: iSER protocol for volumes (Cinder)
+ restrictions:
+ - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value
+ != 'kvm'
+ type: checkbox
+ value: false
+ weight: 11
+ metadata:
+ label: Storage
+ weight: 60
+ objects_ceph:
+ description: Configures RadosGW front end for Ceph RBD. This exposes S3 and
+ Swift API Interfaces. If enabled, this option will prevent Swift from installing.
+ label: Ceph RadosGW for objects (Swift API)
+ restrictions:
+ - settings:storage.images_ceph.value == false
+ type: checkbox
+ value: false
+ weight: 80
+ osd_pool_size:
+ description: Configures the default number of object replicas in Ceph. This
+ number must be equal to or lower than the number of deployed 'Storage -
+ Ceph OSD' nodes.
+ label: Ceph object replication factor
+ regex:
+ error: Invalid number
+ source: ^[1-9]\d*$
+ restrictions:
+ - settings:common.libvirt_type.value == 'vcenter'
+ type: text
+ value: '2'
+ weight: 85
+ vc_datacenter:
+ description: Inventory path to a datacenter. If you want to use ESXi host
+ as datastore, it should be "ha-datacenter".
+ label: Datacenter name
+ regex:
+ error: Empty datacenter
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: ''
+ weight: 65
+ vc_datastore:
+ description: Datastore associated with the datacenter.
+ label: Datastore name
+ regex:
+ error: Empty datastore
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: ''
+ weight: 60
+ vc_host:
+ description: IP Address of vCenter/ESXi
+ label: vCenter/ESXi IP
+ regex:
+ error: Specify valid IPv4 address
+ source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: ''
+ weight: 45
+ vc_image_dir:
+ description: The name of the directory where the glance images will be stored
+ in the VMware datastore.
+ label: Datastore Images directory
+ regex:
+ error: Empty images directory
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: /openstack_glance
+ weight: 70
+ vc_password:
+ description: vCenter/ESXi admin password
+ label: Password
+ regex:
+ error: Empty password
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: password
+ value: ''
+ weight: 55
+ vc_user:
+ description: vCenter/ESXi admin username
+ label: Username
+ regex:
+ error: Empty username
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: ''
+ weight: 50
+ volumes_ceph:
+ description: Configures Cinder to store volumes in Ceph RBD images.
+ label: Ceph RBD for volumes (Cinder)
+ restrictions:
+ - settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value
+ == 'vcenter'
+ type: checkbox
+ value: true
+ weight: 20
+ volumes_lvm:
+ description: Requires at least one Storage - Cinder LVM node.
+ label: Cinder LVM over iSCSI for volumes
+ restrictions:
+ - settings:storage.volumes_ceph.value == true
+ type: checkbox
+ value: false
+ weight: 10
+ volumes_vmdk:
+ description: Configures Cinder to store volumes via VMware vCenter.
+ label: VMware vCenter for volumes (Cinder)
+ restrictions:
+ - settings:common.libvirt_type.value != 'vcenter' or settings:storage.volumes_lvm.value
+ == true
+ type: checkbox
+ value: false
+ weight: 15
+ syslog:
+ metadata:
+ label: Syslog
+ weight: 50
+ syslog_port:
+ description: Remote syslog port
+ label: Port
+ regex:
+ error: Invalid Syslog port
+ source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+ type: text
+ value: '514'
+ weight: 20
+ syslog_server:
+ description: Remote syslog hostname
+ label: Hostname
+ type: text
+ value: ''
+ weight: 10
+ syslog_transport:
+ label: Syslog transport protocol
+ type: radio
+ value: tcp
+ values:
+ - data: udp
+ description: ''
+ label: UDP
+ - data: tcp
+ description: ''
+ label: TCP
+ weight: 30
+ vcenter:
+ cluster:
+ description: vCenter cluster name. If you have multiple clusters, use comma
+ to separate names
+ label: Cluster
+ regex:
+ error: Invalid cluster list
+ source: ^([^,\ ]+([\ ]*[^,\ ])*)(,[^,\ ]+([\ ]*[^,\ ])*)*$
+ type: text
+ value: ''
+ weight: 40
+ datastore_regex:
+ description: The Datastore regexp setting specifies the data stores to use
+ with Compute. For example, "nas.*". If you want to use all available datastores,
+ leave this field blank
+ label: Datastore regexp
+ regex:
+ error: Invalid datastore regexp
+ source: ^(\S.*\S|\S|)$
+ type: text
+ value: ''
+ weight: 50
+ host_ip:
+ description: IP Address of vCenter
+ label: vCenter IP
+ regex:
+ error: Specify valid IPv4 address
+ source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
+ type: text
+ value: ''
+ weight: 10
+ metadata:
+ label: vCenter
+ restrictions:
+ - action: hide
+ condition: settings:common.libvirt_type.value != 'vcenter'
+ weight: 20
+ use_vcenter:
+ description: ''
+ label: ''
+ type: hidden
+ value: true
+ weight: 5
+ vc_password:
+ description: vCenter admin password
+ label: Password
+ regex:
+ error: Empty password
+ source: \S
+ type: password
+ value: admin
+ weight: 30
+ vc_user:
+ description: vCenter admin username
+ label: Username
+ regex:
+ error: Empty username
+ source: \S
+ type: text
+ value: admin
+ weight: 20
+ vlan_interface:
+ description: Physical ESXi host ethernet adapter for VLAN networking (e.g.
+ vmnic1). If empty "vmnic0" is used by default
+ label: ESXi VLAN interface
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider != 'nova_network' or networking_parameters:net_manager
+ != 'VlanManager'
+ type: text
+ value: ''
+ weight: 60
+ zabbix:
+ metadata:
+ label: Zabbix Access
+ restrictions:
+ - action: hide
+ condition: not ('experimental' in version:feature_groups)
+ weight: 70
+ password:
+ description: Password for Zabbix Administrator
+ label: password
+ type: password
+ value: zabbix
+ weight: 20
+ username:
+ description: Username for Zabbix Administrator
+ label: username
+ type: text
+ value: admin
+ weight: 10 \ No newline at end of file
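Each node entry at the top of this DEA file points to named interface and transformation sets (interfaces_1, transformations_1/2); the deploy scripts resolve those names per node when they rewrite the network scheme (see the configure_nodes.py change below). A rough sketch of that lookup, assuming PyYAML; the function name is hypothetical:

import yaml

def node_transformations(dea_path, node_id):
    # Resolve a node's transformation list: node entry -> scheme name -> action list.
    with open(dea_path) as f:
        dea = yaml.load(f)
    node = next(n for n in dea['nodes'] if n['id'] == node_id)
    scheme = node['transformations']        # e.g. 'transformations_2'
    return dea['transformations'][scheme]   # add-br / add-port / add-patch actions

# node_transformations('dea.yaml', 3) returns the transformations_2 action list.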
diff --git a/fuel/deploy/baremetal/conf/linux_foundation_lab/multinode/dha.yaml b/fuel/deploy/baremetal/conf/linux_foundation_lab/multinode/dha.yaml
new file mode 100644
index 0000000..5acd389
--- /dev/null
+++ b/fuel/deploy/baremetal/conf/linux_foundation_lab/multinode/dha.yaml
@@ -0,0 +1,49 @@
+title: Deployment Hardware Adapter (DHA)
+# DHA API version supported
+version: 1.1
+created: Fri May 8 08:03:49 UTC 2015
+comment: Config for LF Pod1
+
+# Adapter to use for this definition
+adapter: ipmi
+
+# Node list.
+# Mandatory property is id, all other properties are adapter specific.
+
+nodes:
+- id: 1
+ pxeMac: 00:25:b5:b0:00:ef
+ ipmiIp: 172.30.8.69
+ ipmiUser: admin
+ ipmiPass: octopus
+- id: 2
+ pxeMac: 00:25:b5:b0:00:cf
+ ipmiIp: 172.30.8.78
+ ipmiUser: admin
+ ipmiPass: octopus
+- id: 3
+ pxeMac: 00:25:b5:b0:00:8f
+ ipmiIp: 172.30.8.68
+ ipmiUser: admin
+ ipmiPass: octopus
+- id: 4
+ pxeMac: 00:25:b5:b0:00:6f
+ ipmiIp: 172.30.8.77
+ ipmiUser: admin
+ ipmiPass: octopus
+- id: 5
+ pxeMac: 00:25:b5:b0:00:4f
+ ipmiIp: 172.30.8.67
+ ipmiUser: admin
+ ipmiPass: octopus
+# Adding the Fuel node as node id 6, which may not be correct - please
+# adjust as needed.
+- id: 6
+ libvirtName: vFuel
+ libvirtTemplate: baremetal/vms/fuel_lf.xml
+ isFuel: yes
+ username: root
+ password: r00tme
+
+disks:
+ fuel: 30G \ No newline at end of file
diff --git a/fuel/deploy/baremetal/dha.yaml b/fuel/deploy/baremetal/dha.yaml
deleted file mode 100644
index 6240f07..0000000
--- a/fuel/deploy/baremetal/dha.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
-title: Deployment Hardware Adapter (DHA)
-# DHA API version supported
-version: 1.1
-created: Mon May 4 09:03:46 UTC 2015
-comment: Test environment Ericsson Montreal
-
-# Adapter to use for this definition
-adapter: ipmi
-
-# Node list.
-# Mandatory properties are id and role.
-# The MAC address of the PXE boot interface for Fuel is not
-# mandatory to be defined.
-# All other properties are adapter specific.
-
-nodes:
-- id: 1
- pxeMac: 14:58:D0:54:7A:28
- ipmiIp: 10.118.32.205
- ipmiUser: username
- ipmiPass: password
-- id: 2
- pxeMac: 14:58:D0:55:E2:E0
- ipmiIp: 10.118.32.202
- ipmiUser: username
- ipmiPass: password
-# Adding the Fuel node as node id 3 which may not be correct - please
-# adjust as needed.
-- id: 3
- libvirtName: vFuel
- libvirtTemplate: vFuel
- isFuel: yes
- username: root
- password: r00tme
-
-# Deployment power on strategy
-# all: Turn on all nodes at once. There will be no correlation
-# between the DHA and DEA node numbering. MAC addresses
-# will be used to select the node roles though.
-# sequence: Turn on the nodes in sequence starting with the lowest order
-# node and wait for the node to be detected by Fuel. Not until
-# the node has been detected and assigned a role will the next
-# node be turned on.
-powerOnStrategy: sequence
-
-# If fuelCustomInstall is set to true, Fuel is assumed to be installed by
-# calling the DHA adapter function "dha_fuelCustomInstall()" with two
-# arguments: node ID and the ISO file name to deploy. The custom install
-# function is then to handle all necessary logic to boot the Fuel master
-# from the ISO and then return.
-# Allowed values: true, false
-fuelCustomInstall: true
-
diff --git a/fuel/deploy/baremetal/vm/vFuel b/fuel/deploy/baremetal/vms/fuel.xml
index 1b4f4eb..9f1eeac 100644
--- a/fuel/deploy/baremetal/vm/vFuel
+++ b/fuel/deploy/baremetal/vms/fuel.xml
@@ -1,5 +1,5 @@
<domain type='kvm'>
- <name>vFuel</name>
+ <name>fuel</name>
<memory unit='KiB'>8290304</memory>
<currentMemory unit='KiB'>8290304</currentMemory>
<vcpu placement='static'>2</vcpu>
diff --git a/fuel/deploy/baremetal/vms/fuel_lf.xml b/fuel/deploy/baremetal/vms/fuel_lf.xml
new file mode 100644
index 0000000..2dd9738
--- /dev/null
+++ b/fuel/deploy/baremetal/vms/fuel_lf.xml
@@ -0,0 +1,93 @@
+<domain type='kvm' id='62'>
+ <name>vFuel</name>
+ <memory unit='KiB'>8290304</memory>
+ <currentMemory unit='KiB'>8290304</currentMemory>
+ <vcpu placement='static'>4</vcpu>
+ <resource>
+ <partition>/machine</partition>
+ </resource>
+ <os>
+ <type arch='x86_64' machine='pc-i440fx-rhel7.0.0'>hvm</type>
+ <boot dev='cdrom'/>
+ <boot dev='hd'/>
+ <bootmenu enable='no'/>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <cpu mode='custom' match='exact'>
+ <model fallback='allow'>SandyBridge</model>
+ </cpu>
+ <clock offset='utc'>
+ <timer name='rtc' tickpolicy='catchup'/>
+ <timer name='pit' tickpolicy='delay'/>
+ <timer name='hpet' present='no'/>
+ </clock>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>restart</on_crash>
+ <pm>
+ <suspend-to-mem enabled='no'/>
+ <suspend-to-disk enabled='no'/>
+ </pm>
+ <devices>
+ <emulator>/usr/libexec/qemu-kvm</emulator>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='raw'/>
+ <source file='/home/opnfv/images/vFuel.raw'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ <disk type='block' device='cdrom'>
+ <driver name='qemu' type='raw'/>
+ <target dev='hdb' bus='ide'/>
+ <readonly/>
+ </disk>
+ <controller type='usb' index='0' model='ich9-ehci1'>
+ </controller>
+ <controller type='usb' index='0' model='ich9-uhci1'>
+ <master startport='0'/>
+ </controller>
+ <controller type='usb' index='0' model='ich9-uhci2'>
+ <master startport='2'/>
+ </controller>
+ <controller type='usb' index='0' model='ich9-uhci3'>
+ <master startport='4'/>
+ </controller>
+ <controller type='pci' index='0' model='pci-root'>
+ </controller>
+ <controller type='ide' index='0'>
+ </controller>
+ <controller type='virtio-serial' index='0'>
+ </controller>
+ <interface type='bridge'>
+ <source bridge='pxebr'/>
+ <model type='virtio'/>
+ </interface>
+ <serial type='pty'>
+ <source path='/dev/pts/0'/>
+ <target port='0'/>
+ </serial>
+ <console type='pty' tty='/dev/pts/0'>
+ <source path='/dev/pts/0'/>
+ <target type='serial' port='0'/>
+ </console>
+ <input type='mouse' bus='ps2'/>
+ <input type='keyboard' bus='ps2'/>
+ <graphics type='vnc' port='-1' autoport='yes' listen='127.0.0.1' keymap='sv'>
+ <listen type='address' address='127.0.0.1'/>
+ </graphics>
+ <sound model='ich6'>
+ </sound>
+ <video>
+ <model type='cirrus' vram='9216' heads='1'/>
+ </video>
+ <memballoon model='virtio'>
+ </memballoon>
+ </devices>
+ <seclabel type='dynamic' model='selinux' relabel='yes'>
+ <label>system_u:system_r:svirt_t:s0:c52,c932</label>
+ <imagelabel>system_u:object_r:svirt_image_t:s0:c52,c932</imagelabel>
+ </seclabel>
+</domain> \ No newline at end of file
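This template is what the LF dha.yaml entries above reference via libvirtTemplate: baremetal/vms/fuel_lf.xml, and the disk it points at (/home/opnfv/images/vFuel.raw) is created by the deploy scripts. A hedged sketch of defining and booting it by hand with the same exec_cmd helper the scripts use; this is optional and assumes the disk image already exists:

import common

exec_cmd = common.exec_cmd

# 'virsh define' mirrors the call made by ExecutionEnvironment.define_vm() below;
# 'virsh start' then boots the freshly defined vFuel domain.
exec_cmd('virsh define fuel/deploy/baremetal/vms/fuel_lf.xml')
exec_cmd('virsh start vFuel')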
diff --git a/fuel/deploy/cloud/configure_nodes.py b/fuel/deploy/cloud/configure_nodes.py
index 4d1315a..a2f2a10 100644
--- a/fuel/deploy/cloud/configure_nodes.py
+++ b/fuel/deploy/cloud/configure_nodes.py
@@ -26,7 +26,7 @@ class ConfigureNodes(object):
log('Configure nodes')
for node_id, roles_blade in self.node_id_roles_dict.iteritems():
exec_cmd('fuel node set --node-id %s --role %s --env %s'
- % (node_id, ','.join(roles_blade[0]), self.env_id))
+ % (node_id, roles_blade[0], self.env_id))
self.download_deployment_config()
for node_id, roles_blade in self.node_id_roles_dict.iteritems():
@@ -37,8 +37,7 @@ class ConfigureNodes(object):
self.upload_deployment_config()
def modify_node_network_schemes(self, node_id, roles_blade):
- log('Modify node network transformations in environment %s'
- % self.env_id)
+ log('Modify network transformations for node %s' % node_id)
type = self.dea.get_node_property(roles_blade[1], 'transformations')
transformations = self.dea.get_transformations(type)
@@ -53,7 +52,6 @@ class ConfigureNodes(object):
with io.open(node_file, 'w') as stream:
yaml.dump(node, stream, default_flow_style=False)
-
def download_deployment_config(self):
log('Download deployment config for environment %s' % self.env_id)
exec_cmd('fuel deployment --env %s --default --dir %s'
diff --git a/fuel/deploy/cloud/deploy.py b/fuel/deploy/cloud/deploy.py
index c8714f8..c423834 100644
--- a/fuel/deploy/cloud/deploy.py
+++ b/fuel/deploy/cloud/deploy.py
@@ -19,6 +19,8 @@ parse = common.parse
err = common.err
check_file_exists = common.check_file_exists
log = common.log
+commafy = common.commafy
+ArgParser = common.ArgParser
class Deploy(object):
@@ -57,8 +59,8 @@ class Deploy(object):
log('Deleting node %s' % node[N['id']])
exec_cmd('fuel node --node-id %s --delete-from-db'
% node[N['id']])
- exec_cmd('dockerctl shell cobbler cobbler system remove '
- '--name node-%s' % node[N['id']])
+ exec_cmd('cobbler system remove --name node-%s'
+ % node[N['id']], False)
def check_previous_installation(self):
log('Check previous installation')
@@ -120,7 +122,7 @@ class Deploy(object):
self.node_ids_dict[blade] = node[N['id']]
def discovery_waiting_loop(self, discovered_macs):
- WAIT_LOOP = 180
+ WAIT_LOOP = 320
SLEEP_TIME = 10
all_discovered = False
for i in range(WAIT_LOOP):
@@ -147,13 +149,8 @@ class Deploy(object):
def assign_roles_to_cluster_node_ids(self):
self.node_id_roles_dict = {}
for blade, node_id in self.node_ids_dict.iteritems():
- role_list = []
- role = self.dea.get_node_role(blade)
- if role == 'controller':
- role_list.extend(['controller', 'mongo'])
- elif role == 'compute':
- role_list.extend(['compute'])
- self.node_id_roles_dict[node_id] = (role_list, blade)
+ roles = commafy(self.dea.get_node_role(blade))
+ self.node_id_roles_dict[node_id] = (roles, blade)
def configure_environment(self):
config_env = ConfigureEnvironment(self.dea, YAML_CONF_DIR,
@@ -175,25 +172,16 @@ class Deploy(object):
self.configure_environment()
self.deploy_cloud()
-def usage():
- print '''
- Usage:
- python deploy.py <dea_file> <macs_file>
-
- Example:
- python deploy.py dea.yaml macs.yaml
- '''
-
def parse_arguments():
- if len(sys.argv) != 3:
- log('Incorrect number of arguments')
- usage()
- sys.exit(1)
- dea_file = sys.argv[-2]
- macs_file = sys.argv[-1]
- check_file_exists(dea_file)
- check_file_exists(macs_file)
- return dea_file, macs_file
+ parser = ArgParser(prog='python %s' % __file__)
+ parser.add_argument('dea_file', action='store',
+ help='Deployment Environment Adapter: dea.yaml')
+ parser.add_argument('macs_file', action='store',
+ help='Blade MAC addresses: macs.yaml')
+ args = parser.parse_args()
+ check_file_exists(args.dea_file)
+ check_file_exists(args.macs_file)
+ return (args.dea_file, args.macs_file)
def main():
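With this change a blade's role string is taken verbatim from the DEA file (e.g. role: ceph-osd,controller) rather than being expanded from a hard-coded controller/compute mapping; commafy() from common.py only normalizes whitespace around the commas before configure_nodes.py hands the string to fuel node set. A small sketch of the flow; the node id is illustrative:

from common import commafy

role = 'ceph-osd, controller'      # as it may be written in dea.yaml
roles = commafy(role)              # -> 'ceph-osd,controller'

# ConfigureNodes then runs, per discovered node:
#   fuel node set --node-id 5 --role ceph-osd,controller --env <env_id>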
diff --git a/fuel/deploy/cloud/deployment.py b/fuel/deploy/cloud/deployment.py
index cf56c36..0054c5b 100644
--- a/fuel/deploy/cloud/deployment.py
+++ b/fuel/deploy/cloud/deployment.py
@@ -31,7 +31,7 @@ class Deployment(object):
% (self.yaml_config_dir, self.env_id)
if os.path.exists(deployment_dir):
shutil.rmtree(deployment_dir)
- exec_cmd('fuel --env %s deployment --default --dir %s'
+ exec_cmd('fuel deployment --env %s --download --dir %s'
% (self.env_id, self.yaml_config_dir))
def upload_deployment_info(self):
@@ -75,7 +75,8 @@ class Deployment(object):
if env[0][E['status']] == 'operational':
ready = True
break
- elif env[0][E['status']] == 'error':
+ elif (env[0][E['status']] == 'error'
+ or env[0][E['status']] == 'stopped'):
break
else:
time.sleep(SLEEP_TIME)
@@ -102,10 +103,9 @@ class Deployment(object):
def health_check(self):
log('Now running sanity and smoke health checks')
- exec_cmd('fuel health --env %s --check sanity,smoke --force'
- % self.env_id)
- log('Health checks passed !')
-
+ log(exec_cmd('fuel health --env %s --check sanity,smoke --force'
+ % self.env_id))
+
def deploy(self):
self.config_opnfv()
self.run_deploy()
diff --git a/fuel/deploy/common.py b/fuel/deploy/common.py
index 6dbda67..dc12637 100644
--- a/fuel/deploy/common.py
+++ b/fuel/deploy/common.py
@@ -2,6 +2,7 @@ import subprocess
import sys
import os
import logging
+import argparse
N = {'id': 0, 'status': 1, 'name': 2, 'cluster': 3, 'ip': 4, 'mac': 5,
'roles': 6, 'pending_roles': 7, 'online': 8}
@@ -73,6 +74,19 @@ def check_dir_exists(dir_path):
if not os.path.isdir(dir_path):
err('ERROR: Directory %s not found\n' % dir_path)
+def create_dir_if_not_exists(dir_path):
+ if not os.path.isdir(dir_path):
+ log('Creating directory %s' % dir_path)
+ os.makedirs(dir_path)
+
+def commafy(comma_separated_list):
+ l = [c.strip() for c in comma_separated_list.split(',')]
+ return ','.join(l)
+
+def delete_file(file):
+ if os.path.exists(file):
+ os.remove(file)
+
def check_if_root():
r = exec_cmd('whoami')
if r != 'root':
@@ -80,3 +94,10 @@ def check_if_root():
def log(message):
LOG.debug('%s\n' % message)
+
+class ArgParser(argparse.ArgumentParser):
+ def error(self, message):
+ sys.stderr.write('ERROR: %s\n' % message)
+ self.print_help()
+ sys.exit(2)
+
diff --git a/fuel/deploy/dea.py b/fuel/deploy/dea.py
index 8066b6a..61ebea3 100644
--- a/fuel/deploy/dea.py
+++ b/fuel/deploy/dea.py
@@ -48,6 +48,8 @@ class DeploymentEnvironmentAdapter(object):
return node[property_name]
def get_node_role(self, node_id):
+ role_list = []
+
return self.get_node_property(node_id, 'role')
def get_node_ids(self):
diff --git a/fuel/deploy/deploy.py b/fuel/deploy/deploy.py
index 9d1a3d2..3305aed 100644
--- a/fuel/deploy/deploy.py
+++ b/fuel/deploy/deploy.py
@@ -1,33 +1,38 @@
-import sys
import os
import shutil
import io
import re
import netaddr
+import uuid
+import yaml
from dea import DeploymentEnvironmentAdapter
from dha import DeploymentHardwareAdapter
from install_fuel_master import InstallFuelMaster
from deploy_env import CloudDeploy
+from setup_execution_environment import ExecutionEnvironment
import common
log = common.log
exec_cmd = common.exec_cmd
err = common.err
check_file_exists = common.check_file_exists
+check_dir_exists = common.check_dir_exists
+create_dir_if_not_exists = common.create_dir_if_not_exists
check_if_root = common.check_if_root
+ArgParser = common.ArgParser
FUEL_VM = 'fuel'
-TMP_DIR = '%s/fueltmp' % os.getenv('HOME')
PATCH_DIR = 'fuel_patch'
WORK_DIR = 'deploy'
+CWD = os.getcwd()
class cd:
def __init__(self, new_path):
self.new_path = os.path.expanduser(new_path)
def __enter__(self):
- self.saved_path = os.getcwd()
+ self.saved_path = CWD
os.chdir(self.new_path)
def __exit__(self, etype, value, traceback):
@@ -36,8 +41,11 @@ class cd:
class AutoDeploy(object):
- def __init__(self, without_fuel, iso_file, dea_file, dha_file):
+ def __init__(self, without_fuel, storage_dir, pxe_bridge, iso_file,
+ dea_file, dha_file):
self.without_fuel = without_fuel
+ self.storage_dir = storage_dir
+ self.pxe_bridge = pxe_bridge
self.iso_file = iso_file
self.dea_file = dea_file
self.dha_file = dha_file
@@ -45,22 +53,8 @@ class AutoDeploy(object):
self.dha = DeploymentHardwareAdapter(dha_file)
self.fuel_conf = {}
self.fuel_node_id = self.dha.get_fuel_node_id()
- self.fuel_custom = self.dha.use_fuel_custom_install()
self.fuel_username, self.fuel_password = self.dha.get_fuel_access()
-
- def setup_dir(self, dir):
- self.cleanup_dir(dir)
- os.makedirs(dir)
-
- def cleanup_dir(self, dir):
- if os.path.isdir(dir):
- shutil.rmtree(dir)
-
- def power_off_blades(self):
- node_ids = self.dha.get_all_node_ids()
- node_ids = list(set(node_ids) - set([self.fuel_node_id]))
- for node_id in node_ids:
- self.dha.node_power_off(node_id)
+ self.tmp_dir = None
def modify_ip(self, ip_addr, index, val):
ip_str = str(netaddr.IPAddress(ip_addr))
@@ -77,11 +71,9 @@ class AutoDeploy(object):
self.fuel_conf['showmenu'] = 'yes'
def install_fuel_master(self):
- if self.without_fuel:
- log('Not Installing Fuel Master')
- return
log('Install Fuel Master')
- new_iso = '%s/deploy-%s' % (TMP_DIR, os.path.basename(self.iso_file))
+ new_iso = '%s/deploy-%s' \
+ % (self.tmp_dir, os.path.basename(self.iso_file))
self.patch_iso(new_iso)
self.iso_file = new_iso
self.install_iso()
@@ -91,23 +83,18 @@ class AutoDeploy(object):
self.fuel_conf['ip'], self.fuel_username,
self.fuel_password, self.fuel_node_id,
self.iso_file, WORK_DIR)
- if self.fuel_custom:
- log('Custom Fuel install')
- fuel.custom_install()
- else:
- log('Ordinary Fuel install')
- fuel.install()
+ fuel.install()
def patch_iso(self, new_iso):
- tmp_orig_dir = '%s/origiso' % TMP_DIR
- tmp_new_dir = '%s/newiso' % TMP_DIR
+ tmp_orig_dir = '%s/origiso' % self.tmp_dir
+ tmp_new_dir = '%s/newiso' % self.tmp_dir
self.copy(tmp_orig_dir, tmp_new_dir)
self.patch(tmp_new_dir, new_iso)
def copy(self, tmp_orig_dir, tmp_new_dir):
log('Copying...')
- self.setup_dir(tmp_orig_dir)
- self.setup_dir(tmp_new_dir)
+ os.makedirs(tmp_orig_dir)
+ os.makedirs(tmp_new_dir)
exec_cmd('fuseiso %s %s' % (self.iso_file, tmp_orig_dir))
with cd(tmp_orig_dir):
exec_cmd('find . | cpio -pd %s' % tmp_new_dir)
@@ -118,7 +105,7 @@ class AutoDeploy(object):
def patch(self, tmp_new_dir, new_iso):
log('Patching...')
- patch_dir = '%s/%s' % (os.getcwd(), PATCH_DIR)
+ patch_dir = '%s/%s' % (CWD, PATCH_DIR)
ks_path = '%s/ks.cfg.patch' % patch_dir
with cd(tmp_new_dir):
@@ -153,46 +140,81 @@ class AutoDeploy(object):
self.fuel_password, self.dea_file, WORK_DIR)
dep.deploy()
+ def setup_execution_environment(self):
+ exec_env = ExecutionEnvironment(self.storage_dir, self.pxe_bridge,
+ self.dha_file, self.dea)
+ exec_env.setup_environment()
+
+ def create_tmp_dir(self):
+ self.tmp_dir = '%s/fueltmp-%s' % (CWD, str(uuid.uuid1()))
+ os.makedirs(self.tmp_dir)
+
def deploy(self):
check_if_root()
- self.setup_dir(TMP_DIR)
self.collect_fuel_info()
- self.power_off_blades()
- self.install_fuel_master()
- self.cleanup_dir(TMP_DIR)
+ if not self.without_fuel:
+ self.setup_execution_environment()
+ self.create_tmp_dir()
+ self.install_fuel_master()
+ shutil.rmtree(self.tmp_dir)
self.deploy_env()
-def usage():
- print '''
- Usage:
- python deploy.py [-nf] <isofile> <deafile> <dhafile>
-
- Optional arguments:
- -nf Do not install Fuel master
- '''
+def check_bridge(pxe_bridge, dha_path):
+ with io.open(dha_path) as yaml_file:
+ dha_struct = yaml.load(yaml_file)
+ if dha_struct['adapter'] != 'libvirt':
+ log('Using Linux Bridge %s for booting up the Fuel Master VM'
+ % pxe_bridge)
+ r = exec_cmd('ip link show %s' % pxe_bridge)
+ if pxe_bridge in r and 'state UP' not in r:
+ err('Linux Bridge {0} is not Active, '
+ 'bring it UP first: [ip link set dev {0} up]'.format(pxe_bridge))
def parse_arguments():
- if (len(sys.argv) < 4 or len(sys.argv) > 5
- or (len(sys.argv) == 5 and sys.argv[1] != '-nf')):
- log('Incorrect number of arguments')
- usage()
- sys.exit(1)
- without_fuel = False
- if len(sys.argv) == 5 and sys.argv[1] == '-nf':
- without_fuel = True
- iso_file = sys.argv[-3]
- dea_file = sys.argv[-2]
- dha_file = sys.argv[-1]
- check_file_exists(iso_file)
- check_file_exists(dea_file)
- check_file_exists(dha_file)
- return (without_fuel, iso_file, dea_file, dha_file)
+ parser = ArgParser(prog='python %s' % __file__)
+ parser.add_argument('-nf', dest='without_fuel', action='store_true',
+ default=False,
+ help='Do not install Fuel Master (and Node VMs when '
+ 'using libvirt)')
+ parser.add_argument('iso_file', nargs='?', action='store',
+ default='%s/OPNFV.iso' % CWD,
+ help='ISO File [default: OPNFV.iso]')
+ parser.add_argument('dea_file', action='store',
+ help='Deployment Environment Adapter: dea.yaml')
+ parser.add_argument('dha_file', action='store',
+ help='Deployment Hardware Adapter: dha.yaml')
+ parser.add_argument('storage_dir', nargs='?', action='store',
+ default='%s/images' % CWD,
+ help='Storage Directory [default: images]')
+ parser.add_argument('pxe_bridge', nargs='?', action='store',
+ default='pxebr',
+ help='Linux Bridge for booting up the Fuel Master VM '
+ '[default: pxebr]')
+
+ args = parser.parse_args()
+
+ check_file_exists(args.dea_file)
+ check_file_exists(args.dha_file)
+
+ if not args.without_fuel:
+ log('Using OPNFV ISO file: %s' % args.iso_file)
+ check_file_exists(args.iso_file)
+ log('Using image directory: %s' % args.storage_dir)
+ create_dir_if_not_exists(args.storage_dir)
+ log('Using bridge %s to boot up Fuel Master VM on it'
+ % args.pxe_bridge)
+ check_bridge(args.pxe_bridge, args.dha_file)
+
+ return (args.without_fuel, args.storage_dir, args.pxe_bridge,
+ args.iso_file, args.dea_file, args.dha_file)
-def main():
- without_fuel, iso_file, dea_file, dha_file = parse_arguments()
+def main():
+ without_fuel, storage_dir, pxe_bridge, iso_file, dea_file, dha_file = \
+ parse_arguments()
- d = AutoDeploy(without_fuel, iso_file, dea_file, dha_file)
+ d = AutoDeploy(without_fuel, storage_dir, pxe_bridge, iso_file,
+ dea_file, dha_file)
d.deploy()
if __name__ == '__main__':
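After this rework the top-level deployer is driven by argparse: the DEA and DHA files are mandatory, while the ISO, storage directory and PXE bridge are optional positionals defaulting to OPNFV.iso, images and pxebr, so a typical run reduces to something like sudo python deploy.py dea.yaml dha.yaml (or sudo python deploy.py -nf dea.yaml dha.yaml to skip the Fuel master installation); the file names here are illustrative. On non-libvirt setups, check_bridge() additionally refuses to continue if the chosen PXE bridge is not up.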
diff --git a/fuel/deploy/deploy_env.py b/fuel/deploy/deploy_env.py
index 9bc8fbb..48aec18 100644
--- a/fuel/deploy/deploy_env.py
+++ b/fuel/deploy/deploy_env.py
@@ -53,7 +53,7 @@ class CloudDeploy(object):
def set_boot_order(self, boot_order_list):
for node_id in self.node_ids:
- self.dha.node_set_boot_order(node_id, boot_order_list)
+ self.dha.node_set_boot_order(node_id, boot_order_list[:])
def get_mac_addresses(self):
macs_per_node = {}
@@ -67,8 +67,8 @@ class CloudDeploy(object):
deploy_app = '%s/%s' % (self.work_dir, deploy_app)
dea_file = '%s/%s' % (self.work_dir, os.path.basename(self.dea_file))
macs_file = '%s/%s' % (self.work_dir, os.path.basename(self.macs_file))
- with self.ssh:
- self.ssh.run('python %s %s %s' % (deploy_app, dea_file, macs_file))
+ with self.ssh as s:
+ s.run('python %s %s %s' % (deploy_app, dea_file, macs_file))
def deploy(self):
diff --git a/fuel/deploy/dha_adapters/hardware_adapter.py b/fuel/deploy/dha_adapters/hardware_adapter.py
index 884e9ce..a8d0121 100644
--- a/fuel/deploy/dha_adapters/hardware_adapter.py
+++ b/fuel/deploy/dha_adapters/hardware_adapter.py
@@ -34,18 +34,15 @@ class HardwareAdapter(object):
node_ids.sort()
return node_ids
- def use_fuel_custom_install(self):
- return self.dha_struct['fuelCustomInstall']
-
def get_node_property(self, node_id, property_name):
for node in self.dha_struct['nodes']:
if node['id'] == node_id and property_name in node:
return node[property_name]
- def node_can_zero_mbr(self, node_id):
- return self.get_node_property(node_id, 'nodeCanZeroMBR')
-
def get_fuel_access(self):
for node in self.dha_struct['nodes']:
if 'isFuel' in node and node['isFuel']:
return node['username'], node['password']
+
+ def get_disks(self):
+ return self.dha_struct['disks'] \ No newline at end of file
diff --git a/fuel/deploy/dha_adapters/hp_adapter.py b/fuel/deploy/dha_adapters/hp_adapter.py
index 8fc38ad..8cfec34 100644
--- a/fuel/deploy/dha_adapters/hp_adapter.py
+++ b/fuel/deploy/dha_adapters/hp_adapter.py
@@ -19,7 +19,7 @@ class HpAdapter(IpmiAdapter):
log('Set boot order %s on Node %s' % (boot_order_list, node_id))
ip, username, password = self.get_access_info(node_id)
ssh = SSHClient(ip, username, password)
- for order, dev in enumerate(boot_order_list):
- with ssh as s:
+ with ssh as s:
+ for order, dev in enumerate(boot_order_list):
s.exec_cmd('set %s/%s bootorder=%s'
% (ROOT, DEV[dev], order+1))
diff --git a/fuel/deploy/dha_adapters/ipmi_adapter.py b/fuel/deploy/dha_adapters/ipmi_adapter.py
index d97fd2d..1bef898 100644
--- a/fuel/deploy/dha_adapters/ipmi_adapter.py
+++ b/fuel/deploy/dha_adapters/ipmi_adapter.py
@@ -1,8 +1,10 @@
import common
+import time
from hardware_adapter import HardwareAdapter
log = common.log
exec_cmd = common.exec_cmd
+err = common.err
class IpmiAdapter(HardwareAdapter):
@@ -27,28 +29,72 @@ class IpmiAdapter(HardwareAdapter):
return mac_list
def node_power_on(self, node_id):
+ WAIT_LOOP = 200
+ SLEEP_TIME = 3
log('Power ON Node %s' % node_id)
cmd_prefix = self.ipmi_cmd(node_id)
state = exec_cmd('%s chassis power status' % cmd_prefix)
if state == 'Chassis Power is off':
exec_cmd('%s chassis power on' % cmd_prefix)
+ done = False
+ for i in range(WAIT_LOOP):
+ state, _ = exec_cmd('%s chassis power status' % cmd_prefix,
+ False)
+ if state == 'Chassis Power is on':
+ done = True
+ break
+ else:
+ time.sleep(SLEEP_TIME)
+ if not done:
+ err('Could Not Power ON Node %s' % node_id)
def node_power_off(self, node_id):
+ WAIT_LOOP = 200
+ SLEEP_TIME = 3
log('Power OFF Node %s' % node_id)
cmd_prefix = self.ipmi_cmd(node_id)
state = exec_cmd('%s chassis power status' % cmd_prefix)
if state == 'Chassis Power is on':
+ done = False
exec_cmd('%s chassis power off' % cmd_prefix)
+ for i in range(WAIT_LOOP):
+ state, _ = exec_cmd('%s chassis power status' % cmd_prefix,
+ False)
+ if state == 'Chassis Power is off':
+ done = True
+ break
+ else:
+ time.sleep(SLEEP_TIME)
+ if not done:
+ err('Could Not Power OFF Node %s' % node_id)
def node_reset(self, node_id):
- log('Reset Node %s' % node_id)
+ WAIT_LOOP = 600
+ log('RESET Node %s' % node_id)
cmd_prefix = self.ipmi_cmd(node_id)
state = exec_cmd('%s chassis power status' % cmd_prefix)
if state == 'Chassis Power is on':
+ was_shut_off = False
+ done = False
exec_cmd('%s chassis power reset' % cmd_prefix)
+ for i in range(WAIT_LOOP):
+ state, _ = exec_cmd('%s chassis power status' % cmd_prefix,
+ False)
+ if state == 'Chassis Power is off':
+ was_shut_off = True
+ elif state == 'Chassis Power is on' and was_shut_off:
+ done = True
+ break
+ time.sleep(1)
+ if not done:
+ err('Could Not RESET Node %s' % node_id)
+ else:
+ err('Cannot RESET Node %s because it\'s not Active, state: %s'
+ % (node_id, state))
def node_set_boot_order(self, node_id, boot_order_list):
log('Set boot order %s on Node %s' % (boot_order_list, node_id))
+ boot_order_list.reverse()
cmd_prefix = self.ipmi_cmd(node_id)
for dev in boot_order_list:
if dev == 'pxe':
@@ -58,4 +104,4 @@ class IpmiAdapter(HardwareAdapter):
exec_cmd('%s chassis bootdev cdrom' % cmd_prefix)
elif dev == 'disk':
exec_cmd('%s chassis bootdev disk options=persistent'
- % cmd_prefix)
+ % cmd_prefix) \ No newline at end of file
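node_power_on, node_power_off and node_reset now share the same poll-until-state pattern: issue the chassis power command, then keep re-reading chassis power status (with the failure check disabled) until the expected string appears or the retry budget is exhausted. The same pattern condensed into a standalone helper, as a restatement of the loops above rather than code from this patch:

import time

import common

exec_cmd = common.exec_cmd

def wait_for_power_state(cmd_prefix, wanted, wait_loop=200, sleep_time=3):
    # Poll '<ipmitool prefix> chassis power status' until it reports the wanted state.
    for _ in range(wait_loop):
        state, _rc = exec_cmd('%s chassis power status' % cmd_prefix, False)
        if state == wanted:
            return True
        time.sleep(sleep_time)
    return False

# After '%s chassis power on': wait_for_power_state(prefix, 'Chassis Power is on'),
# then call err(...) if it returns False.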
diff --git a/fuel/deploy/dha_adapters/libvirt_adapter.py b/fuel/deploy/dha_adapters/libvirt_adapter.py
index dde4946..1eca548 100644
--- a/fuel/deploy/dha_adapters/libvirt_adapter.py
+++ b/fuel/deploy/dha_adapters/libvirt_adapter.py
@@ -96,12 +96,6 @@ class LibvirtAdapter(HardwareAdapter):
exec_cmd('virsh change-media %s --insert %s %s'
% (vm_name, device, iso_file))
- def get_disks(self):
- return self.dha_struct['disks']
-
- def get_node_role(self, node_id):
- return self.get_node_property(node_id, 'role')
-
def get_node_pxe_mac(self, node_id):
mac_list = []
vm_name = self.get_node_property(node_id, 'libvirtName')
diff --git a/fuel/deploy/environments/__init__.py b/fuel/deploy/environments/__init__.py
new file mode 100644
index 0000000..c274feb
--- /dev/null
+++ b/fuel/deploy/environments/__init__.py
@@ -0,0 +1 @@
+__author__ = 'eszicse'
diff --git a/fuel/deploy/environments/execution_environment.py b/fuel/deploy/environments/execution_environment.py
new file mode 100644
index 0000000..4f612a6
--- /dev/null
+++ b/fuel/deploy/environments/execution_environment.py
@@ -0,0 +1,67 @@
+from lxml import etree
+
+import common
+from dha_adapters.libvirt_adapter import LibvirtAdapter
+
+exec_cmd = common.exec_cmd
+err = common.err
+log = common.log
+check_dir_exists = common.check_dir_exists
+check_file_exists = common.check_file_exists
+check_if_root = common.check_if_root
+
+class ExecutionEnvironment(object):
+
+ def __init__(self, storage_dir, dha_file, root_dir):
+ self.storage_dir = storage_dir
+ self.dha = LibvirtAdapter(dha_file)
+ self.root_dir = root_dir
+ self.parser = etree.XMLParser(remove_blank_text=True)
+ self.fuel_node_id = self.dha.get_fuel_node_id()
+
+ def delete_vm(self, node_id):
+ vm_name = self.dha.get_node_property(node_id, 'libvirtName')
+ r, c = exec_cmd('virsh dumpxml %s' % vm_name, False)
+ if c:
+ return
+ self.undefine_vm_delete_disk(r, vm_name)
+
+ def undefine_vm_delete_disk(self, printout, vm_name):
+ disk_files = []
+ xml_dump = etree.fromstring(printout, self.parser)
+ disks = xml_dump.xpath('/domain/devices/disk')
+ for disk in disks:
+ sources = disk.xpath('source')
+ for source in sources:
+ source_file = source.get('file')
+ if source_file:
+ disk_files.append(source_file)
+ log('Deleting VM %s with disks %s' % (vm_name, disk_files))
+ exec_cmd('virsh destroy %s' % vm_name, False)
+ exec_cmd('virsh undefine %s' % vm_name, False)
+ for file in disk_files:
+ exec_cmd('rm -f %s' % file)
+
+ def define_vm(self, vm_name, temp_vm_file, disk_path):
+ log('Creating VM %s with disks %s' % (vm_name, disk_path))
+ with open(temp_vm_file) as f:
+ vm_xml = etree.parse(f)
+ names = vm_xml.xpath('/domain/name')
+ for name in names:
+ name.text = vm_name
+ uuids = vm_xml.xpath('/domain/uuid')
+ for uuid in uuids:
+ uuid.getparent().remove(uuid)
+ disks = vm_xml.xpath('/domain/devices/disk')
+ for disk in disks:
+ if (disk.get('type') == 'file'
+ and disk.get('device') == 'disk'):
+ sources = disk.xpath('source')
+ for source in sources:
+ disk.remove(source)
+ source = etree.Element('source')
+ source.set('file', disk_path)
+ disk.append(source)
+ with open(temp_vm_file, 'w') as f:
+ vm_xml.write(f, pretty_print=True, xml_declaration=True)
+ exec_cmd('virsh define %s' % temp_vm_file)
\ No newline at end of file
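
For reference, `define_vm()` above rewrites the libvirt domain XML with lxml before calling `virsh define`: it renames the domain, drops any `<uuid>` so libvirt generates a fresh one, and repoints the file-backed disk. A self-contained sketch of the same manipulation (the domain snippet and paths are invented for illustration):

from lxml import etree

xml = """<domain type='kvm'>
  <name>template</name>
  <uuid>00000000-0000-0000-0000-000000000000</uuid>
  <devices>
    <disk type='file' device='disk'><source file='/old.raw'/></disk>
  </devices>
</domain>"""

dom = etree.fromstring(xml)
dom.find('name').text = 'controller1'
uuid = dom.find('uuid')
if uuid is not None:
    dom.remove(uuid)  # libvirt will generate a new UUID on define
for disk in dom.xpath("/domain/devices/disk[@type='file'][@device='disk']"):
    for source in disk.findall('source'):
        disk.remove(source)
    etree.SubElement(disk, 'source').set('file', '/var/lib/images/controller1.raw')
print(etree.tostring(dom, pretty_print=True).decode())
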
diff --git a/fuel/deploy/environments/libvirt_environment.py b/fuel/deploy/environments/libvirt_environment.py
new file mode 100644
index 0000000..e156fd2
--- /dev/null
+++ b/fuel/deploy/environments/libvirt_environment.py
@@ -0,0 +1,93 @@
+from lxml import etree
+import glob
+
+import common
+from execution_environment import ExecutionEnvironment
+
+exec_cmd = common.exec_cmd
+err = common.err
+log = common.log
+check_dir_exists = common.check_dir_exists
+check_file_exists = common.check_file_exists
+check_if_root = common.check_if_root
+
+NET_DIR = 'libvirt/networks'
+
+class LibvirtEnvironment(ExecutionEnvironment):
+
+ def __init__(self, storage_dir, dha_file, dea, root_dir):
+ super(LibvirtEnvironment, self).__init__(
+ storage_dir, dha_file, root_dir)
+ self.dea = dea
+ self.network_dir = '%s/%s' % (self.root_dir, NET_DIR)
+ self.node_ids = self.dha.get_all_node_ids()
+ self.net_names = self.collect_net_names()
+
+ def create_storage(self, node_id, disk_path, disk_sizes):
+ if node_id == self.fuel_node_id:
+ disk_size = disk_sizes['fuel']
+ else:
+ roles = self.dea.get_node_role(node_id)
+ role = 'controller' if 'controller' in roles else 'compute'
+ disk_size = disk_sizes[role]
+ exec_cmd('fallocate -l %s %s' % (disk_size, disk_path))
+
+ def create_vms(self):
+ temp_dir = exec_cmd('mktemp -d')
+ disk_sizes = self.dha.get_disks()
+ for node_id in self.node_ids:
+ vm_name = self.dha.get_node_property(node_id, 'libvirtName')
+ vm_template = '%s/%s' % (self.root_dir,
+ self.dha.get_node_property(
+ node_id, 'libvirtTemplate'))
+ check_file_exists(vm_template)
+ disk_path = '%s/%s.raw' % (self.storage_dir, vm_name)
+ self.create_storage(node_id, disk_path, disk_sizes)
+ temp_vm_file = '%s/%s' % (temp_dir, vm_name)
+ exec_cmd('cp %s %s' % (vm_template, temp_vm_file))
+ self.define_vm(vm_name, temp_vm_file, disk_path)
+ exec_cmd('rm -fr %s' % temp_dir)
+
+ def create_networks(self):
+ for net_file in glob.glob('%s/*' % self.network_dir):
+ exec_cmd('virsh net-define %s' % net_file)
+ for net in self.net_names:
+ log('Creating network %s' % net)
+ exec_cmd('virsh net-autostart %s' % net)
+ exec_cmd('virsh net-start %s' % net)
+
+ def delete_networks(self):
+ for net in self.net_names:
+ log('Deleting network %s' % net)
+ exec_cmd('virsh net-destroy %s' % net, False)
+ exec_cmd('virsh net-undefine %s' % net, False)
+
+ def get_net_name(self, net_file):
+ with open(net_file) as f:
+ net_xml = etree.parse(f)
+ name_list = net_xml.xpath('/network/name')
+ for name in name_list:
+ net_name = name.text
+ return net_name
+
+ def collect_net_names(self):
+ net_list = []
+ for net_file in glob.glob('%s/*' % self.network_dir):
+ name = self.get_net_name(net_file)
+ net_list.append(name)
+ return net_list
+
+ def delete_vms(self):
+ for node_id in self.node_ids:
+ self.delete_vm(node_id)
+
+ def setup_environment(self):
+ check_if_root()
+ check_dir_exists(self.network_dir)
+ self.cleanup_environment()
+ self.create_vms()
+ self.create_networks()
+
+ def cleanup_environment(self):
+ self.delete_vms()
+ self.delete_networks()
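
`create_storage()` above simply preallocates a raw image whose size is picked by node role. A roughly equivalent stand-alone sequence (the 30G sizes mirror the disks section of the dha.yaml files in this patch; the path and role string are assumptions):

import subprocess

disk_sizes = {'fuel': '30G', 'controller': '30G', 'compute': '30G'}
roles = 'ceph-osd,controller'                         # from a DEA node entry
role = 'controller' if 'controller' in roles else 'compute'
disk_path = '/var/lib/opnfv/images/controller1.raw'   # assumed storage_dir
subprocess.check_call(['fallocate', '-l', disk_sizes[role], disk_path])
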
diff --git a/fuel/deploy/environments/virtual_fuel.py b/fuel/deploy/environments/virtual_fuel.py
new file mode 100644
index 0000000..1f939f0
--- /dev/null
+++ b/fuel/deploy/environments/virtual_fuel.py
@@ -0,0 +1,60 @@
+from lxml import etree
+
+import common
+from execution_environment import ExecutionEnvironment
+
+exec_cmd = common.exec_cmd
+log = common.log
+check_file_exists = common.check_file_exists
+check_if_root = common.check_if_root
+
+class VirtualFuel(ExecutionEnvironment):
+
+ def __init__(self, storage_dir, pxe_bridge, dha_file, root_dir):
+ super(VirtualFuel, self).__init__(
+ storage_dir, dha_file, root_dir)
+ self.pxe_bridge = pxe_bridge
+
+ def set_vm_nic(self, temp_vm_file):
+ with open(temp_vm_file) as f:
+ vm_xml = etree.parse(f)
+ interfaces = vm_xml.xpath('/domain/devices/interface')
+ for interface in interfaces:
+ interface.getparent().remove(interface)
+ interface = etree.Element('interface')
+ interface.set('type', 'bridge')
+ source = etree.SubElement(interface, 'source')
+ source.set('bridge', self.pxe_bridge)
+ model = etree.SubElement(interface, 'model')
+ model.set('type', 'virtio')
+ devices = vm_xml.xpath('/domain/devices')
+ if devices:
+ device = devices[0]
+ device.append(interface)
+ with open(temp_vm_file, 'w') as f:
+ vm_xml.write(f, pretty_print=True, xml_declaration=True)
+
+ def create_vm(self):
+ temp_dir = exec_cmd('mktemp -d')
+ vm_name = self.dha.get_node_property(self.fuel_node_id, 'libvirtName')
+ vm_template = '%s/%s' % (self.root_dir,
+ self.dha.get_node_property(
+ self.fuel_node_id, 'libvirtTemplate'))
+ check_file_exists(vm_template)
+ disk_path = '%s/%s.raw' % (self.storage_dir, vm_name)
+ disk_sizes = self.dha.get_disks()
+ disk_size = disk_sizes['fuel']
+ exec_cmd('fallocate -l %s %s' % (disk_size, disk_path))
+ temp_vm_file = '%s/%s' % (temp_dir, vm_name)
+ exec_cmd('cp %s %s' % (vm_template, temp_vm_file))
+ self.set_vm_nic(temp_vm_file)
+ self.define_vm(vm_name, temp_vm_file, disk_path)
+ exec_cmd('rm -fr %s' % temp_dir)
+
+ def setup_environment(self):
+ check_if_root()
+ self.cleanup_environment()
+ self.create_vm()
+
+ def cleanup_environment(self):
+ self.delete_vm(self.fuel_node_id)
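
`set_vm_nic()` above strips every existing `<interface>` from the Fuel VM template and appends a single bridge NIC attached to the PXE bridge. The element it builds looks roughly like this (the bridge name `pxebr0` is a placeholder for `self.pxe_bridge`):

from lxml import etree

interface = etree.Element('interface', type='bridge')
etree.SubElement(interface, 'source', bridge='pxebr0')
etree.SubElement(interface, 'model', type='virtio')
print(etree.tostring(interface, pretty_print=True).decode())
# <interface type="bridge">
#   <source bridge="pxebr0"/>
#   <model type="virtio"/>
# </interface>
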
diff --git a/fuel/deploy/install_fuel_master.py b/fuel/deploy/install_fuel_master.py
index bb8e7e1..ea24ff0 100644
--- a/fuel/deploy/install_fuel_master.py
+++ b/fuel/deploy/install_fuel_master.py
@@ -32,15 +32,6 @@ class InstallFuelMaster(object):
self.dha.node_power_off(self.fuel_node_id)
- self.zero_mbr_set_boot_order()
-
- self.proceed_with_installation()
-
- def custom_install(self):
- log('Start Custom Fuel Installation')
-
- self.dha.node_power_off(self.fuel_node_id)
-
log('Zero the MBR')
self.dha.node_zero_mbr(self.fuel_node_id)
@@ -68,7 +59,7 @@ class InstallFuelMaster(object):
log('Let the Fuel deployment continue')
log('Found FUEL menu as PID %s, now killing it' % fuel_menu_pid)
- self.ssh_exec_cmd('kill %s' % fuel_menu_pid)
+ self.ssh_exec_cmd('kill %s' % fuel_menu_pid, False)
log('Wait until installation complete')
self.wait_until_installation_completed()
@@ -81,18 +72,6 @@ class InstallFuelMaster(object):
log('Fuel Master installed successfully !')
- def zero_mbr_set_boot_order(self):
- if self.dha.node_can_zero_mbr(self.fuel_node_id):
- log('Fuel Node %s capable of zeroing MBR so doing that...'
- % self.fuel_node_id)
- self.dha.node_zero_mbr(self.fuel_node_id)
- self.dha.node_set_boot_order(self.fuel_node_id, ['disk', 'iso'])
- elif self.dha.node_can_set_boot_order_live(self.fuel_node_id):
- log('Node %s can change ISO boot order live' % self.fuel_node_id)
- self.dha.node_set_boot_order(self.fuel_node_id, ['iso', 'disk'])
- else:
- err('No way to install Fuel node')
-
def wait_for_node_up(self):
WAIT_LOOP = 60
SLEEP_TIME = 10
@@ -103,8 +82,8 @@ class InstallFuelMaster(object):
success = True
break
except Exception as e:
- log('EXCEPTION [%s] received when SSH-ing into Fuel VM %s ... '
- 'sleeping %s seconds' % (e, self.fuel_ip, SLEEP_TIME))
+ log('Trying to SSH into Fuel VM %s ... sleeping %s seconds'
+ % (self.fuel_ip, SLEEP_TIME))
time.sleep(SLEEP_TIME)
finally:
self.ssh.close()
@@ -138,9 +117,9 @@ class InstallFuelMaster(object):
break
return fuel_menu_pid
- def ssh_exec_cmd(self, cmd):
+ def ssh_exec_cmd(self, cmd, check=True):
with self.ssh:
- ret = self.ssh.exec_cmd(cmd)
+ ret = self.ssh.exec_cmd(cmd, check=check)
return ret
def inject_own_astute_yaml(self):
@@ -159,7 +138,7 @@ class InstallFuelMaster(object):
self.work_dir, os.path.basename(self.dea_file)))
def wait_until_installation_completed(self):
- WAIT_LOOP = 180
+ WAIT_LOOP = 320
SLEEP_TIME = 10
CMD = 'ps -ef | grep %s | grep -v grep' % BOOTSTRAP_ADMIN
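
The WAIT_LOOP bump above gives the Fuel installation roughly 320 * 10 s, about 53 minutes, before it is declared failed. A sketch of that completion check, polling over SSH until the bootstrap process disappears (the helper name and the process pattern are assumptions based on the hunk above, not verbatim code from the patch):

import time

def wait_until_gone(ssh_exec_cmd, pattern='bootstrap_admin_node.sh',
                    attempts=320, sleep_time=10):
    # Keep polling the process list on the Fuel master; success when the
    # bootstrap script is no longer running.
    cmd = 'ps -ef | grep %s | grep -v grep' % pattern
    for _ in range(attempts):
        if not ssh_exec_cmd(cmd, False):
            return True
        time.sleep(sleep_time)
    return False
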
diff --git a/fuel/deploy/libvirt/conf/ha/dea.yaml b/fuel/deploy/libvirt/conf/ha/dea.yaml
new file mode 100644
index 0000000..907bf90
--- /dev/null
+++ b/fuel/deploy/libvirt/conf/ha/dea.yaml
@@ -0,0 +1,976 @@
+title: Deployment Environment Adapter (DEA)
+# DEA API version supported
+version: 1.1
+created: Sat Apr 25 16:26:22 UTC 2015
+comment: Small libvirt setup
+environment_name: opnfv_virt
+environment_mode: ha
+wanted_release: Juno on Ubuntu 12.04.4
+nodes:
+- id: 1
+ interfaces: interfaces_1
+ transformations: transformations_1
+ role: ceph-osd,controller
+- id: 2
+ interfaces: interfaces_1
+ transformations: transformations_1
+ role: ceph-osd,controller
+- id: 3
+ interfaces: interfaces_1
+ transformations: transformations_1
+ role: ceph-osd,controller
+- id: 4
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
+- id: 5
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
+- id: 6
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
+fuel:
+ ADMIN_NETWORK:
+ ipaddress: 10.20.0.2
+ netmask: 255.255.255.0
+ dhcp_pool_start: 10.20.0.3
+ dhcp_pool_end: 10.20.0.254
+ DNS_UPSTREAM: 8.8.8.8
+ DNS_DOMAIN: domain.tld
+ DNS_SEARCH: domain.tld
+ FUEL_ACCESS:
+ user: admin
+ password: admin
+ HOSTNAME: opnfv_virt
+ NTP1: 0.pool.ntp.org
+ NTP2: 1.pool.ntp.org
+ NTP3: 2.pool.ntp.org
+interfaces:
+ interfaces_1:
+ eth0:
+ - fuelweb_admin
+ - management
+ eth1:
+ - storage
+ eth2:
+ - private
+ eth3:
+ - public
+transformations:
+ transformations_1:
+ - action: add-br
+ name: br-eth0
+ - action: add-port
+ bridge: br-eth0
+ name: eth0
+ - action: add-br
+ name: br-eth1
+ - action: add-port
+ bridge: br-eth1
+ name: eth1
+ - action: add-br
+ name: br-eth2
+ - action: add-port
+ bridge: br-eth2
+ name: eth2
+ - action: add-br
+ name: br-eth3
+ - action: add-port
+ bridge: br-eth3
+ name: eth3
+ - action: add-br
+ name: br-ex
+ - action: add-br
+ name: br-mgmt
+ - action: add-br
+ name: br-storage
+ - action: add-br
+ name: br-fw-admin
+ - action: add-patch
+ bridges:
+ - br-eth1
+ - br-storage
+ tags:
+ - 102
+ - 0
+ vlan_ids:
+ - 102
+ - 0
+ - action: add-patch
+ bridges:
+ - br-eth0
+ - br-mgmt
+ tags:
+ - 101
+ - 0
+ vlan_ids:
+ - 101
+ - 0
+ - action: add-patch
+ bridges:
+ - br-eth0
+ - br-fw-admin
+ trunks:
+ - 0
+ - action: add-patch
+ bridges:
+ - br-eth3
+ - br-ex
+ trunks:
+ - 0
+ - action: add-br
+ name: br-prv
+ - action: add-patch
+ bridges:
+ - br-eth2
+ - br-prv
+ transformations_2:
+ - action: add-br
+ name: br-eth0
+ - action: add-port
+ bridge: br-eth0
+ name: eth0
+ - action: add-br
+ name: br-eth1
+ - action: add-port
+ bridge: br-eth1
+ name: eth1
+ - action: add-br
+ name: br-eth2
+ - action: add-port
+ bridge: br-eth2
+ name: eth2
+ - action: add-br
+ name: br-eth3
+ - action: add-port
+ bridge: br-eth3
+ name: eth3
+ - action: add-br
+ name: br-mgmt
+ - action: add-br
+ name: br-storage
+ - action: add-br
+ name: br-fw-admin
+ - action: add-patch
+ bridges:
+ - br-eth1
+ - br-storage
+ tags:
+ - 102
+ - 0
+ vlan_ids:
+ - 102
+ - 0
+ - action: add-patch
+ bridges:
+ - br-eth0
+ - br-mgmt
+ tags:
+ - 101
+ - 0
+ vlan_ids:
+ - 101
+ - 0
+ - action: add-patch
+ bridges:
+ - br-eth0
+ - br-fw-admin
+ trunks:
+ - 0
+ - action: add-br
+ name: br-prv
+ - action: add-patch
+ bridges:
+ - br-eth2
+ - br-prv
+opnfv:
+ compute: {}
+ controller: {}
+network:
+ networking_parameters:
+ base_mac: fa:16:3e:00:00:00
+ dns_nameservers:
+ - 8.8.4.4
+ - 8.8.8.8
+ floating_ranges:
+ - - 172.16.0.130
+ - 172.16.0.254
+ gre_id_range:
+ - 2
+ - 65535
+ internal_cidr: 192.168.111.0/24
+ internal_gateway: 192.168.111.1
+ net_l23_provider: ovs
+ segmentation_type: vlan
+ vlan_range:
+ - 1000
+ - 1030
+ networks:
+ - cidr: 172.16.0.0/24
+ gateway: 172.16.0.1
+ ip_ranges:
+ - - 172.16.0.2
+ - 172.16.0.126
+ meta:
+ assign_vip: true
+ cidr: 172.16.0.0/24
+ configurable: true
+ floating_range_var: floating_ranges
+ ip_range:
+ - 172.16.0.2
+ - 172.16.0.126
+ map_priority: 1
+ name: public
+ notation: ip_ranges
+ render_addr_mask: public
+ render_type: null
+ use_gateway: true
+ vlan_start: null
+ name: public
+ vlan_start: null
+ - cidr: 192.168.0.0/24
+ gateway: null
+ ip_ranges:
+ - - 192.168.0.1
+ - 192.168.0.254
+ meta:
+ assign_vip: true
+ cidr: 192.168.0.0/24
+ configurable: true
+ map_priority: 2
+ name: management
+ notation: cidr
+ render_addr_mask: internal
+ render_type: cidr
+ use_gateway: false
+ vlan_start: 101
+ name: management
+ vlan_start: 101
+ - cidr: 192.168.1.0/24
+ gateway: null
+ ip_ranges:
+ - - 192.168.1.1
+ - 192.168.1.254
+ meta:
+ assign_vip: false
+ cidr: 192.168.1.0/24
+ configurable: true
+ map_priority: 2
+ name: storage
+ notation: cidr
+ render_addr_mask: storage
+ render_type: cidr
+ use_gateway: false
+ vlan_start: 102
+ name: storage
+ vlan_start: 102
+ - cidr: null
+ gateway: null
+ ip_ranges: []
+ meta:
+ assign_vip: false
+ configurable: false
+ map_priority: 2
+ name: private
+ neutron_vlan_range: true
+ notation: null
+ render_addr_mask: null
+ render_type: null
+ seg_type: vlan
+ use_gateway: false
+ vlan_start: null
+ name: private
+ vlan_start: null
+ - cidr: 10.20.0.0/24
+ gateway: null
+ ip_ranges:
+ - - 10.20.0.3
+ - 10.20.0.254
+ meta:
+ assign_vip: false
+ configurable: false
+ map_priority: 0
+ notation: ip_ranges
+ render_addr_mask: null
+ render_type: null
+ unmovable: true
+ use_gateway: true
+ name: fuelweb_admin
+ vlan_start: null
+settings:
+ editable:
+ access:
+ email:
+ description: Email address for Administrator
+ label: email
+ type: text
+ value: admin@localhost
+ weight: 40
+ metadata:
+ label: Access
+ weight: 10
+ password:
+ description: Password for Administrator
+ label: password
+ type: password
+ value: admin
+ weight: 20
+ tenant:
+ description: Tenant (project) name for Administrator
+ label: tenant
+ regex:
+ error: Invalid tenant name
+ source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
+ type: text
+ value: admin
+ weight: 30
+ user:
+ description: Username for Administrator
+ label: username
+ regex:
+ error: Invalid username
+ source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
+ type: text
+ value: admin
+ weight: 10
+ additional_components:
+ ceilometer:
+ description: If selected, Ceilometer component will be installed
+ label: Install Ceilometer
+ type: checkbox
+ value: false
+ weight: 40
+ heat:
+ description: ''
+ label: ''
+ type: hidden
+ value: true
+ weight: 30
+ metadata:
+ label: Additional Components
+ weight: 20
+ murano:
+ description: If selected, Murano component will be installed
+ label: Install Murano
+ restrictions:
+ - cluster:net_provider != 'neutron'
+ type: checkbox
+ value: false
+ weight: 20
+ sahara:
+ description: If selected, Sahara component will be installed
+ label: Install Sahara
+ type: checkbox
+ value: false
+ weight: 10
+ common:
+ auth_key:
+ description: Public key(s) to include in authorized_keys on deployed nodes
+ label: Public Key
+ type: text
+ value: ''
+ weight: 70
+ auto_assign_floating_ip:
+ description: If selected, OpenStack will automatically assign a floating IP
+ to a new instance
+ label: Auto assign floating IP
+ restrictions:
+ - cluster:net_provider == 'neutron'
+ type: checkbox
+ value: false
+ weight: 40
+ compute_scheduler_driver:
+ label: Scheduler driver
+ type: radio
+ value: nova.scheduler.filter_scheduler.FilterScheduler
+ values:
+ - data: nova.scheduler.filter_scheduler.FilterScheduler
+ description: Currently the most advanced OpenStack scheduler. See the OpenStack
+ documentation for details.
+ label: Filter scheduler
+ - data: nova.scheduler.simple.SimpleScheduler
+ description: This is 'naive' scheduler which tries to find the least loaded
+ host
+ label: Simple scheduler
+ weight: 40
+ debug:
+ description: Debug logging mode provides more information, but requires more
+ disk space.
+ label: OpenStack debug logging
+ type: checkbox
+ value: false
+ weight: 20
+ disable_offload:
+ description: If set, generic segmentation offload (gso) and generic receive
+ offload (gro) on physical nics will be disabled. See ethtool man.
+ label: Disable generic offload on physical nics
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider == 'neutron' and networking_parameters:segmentation_type
+ == 'gre'
+ type: checkbox
+ value: true
+ weight: 80
+ libvirt_type:
+ label: Hypervisor type
+ type: radio
+ value: kvm
+ values:
+ - data: kvm
+ description: Choose this type of hypervisor if you run OpenStack on hardware
+ label: KVM
+ restrictions:
+ - settings:common.libvirt_type.value == 'vcenter'
+ - data: qemu
+ description: Choose this type of hypervisor if you run OpenStack on virtual
+ hosts.
+ label: QEMU
+ restrictions:
+ - settings:common.libvirt_type.value == 'vcenter'
+ - data: vcenter
+ description: Choose this type of hypervisor if you run OpenStack in a vCenter
+ environment.
+ label: vCenter
+ restrictions:
+ - settings:common.libvirt_type.value != 'vcenter' or cluster:net_provider
+ == 'neutron'
+ weight: 30
+ metadata:
+ label: Common
+ weight: 30
+ nova_quota:
+ description: Quotas are used to limit CPU and memory usage for tenants. Enabling
+ quotas will increase load on the Nova database.
+ label: Nova quotas
+ type: checkbox
+ value: false
+ weight: 25
+ resume_guests_state_on_host_boot:
+ description: Whether to resume previous guests state when the host reboots.
+ If enabled, this option causes guests assigned to the host to resume their
+ previous state. If the guest was running a restart will be attempted when
+ nova-compute starts. If the guest was not running previously, a restart
+ will not be attempted.
+ label: Resume guests state on host boot
+ type: checkbox
+ value: true
+ weight: 60
+ use_cow_images:
+ description: For most cases you will want qcow format. If it's disabled, raw
+ image format will be used to run VMs. OpenStack with raw format currently
+ does not support snapshotting.
+ label: Use qcow format for images
+ type: checkbox
+ value: true
+ weight: 50
+ corosync:
+ group:
+ description: ''
+ label: Group
+ type: text
+ value: 226.94.1.1
+ weight: 10
+ metadata:
+ label: Corosync
+ restrictions:
+ - action: hide
+ condition: 'true'
+ weight: 50
+ port:
+ description: ''
+ label: Port
+ type: text
+ value: '12000'
+ weight: 20
+ verified:
+ description: Set True only if multicast is configured correctly on router.
+ label: Need to pass network verification.
+ type: checkbox
+ value: false
+ weight: 10
+ external_dns:
+ dns_list:
+ description: List of upstream DNS servers, separated by comma
+ label: DNS list
+ type: text
+ value: 8.8.8.8, 8.8.4.4
+ weight: 10
+ metadata:
+ label: Upstream DNS
+ weight: 90
+ external_ntp:
+ metadata:
+ label: Upstream NTP
+ weight: 100
+ ntp_list:
+ description: List of upstream NTP servers, separated by comma
+ label: NTP servers list
+ type: text
+ value: 0.pool.ntp.org, 1.pool.ntp.org
+ weight: 10
+ kernel_params:
+ kernel:
+ description: Default kernel parameters
+ label: Initial parameters
+ type: text
+ value: console=ttyS0,9600 console=tty0 rootdelay=90 nomodeset
+ weight: 45
+ metadata:
+ label: Kernel parameters
+ weight: 40
+ neutron_mellanox:
+ metadata:
+ enabled: true
+ label: Mellanox Neutron components
+ toggleable: false
+ weight: 50
+ plugin:
+ label: Mellanox drivers and SR-IOV plugin
+ type: radio
+ value: disabled
+ values:
+ - data: disabled
+ description: If selected, Mellanox drivers, Neutron and Cinder plugin will
+ not be installed.
+ label: Mellanox drivers and plugins disabled
+ restrictions:
+ - settings:storage.iser.value == true
+ - data: drivers_only
+ description: If selected, Mellanox Ethernet drivers will be installed to
+ support networking over Mellanox NIC. Mellanox Neutron plugin will not
+ be installed.
+ label: Install only Mellanox drivers
+ restrictions:
+ - settings:common.libvirt_type.value != 'kvm'
+ - data: ethernet
+ description: If selected, both Mellanox Ethernet drivers and Mellanox network
+ acceleration (Neutron) plugin will be installed.
+ label: Install Mellanox drivers and SR-IOV plugin
+ restrictions:
+ - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider
+ == 'neutron' and networking_parameters:segmentation_type == 'vlan')
+ weight: 60
+ vf_num:
+ description: Note that one virtual function will be reserved to the storage
+ network, in case of choosing iSER.
+ label: Number of virtual NICs
+ restrictions:
+ - settings:neutron_mellanox.plugin.value != 'ethernet'
+ type: text
+ value: '16'
+ weight: 70
+ nsx_plugin:
+ connector_type:
+ description: Default network transport type to use
+ label: NSX connector type
+ type: select
+ value: stt
+ values:
+ - data: gre
+ label: GRE
+ - data: ipsec_gre
+ label: GRE over IPSec
+ - data: stt
+ label: STT
+ - data: ipsec_stt
+ label: STT over IPSec
+ - data: bridge
+ label: Bridge
+ weight: 80
+ l3_gw_service_uuid:
+ description: UUID for the default L3 gateway service to use with this cluster
+ label: L3 service UUID
+ regex:
+ error: Invalid L3 gateway service UUID
+ source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
+ type: text
+ value: ''
+ weight: 50
+ metadata:
+ enabled: false
+ label: VMware NSX
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider != 'neutron' or networking_parameters:net_l23_provider
+ != 'nsx'
+ weight: 20
+ nsx_controllers:
+ description: One or more IPv4[:port] addresses of NSX controller node, separated
+ by comma (e.g. 10.30.30.2,192.168.110.254:443)
+ label: NSX controller endpoint
+ regex:
+ error: Invalid controller endpoints, specify valid IPv4[:port] pair
+ source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(,(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?)*$
+ type: text
+ value: ''
+ weight: 60
+ nsx_password:
+ description: Password for Administrator
+ label: NSX password
+ regex:
+ error: Empty password
+ source: \S
+ type: password
+ value: ''
+ weight: 30
+ nsx_username:
+ description: NSX administrator's username
+ label: NSX username
+ regex:
+ error: Empty username
+ source: \S
+ type: text
+ value: admin
+ weight: 20
+ packages_url:
+ description: URL to NSX specific packages
+ label: URL to NSX bits
+ regex:
+ error: Invalid URL, specify valid HTTP/HTTPS URL with IPv4 address (e.g.
+ http://10.20.0.2/nsx)
+ source: ^https?://(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(/.*)?$
+ type: text
+ value: ''
+ weight: 70
+ replication_mode:
+ description: ''
+ label: NSX cluster has Service nodes
+ type: checkbox
+ value: true
+ weight: 90
+ transport_zone_uuid:
+ description: UUID of the pre-existing default NSX Transport zone
+ label: Transport zone UUID
+ regex:
+ error: Invalid transport zone UUID
+ source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
+ type: text
+ value: ''
+ weight: 40
+ provision:
+ metadata:
+ label: Provision
+ restrictions:
+ - action: hide
+ condition: not ('experimental' in version:feature_groups)
+ weight: 80
+ method:
+ description: Which provision method to use for this cluster.
+ label: Provision method
+ type: radio
+ value: cobbler
+ values:
+ - data: image
+ description: Copying pre-built images on a disk.
+ label: Image
+ - data: cobbler
+ description: Install from scratch using anaconda or debian-installer.
+ label: Classic (use anaconda or debian-installer)
+ public_network_assignment:
+ assign_to_all_nodes:
+ description: When disabled, public network will be assigned to controllers
+ and zabbix-server only
+ label: Assign public network to all nodes
+ type: checkbox
+ value: false
+ weight: 10
+ metadata:
+ label: Public network assignment
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider != 'neutron'
+ weight: 50
+ storage:
+ ephemeral_ceph:
+ description: Configures Nova to store ephemeral volumes in RBD. This works
+ best if Ceph is enabled for volumes and images, too. Enables live migration
+ of all types of Ceph backed VMs (without this option, live migration will
+ only work with VMs launched from Cinder volumes).
+ label: Ceph RBD for ephemeral volumes (Nova)
+ restrictions:
+ - settings:common.libvirt_type.value == 'vcenter'
+ type: checkbox
+ value: true
+ weight: 75
+ images_ceph:
+ description: Configures Glance to use the Ceph RBD backend to store images.
+ If enabled, this option will prevent Swift from installing.
+ label: Ceph RBD for images (Glance)
+ type: checkbox
+ value: true
+ weight: 30
+ images_vcenter:
+ description: Configures Glance to use the vCenter/ESXi backend to store images.
+ If enabled, this option will prevent Swift from installing.
+ label: VMWare vCenter/ESXi datastore for images (Glance)
+ restrictions:
+ - settings:common.libvirt_type.value != 'vcenter'
+ type: checkbox
+ value: false
+ weight: 35
+ iser:
+ description: 'High performance block storage: Cinder volumes over iSER protocol
+ (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC,
+ and will use a dedicated virtual function for the storage network.'
+ label: iSER protocol for volumes (Cinder)
+ restrictions:
+ - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value
+ != 'kvm'
+ type: checkbox
+ value: false
+ weight: 11
+ metadata:
+ label: Storage
+ weight: 60
+ objects_ceph:
+ description: Configures RadosGW front end for Ceph RBD. This exposes S3 and
+ Swift API Interfaces. If enabled, this option will prevent Swift from installing.
+ label: Ceph RadosGW for objects (Swift API)
+ restrictions:
+ - settings:storage.images_ceph.value == false
+ type: checkbox
+ value: false
+ weight: 80
+ osd_pool_size:
+ description: Configures the default number of object replicas in Ceph. This
+ number must be equal to or lower than the number of deployed 'Storage -
+ Ceph OSD' nodes.
+ label: Ceph object replication factor
+ regex:
+ error: Invalid number
+ source: ^[1-9]\d*$
+ restrictions:
+ - settings:common.libvirt_type.value == 'vcenter'
+ type: text
+ value: '2'
+ weight: 85
+ vc_datacenter:
+ description: Inventory path to a datacenter. If you want to use ESXi host
+ as datastore, it should be "ha-datacenter".
+ label: Datacenter name
+ regex:
+ error: Empty datacenter
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: ''
+ weight: 65
+ vc_datastore:
+ description: Datastore associated with the datacenter.
+ label: Datastore name
+ regex:
+ error: Empty datastore
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: ''
+ weight: 60
+ vc_host:
+ description: IP Address of vCenter/ESXi
+ label: vCenter/ESXi IP
+ regex:
+ error: Specify valid IPv4 address
+ source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: ''
+ weight: 45
+ vc_image_dir:
+ description: The name of the directory where the glance images will be stored
+ in the VMware datastore.
+ label: Datastore Images directory
+ regex:
+ error: Empty images directory
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: /openstack_glance
+ weight: 70
+ vc_password:
+ description: vCenter/ESXi admin password
+ label: Password
+ regex:
+ error: Empty password
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: password
+ value: ''
+ weight: 55
+ vc_user:
+ description: vCenter/ESXi admin username
+ label: Username
+ regex:
+ error: Empty username
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: ''
+ weight: 50
+ volumes_ceph:
+ description: Configures Cinder to store volumes in Ceph RBD images.
+ label: Ceph RBD for volumes (Cinder)
+ restrictions:
+ - settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value
+ == 'vcenter'
+ type: checkbox
+ value: true
+ weight: 20
+ volumes_lvm:
+ description: Requires at least one Storage - Cinder LVM node.
+ label: Cinder LVM over iSCSI for volumes
+ restrictions:
+ - settings:storage.volumes_ceph.value == true
+ type: checkbox
+ value: false
+ weight: 10
+ volumes_vmdk:
+ description: Configures Cinder to store volumes via VMware vCenter.
+ label: VMware vCenter for volumes (Cinder)
+ restrictions:
+ - settings:common.libvirt_type.value != 'vcenter' or settings:storage.volumes_lvm.value
+ == true
+ type: checkbox
+ value: false
+ weight: 15
+ syslog:
+ metadata:
+ label: Syslog
+ weight: 50
+ syslog_port:
+ description: Remote syslog port
+ label: Port
+ regex:
+ error: Invalid Syslog port
+ source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+ type: text
+ value: '514'
+ weight: 20
+ syslog_server:
+ description: Remote syslog hostname
+ label: Hostname
+ type: text
+ value: ''
+ weight: 10
+ syslog_transport:
+ label: Syslog transport protocol
+ type: radio
+ value: tcp
+ values:
+ - data: udp
+ description: ''
+ label: UDP
+ - data: tcp
+ description: ''
+ label: TCP
+ weight: 30
+ vcenter:
+ cluster:
+ description: vCenter cluster name. If you have multiple clusters, use comma
+ to separate names
+ label: Cluster
+ regex:
+ error: Invalid cluster list
+ source: ^([^,\ ]+([\ ]*[^,\ ])*)(,[^,\ ]+([\ ]*[^,\ ])*)*$
+ type: text
+ value: ''
+ weight: 40
+ datastore_regex:
+ description: The Datastore regexp setting specifies the data stores to use
+ with Compute. For example, "nas.*". If you want to use all available datastores,
+ leave this field blank
+ label: Datastore regexp
+ regex:
+ error: Invalid datastore regexp
+ source: ^(\S.*\S|\S|)$
+ type: text
+ value: ''
+ weight: 50
+ host_ip:
+ description: IP Address of vCenter
+ label: vCenter IP
+ regex:
+ error: Specify valid IPv4 address
+ source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
+ type: text
+ value: ''
+ weight: 10
+ metadata:
+ label: vCenter
+ restrictions:
+ - action: hide
+ condition: settings:common.libvirt_type.value != 'vcenter'
+ weight: 20
+ use_vcenter:
+ description: ''
+ label: ''
+ type: hidden
+ value: true
+ weight: 5
+ vc_password:
+ description: vCenter admin password
+ label: Password
+ regex:
+ error: Empty password
+ source: \S
+ type: password
+ value: admin
+ weight: 30
+ vc_user:
+ description: vCenter admin username
+ label: Username
+ regex:
+ error: Empty username
+ source: \S
+ type: text
+ value: admin
+ weight: 20
+ vlan_interface:
+ description: Physical ESXi host ethernet adapter for VLAN networking (e.g.
+ vmnic1). If empty "vmnic0" is used by default
+ label: ESXi VLAN interface
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider != 'nova_network' or networking_parameters:net_manager
+ != 'VlanManager'
+ type: text
+ value: ''
+ weight: 60
+ zabbix:
+ metadata:
+ label: Zabbix Access
+ restrictions:
+ - action: hide
+ condition: not ('experimental' in version:feature_groups)
+ weight: 70
+ password:
+ description: Password for Zabbix Administrator
+ label: password
+ type: password
+ value: zabbix
+ weight: 20
+ username:
+ description: Username for Zabbix Administrator
+ label: username
+ type: text
+ value: admin
+ weight: 10
\ No newline at end of file
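
The nodes list at the top of this DEA refers to the shared `interfaces_1` and `transformations_1`/`transformations_2` blocks by name rather than repeating them per node. A small reader sketch (file name assumed) that resolves those references with PyYAML:

import yaml

with open('dea.yaml') as f:
    dea = yaml.safe_load(f)
for node in dea['nodes']:
    nics = dea['interfaces'][node['interfaces']]
    print('node %s (%s): %s' % (node['id'], node['role'], sorted(nics)))
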
diff --git a/fuel/deploy/libvirt/conf/ha/dha.yaml b/fuel/deploy/libvirt/conf/ha/dha.yaml
new file mode 100644
index 0000000..d862f64
--- /dev/null
+++ b/fuel/deploy/libvirt/conf/ha/dha.yaml
@@ -0,0 +1,42 @@
+title: Deployment Hardware Adapter (DHA)
+# DHA API version supported
+version: 1.1
+created: Sat Apr 25 16:26:22 UTC 2015
+comment: Small libvirt setup
+
+# Adapter to use for this definition
+adapter: libvirt
+
+# Node list.
+# Mandatory property is id, all other properties are adapter specific.
+
+nodes:
+- id: 1
+ libvirtName: controller1
+ libvirtTemplate: libvirt/vms/controller.xml
+- id: 2
+ libvirtName: compute1
+ libvirtTemplate: libvirt/vms/compute.xml
+- id: 3
+ libvirtName: compute2
+ libvirtTemplate: libvirt/vms/compute.xml
+- id: 4
+ libvirtName: compute3
+ libvirtTemplate: libvirt/vms/compute.xml
+- id: 5
+ libvirtName: compute4
+ libvirtTemplate: libvirt/vms/compute.xml
+- id: 6
+ libvirtName: compute5
+ libvirtTemplate: libvirt/vms/compute.xml
+- id: 7
+ libvirtName: fuel-master
+ libvirtTemplate: libvirt/vms/fuel.xml
+ isFuel: yes
+ username: root
+ password: r00tme
+
+disks:
+ fuel: 30G
+ controller: 30G
+ compute: 30G
diff --git a/fuel/deploy/libvirt/dea.yaml b/fuel/deploy/libvirt/conf/multinode/dea.yaml
index 802293f..dfd8382 100644
--- a/fuel/deploy/libvirt/dea.yaml
+++ b/fuel/deploy/libvirt/conf/multinode/dea.yaml
@@ -3,34 +3,34 @@ title: Deployment Environment Adapter (DEA)
version: 1.1
created: Sat Apr 25 16:26:22 UTC 2015
comment: Small libvirt setup
-environment_name: opnfv59-b
+environment_name: opnfv_virt
environment_mode: multinode
wanted_release: Juno on Ubuntu 12.04.4
nodes:
- id: 1
- interfaces: interface1
- transformations: controller1
- role: controller
+ interfaces: interfaces_1
+ transformations: transformations_1
+ role: ceph-osd,controller
- id: 2
- interfaces: interface1
- transformations: controller1
- role: controller
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
- id: 3
- interfaces: interface1
- transformations: controller1
- role: controller
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
- id: 4
- interfaces: interface1
- transformations: compute1
- role: compute
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
- id: 5
- interfaces: interface1
- transformations: compute1
- role: compute
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
- id: 6
- interfaces: interface1
- transformations: compute1
- role: compute
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
fuel:
ADMIN_NETWORK:
ipaddress: 10.20.0.2
@@ -43,12 +43,12 @@ fuel:
FUEL_ACCESS:
user: admin
password: admin
- HOSTNAME: opnfv59
+ HOSTNAME: opnfv_virt
NTP1: 0.pool.ntp.org
NTP2: 1.pool.ntp.org
NTP3: 2.pool.ntp.org
interfaces:
- interface1:
+ interfaces_1:
eth0:
- fuelweb_admin
- management
@@ -59,7 +59,7 @@ interfaces:
eth3:
- public
transformations:
- controller1:
+ transformations_1:
- action: add-br
name: br-eth0
- action: add-port
@@ -126,7 +126,7 @@ transformations:
bridges:
- br-eth2
- br-prv
- compute1:
+ transformations_2:
- action: add-br
name: br-eth0
- action: add-port
@@ -692,14 +692,14 @@ settings:
restrictions:
- settings:common.libvirt_type.value == 'vcenter'
type: checkbox
- value: false
+ value: true
weight: 75
images_ceph:
description: Configures Glance to use the Ceph RBD backend to store images.
If enabled, this option will prevent Swift from installing.
label: Ceph RBD for images (Glance)
type: checkbox
- value: false
+ value: true
weight: 30
images_vcenter:
description: Configures Glance to use the vCenter/ESXi backend to store images.
@@ -833,7 +833,7 @@ settings:
- settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value
== 'vcenter'
type: checkbox
- value: false
+ value: true
weight: 20
volumes_lvm:
description: Requires at least one Storage - Cinder LVM node.
@@ -841,7 +841,7 @@ settings:
restrictions:
- settings:storage.volumes_ceph.value == true
type: checkbox
- value: true
+ value: false
weight: 10
volumes_vmdk:
description: Configures Cinder to store volumes via VMware vCenter.
@@ -973,4 +973,4 @@ settings:
label: username
type: text
value: admin
- weight: 10
+ weight: 10
\ No newline at end of file
diff --git a/fuel/deploy/libvirt/conf/multinode/dha.yaml b/fuel/deploy/libvirt/conf/multinode/dha.yaml
new file mode 100644
index 0000000..5e560bf
--- /dev/null
+++ b/fuel/deploy/libvirt/conf/multinode/dha.yaml
@@ -0,0 +1,42 @@
+title: Deployment Hardware Adapter (DHA)
+# DHA API version supported
+version: 1.1
+created: Sat Apr 25 16:26:22 UTC 2015
+comment: Small libvirt setup
+
+# Adapter to use for this definition
+adapter: libvirt
+
+# Node list.
+# Mandatory property is id, all other properties are adapter specific.
+
+nodes:
+- id: 1
+ libvirtName: controller1
+ libvirtTemplate: libvirt/vms/controller.xml
+- id: 2
+ libvirtName: controller2
+ libvirtTemplate: libvirt/vms/compute.xml
+- id: 3
+ libvirtName: controller3
+ libvirtTemplate: libvirt/vms/compute.xml
+- id: 4
+ libvirtName: compute1
+ libvirtTemplate: libvirt/vms/compute.xml
+- id: 5
+ libvirtName: compute2
+ libvirtTemplate: libvirt/vms/compute.xml
+- id: 6
+ libvirtName: compute3
+ libvirtTemplate: libvirt/vms/compute.xml
+- id: 7
+ libvirtName: fuel-master
+ libvirtTemplate: libvirt/vms/fuel.xml
+ isFuel: yes
+ username: root
+ password: r00tme
+
+disks:
+ fuel: 30G
+ controller: 30G
+ compute: 30G
diff --git a/fuel/deploy/libvirt/dha.yaml b/fuel/deploy/libvirt/dha.yaml
deleted file mode 100644
index ce61e53..0000000
--- a/fuel/deploy/libvirt/dha.yaml
+++ /dev/null
@@ -1,80 +0,0 @@
-title: Deployment Hardware Adapter (DHA)
-# DHA API version supported
-version: 1.1
-created: Sat Apr 25 16:26:22 UTC 2015
-comment: Small libvirt setup
-
-# Adapter to use for this definition
-adapter: libvirt
-
-# Node list.
-# Mandatory fields are id and role.
-# The MAC address of the PXE boot interface is not mandatory
-# to be set, but the field must be present.
-# All other fields are adapter specific.
-
-nodes:
-- id: 1
- pxeMac: 52:54:00:aa:dd:84
- libvirtName: controller1
- libvirtTemplate: controller
- role: controller
-- id: 2
- pxeMac: 52:54:00:aa:dd:84
- libvirtName: controller2
- libvirtTemplate: controller
- role: controller
-- id: 3
- pxeMac: 52:54:00:aa:dd:84
- libvirtName: controller3
- libvirtTemplate: controller
- role: controller
-- id: 4
- pxeMac: 52:54:00:41:64:f3
- libvirtName: compute1
- libvirtTemplate: compute
- role: compute
-- id: 5
- pxeMac: 52:54:00:69:a0:79
- libvirtName: compute2
- libvirtTemplate: compute
- role: compute
-- id: 6
- pxeMac: 52:54:00:69:a0:79
- libvirtName: compute3
- libvirtTemplate: compute
- role: compute
-- id: 7
- pxeMac: 52:54:00:f8:b0:75
- libvirtName: fuel-master
- libvirtTemplate: fuel-master
- isFuel: yes
- nodeCanZeroMBR: yes
- nodeCanSetBootOrderLive: yes
- username: root
- password: r00tme
-
-disks:
- fuel: 30G
- controller: 30G
- compute: 30G
-
-# Deployment power on strategy
-# all: Turn on all nodes at once. There will be no correlation
-# between the DHA and DEA node numbering. MAC addresses
-# will be used to select the node roles though.
-# sequence: Turn on the nodes in sequence starting with the lowest order
-# node and wait for the node to be detected by Fuel. Not until
-# the node has been detected and assigned a role will the next
-# node be turned on.
-powerOnStrategy: all
-
-# If fuelCustomInstall is set to true, Fuel is assumed to be installed by
-# calling the DHA adapter function "dha_fuelCustomInstall()" with two
-# arguments: node ID and the ISO file name to deploy. The custom install
-# function is then to handle all necessary logic to boot the Fuel master
-# from the ISO and then return.
-# Allowed values: true, false
-
-fuelCustomInstall: false
-
diff --git a/fuel/deploy/libvirt/networks/fuel1 b/fuel/deploy/libvirt/networks/fuel1.xml
index 7b2b154..7b2b154 100644
--- a/fuel/deploy/libvirt/networks/fuel1
+++ b/fuel/deploy/libvirt/networks/fuel1.xml
diff --git a/fuel/deploy/libvirt/networks/fuel2 b/fuel/deploy/libvirt/networks/fuel2.xml
index 615c920..615c920 100644
--- a/fuel/deploy/libvirt/networks/fuel2
+++ b/fuel/deploy/libvirt/networks/fuel2.xml
diff --git a/fuel/deploy/libvirt/networks/fuel3 b/fuel/deploy/libvirt/networks/fuel3.xml
index 2383e6c..2383e6c 100644
--- a/fuel/deploy/libvirt/networks/fuel3
+++ b/fuel/deploy/libvirt/networks/fuel3.xml
diff --git a/fuel/deploy/libvirt/networks/fuel4 b/fuel/deploy/libvirt/networks/fuel4.xml
index 5b69f91..5b69f91 100644
--- a/fuel/deploy/libvirt/networks/fuel4
+++ b/fuel/deploy/libvirt/networks/fuel4.xml
diff --git a/fuel/deploy/libvirt/vms/compute b/fuel/deploy/libvirt/vms/compute.xml
index 7591509..2ea35ac 100644
--- a/fuel/deploy/libvirt/vms/compute
+++ b/fuel/deploy/libvirt/vms/compute.xml
@@ -1,5 +1,5 @@
<domain type='kvm'>
- <name>compute4</name>
+ <name>compute</name>
<memory unit='KiB'>8388608</memory>
<currentMemory unit='KiB'>8388608</currentMemory>
<vcpu placement='static'>2</vcpu>
diff --git a/fuel/deploy/libvirt/vms/controller b/fuel/deploy/libvirt/vms/controller.xml
index a871262..4377879 100644
--- a/fuel/deploy/libvirt/vms/controller
+++ b/fuel/deploy/libvirt/vms/controller.xml
@@ -1,5 +1,5 @@
<domain type='kvm'>
- <name>controller1</name>
+ <name>controller</name>
<memory unit='KiB'>2097152</memory>
<currentMemory unit='KiB'>2097152</currentMemory>
<vcpu placement='static'>2</vcpu>
diff --git a/fuel/deploy/libvirt/vms/fuel-master b/fuel/deploy/libvirt/vms/fuel.xml
index f4e652b..1a32860 100644
--- a/fuel/deploy/libvirt/vms/fuel-master
+++ b/fuel/deploy/libvirt/vms/fuel.xml
@@ -1,5 +1,5 @@
<domain type='kvm'>
- <name>fuel-master</name>
+ <name>fuel</name>
<memory unit='KiB'>2097152</memory>
<currentMemory unit='KiB'>2097152</currentMemory>
<vcpu placement='static'>2</vcpu>
diff --git a/fuel/deploy/reap.py b/fuel/deploy/reap.py
new file mode 100644
index 0000000..8a8681a
--- /dev/null
+++ b/fuel/deploy/reap.py
@@ -0,0 +1,330 @@
+import common
+import time
+import os
+import yaml
+import glob
+import shutil
+
+N = common.N
+E = common.E
+R = common.R
+ArgParser = common.ArgParser
+exec_cmd = common.exec_cmd
+parse = common.parse
+err = common.err
+log = common.log
+delete_file = common.delete_file
+commafy = common.commafy
+
+DEA_1 = '''
+title: Deployment Environment Adapter (DEA)
+# DEA API version supported
+version: 1.1
+created: {date}
+comment: {comment}
+'''
+
+DHA_1 = '''
+title: Deployment Hardware Adapter (DHA)
+# DHA API version supported
+version: 1.1
+created: {date}
+comment: {comment}
+
+# Adapter to use for this definition
+# adapter: [ipmi|libvirt]
+adapter:
+
+# Node list.
+# Mandatory properties are id and role.
+# All other properties are adapter specific.
+# For Non-Fuel nodes controlled by:
+# - ipmi adapter you need to provide:
+# pxeMac
+# ipmiIp
+# ipmiUser
+# ipmiPass
+# - libvirt adapter you need to provide:
+# libvirtName: <whatever>
+# libvirtTemplate: [libvirt/vms/controller.xml | libvirt/vms/compute.xml]
+#
+# For the Fuel Node you need to provide:
+# libvirtName: <whatever>
+# libvirtTemplate: libvirt/vms/fuel.xml
+# isFuel: yes
+# username: root
+# password: r00tme
+'''
+
+DHA_2 = '''
+# Adding the Fuel node as node id {node_id}
+# which may not be correct - please adjust as needed.
+'''
+
+DISKS = {'fuel': '30G',
+ 'controller': '30G',
+ 'compute': '30G'}
+
+class Reap(object):
+
+ def __init__(self, dea_file, dha_file, comment):
+ self.dea_file = dea_file
+ self.dha_file = dha_file
+ self.comment = comment
+ self.temp_dir = None
+ self.env = None
+ self.env_id = None
+ self.last_node = None
+
+ def get_env(self):
+ env_list = parse(exec_cmd('fuel env'))
+ if len(env_list) > 1:
+ err('Not exactly one environment')
+ self.env = env_list[0]
+ self.env_id = self.env[E['id']]
+
+ def download_config(self, config_type):
+ log('Download %s config for environment %s'
+ % (config_type, self.env_id))
+ exec_cmd('fuel %s --env %s --download --dir %s'
+ % (config_type, self.env_id, self.temp_dir))
+
+ def write(self, file, text, newline=True):
+ mode = 'a' if os.path.isfile(file) else 'w'
+ with open(file, mode) as f:
+ f.write('%s%s' % (text, ('\n' if newline else '')))
+
+ def write_yaml(self, file, data, newline=True):
+ self.write(file, yaml.dump(data, default_flow_style=False).strip(),
+ newline)
+
+ def get_node_by_id(self, node_list, node_id):
+ for node in node_list:
+ if node[N['id']] == node_id:
+ return node
+
+ def reap_interface(self, node_id, interfaces):
+ interface, mac = self.get_interface(node_id)
+ if_name = None
+ if interfaces:
+ if_name = self.check_dict_exists(interfaces, interface)
+ if not if_name:
+ if_name = 'interfaces_%s' % str(len(interfaces) + 1)
+ interfaces[if_name] = interface
+ return if_name, mac
+
+ def reap_transformation(self, node_id, roles, transformations):
+ main_role = 'controller' if 'controller' in roles else 'compute'
+ node_file = glob.glob('%s/deployment_%s/*%s_%s.yaml'
+ % (self.temp_dir, self.env_id,
+ main_role, node_id))
+ tr_name = None
+ with open(node_file[0]) as f:
+ node_config = yaml.load(f)
+ transformation = node_config['network_scheme']['transformations']
+ if transformations:
+ tr_name = self.check_dict_exists(transformations, transformation)
+ if not tr_name:
+ tr_name = 'transformations_%s' % str(len(transformations) + 1)
+ transformations[tr_name] = transformation
+ return tr_name
+
+ def check_dict_exists(self, main_dict, dict):
+ for key, val in main_dict.iteritems():
+ if cmp(dict, val) == 0:
+ return key
+
+ def reap_nodes_interfaces_transformations(self):
+ node_list = parse(exec_cmd('fuel node'))
+ real_node_ids = [node[N['id']] for node in node_list]
+ real_node_ids.sort()
+ min_node = real_node_ids[0]
+
+ interfaces = {}
+ transformations = {}
+ dea_nodes = []
+ dha_nodes = []
+
+ for real_node_id in real_node_ids:
+ node_id = int(real_node_id) - int(min_node) + 1
+ self.last_node = node_id
+ node = self.get_node_by_id(node_list, real_node_id)
+ roles = commafy(node[N['roles']])
+ if not roles:
+ err('Fuel Node %s has no role' % real_node_id)
+ dea_node = {'id': node_id,
+ 'role': roles}
+ dha_node = {'id': node_id}
+ if_name, mac = self.reap_interface(real_node_id, interfaces)
+ tr_name = self.reap_transformation(real_node_id, roles,
+ transformations)
+ dea_node.update(
+ {'interfaces': if_name,
+ 'transformations': tr_name})
+
+ dha_node.update(
+ {'pxeMac': mac if mac else None,
+ 'ipmiIp': None,
+ 'ipmiUser': None,
+ 'ipmiPass': None,
+ 'libvirtName': None,
+ 'libvirtTemplate': None})
+
+ dea_nodes.append(dea_node)
+ dha_nodes.append(dha_node)
+
+ self.write_yaml(self.dha_file, {'nodes': dha_nodes}, False)
+ self.write_yaml(self.dea_file, {'nodes': dea_nodes})
+ self.write_yaml(self.dea_file, {'interfaces': interfaces})
+ self.write_yaml(self.dea_file, {'transformations': transformations})
+ self.reap_fuel_node_info()
+ self.write_yaml(self.dha_file, {'disks': DISKS})
+
+ def reap_fuel_node_info(self):
+ dha_nodes = []
+ dha_node = {
+ 'id': self.last_node + 1,
+ 'libvirtName': None,
+ 'libvirtTemplate': None,
+ 'isFuel': True,
+ 'username': 'root',
+ 'password': 'r00tme'}
+
+ dha_nodes.append(dha_node)
+
+ self.write(self.dha_file, DHA_2.format(node_id=dha_node['id']), False)
+ self.write_yaml(self.dha_file, dha_nodes)
+
+ def reap_environment_info(self):
+ self.write_yaml(self.dea_file,
+ {'environment_name': self.env[E['name']]})
+ self.write_yaml(self.dea_file,
+ {'environment_mode': self.env[E['mode']]})
+ wanted_release = None
+ rel_list = parse(exec_cmd('fuel release'))
+ for rel in rel_list:
+ if rel[R['id']] == self.env[E['release_id']]:
+ wanted_release = rel[R['name']]
+ self.write_yaml(self.dea_file, {'wanted_release': wanted_release})
+
+ def reap_fuel_settings(self):
+ data = self.read_yaml('/etc/fuel/astute.yaml')
+ fuel = {}
+ del(data['ADMIN_NETWORK']['mac'])
+ del(data['ADMIN_NETWORK']['interface'])
+ for key in ['ADMIN_NETWORK', 'HOSTNAME', 'DNS_DOMAIN', 'DNS_SEARCH',
+ 'DNS_UPSTREAM', 'NTP1', 'NTP2', 'NTP3', 'FUEL_ACCESS']:
+ fuel[key] = data[key]
+ self.write_yaml(self.dea_file, {'fuel': fuel})
+
+ def reap_network_settings(self):
+ network_file = ('%s/network_%s.yaml'
+ % (self.temp_dir, self.env_id))
+ data = self.read_yaml(network_file)
+ network = {}
+ network['networking_parameters'] = data['networking_parameters']
+ network['networks'] = data['networks']
+ for net in network['networks']:
+ del net['id']
+ del net['group_id']
+ self.write_yaml(self.dea_file, {'network': network})
+
+ def reap_settings(self):
+ settings_file = '%s/settings_%s.yaml' % (self.temp_dir, self.env_id)
+ settings = self.read_yaml(settings_file)
+ self.write_yaml(self.dea_file, {'settings': settings})
+
+ def get_opnfv_astute(self, role):
+ node_files = glob.glob('%s/deployment_%s/*%s*.yaml'
+ % (self.temp_dir, self.env_id, role))
+ node_config = self.read_yaml(node_files[0])
+ return node_config['opnfv'] if 'opnfv' in node_config else {}
+
+ def reap_opnfv_astute(self):
+ controller_opnfv_astute = self.get_opnfv_astute('controller')
+ compute_opnfv_astute = self.get_opnfv_astute('compute')
+ opnfv = {}
+ opnfv['opnfv'] = {
+ 'controller': controller_opnfv_astute,
+ 'compute': compute_opnfv_astute}
+ self.write_yaml(self.dea_file, opnfv)
+
+ def get_interface(self, real_node_id):
+ exec_cmd('fuel node --node-id %s --network --download --dir %s'
+ % (real_node_id, self.temp_dir))
+ interface_file = ('%s/node_%s/interfaces.yaml'
+ % (self.temp_dir, real_node_id))
+ interfaces = self.read_yaml(interface_file)
+ interface_config = {}
+ pxe_mac = None
+ for interface in interfaces:
+ networks = []
+ for network in interface['assigned_networks']:
+ networks.append(network['name'])
+ if network['name'] == 'fuelweb_admin':
+ pxe_mac = interface['mac']
+ if networks:
+ interface_config[interface['name']] = networks
+ return interface_config, pxe_mac
+
+ def read_yaml(self, yaml_file):
+ with open(yaml_file) as f:
+ data = yaml.load(f)
+ return data
+
+ def intro(self):
+ delete_file(self.dea_file)
+ delete_file(self.dha_file)
+ self.temp_dir = exec_cmd('mktemp -d')
+ date = time.strftime('%c')
+ self.write(self.dea_file,
+ DEA_1.format(date=date, comment=self.comment), False)
+ self.write(self.dha_file,
+ DHA_1.format(date=date, comment=self.comment))
+ self.get_env()
+ self.download_config('deployment')
+ self.download_config('settings')
+ self.download_config('network')
+
+ def finale(self):
+ log('DEA file is available at %s' % self.dea_file)
+ log('DHA file is available at %s (this is just a template)'
+ % self.dha_file)
+ shutil.rmtree(self.temp_dir)
+
+ def reap(self):
+ self.intro()
+ self.reap_environment_info()
+ self.reap_nodes_interfaces_transformations()
+ self.reap_fuel_settings()
+ self.reap_opnfv_astute()
+ self.reap_network_settings()
+ self.reap_settings()
+ self.finale()
+
+def usage():
+ print '''
+ Usage:
+ python reap.py <dea_file> <dha_file> <comment>
+ '''
+
+def parse_arguments():
+ parser = ArgParser(prog='python %s' % __file__)
+ parser.add_argument('dea_file', nargs='?', action='store',
+ default='dea.yaml',
+ help='Deployment Environment Adapter: dea.yaml')
+ parser.add_argument('dha_file', nargs='?', action='store',
+ default='dha.yaml',
+ help='Deployment Hardware Adapter: dha.yaml')
+ parser.add_argument('comment', nargs='?', action='store', help='Comment')
+ args = parser.parse_args()
+ return (args.dea_file, args.dha_file, args.comment)
+
+def main():
+ dea_file, dha_file, comment = parse_arguments()
+
+ r = Reap(dea_file, dha_file, comment)
+ r.reap()
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
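
reap.py above deduplicates per-node interface and transformation configs through `check_dict_exists()`, so an identical config is written once and referenced by name in the generated DEA. A compact stand-alone illustration of that pattern (the function and variable names here are not from the patch):

def name_or_add(named, config, prefix):
    # Reuse an existing entry if an identical config was already recorded,
    # otherwise store it under the next '<prefix>_N' name.
    for key, val in named.items():
        if val == config:
            return key
    key = '%s_%s' % (prefix, len(named) + 1)
    named[key] = config
    return key

interfaces = {}
print(name_or_add(interfaces, {'eth0': ['fuelweb_admin']}, 'interfaces'))
print(name_or_add(interfaces, {'eth0': ['fuelweb_admin']}, 'interfaces'))  # same name reused

As the usage() text above states, the script itself is invoked as `python reap.py <dea_file> <dha_file> <comment>`.
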
diff --git a/fuel/deploy/setup_environment.py b/fuel/deploy/setup_environment.py
deleted file mode 100644
index 4e0e7ba..0000000
--- a/fuel/deploy/setup_environment.py
+++ /dev/null
@@ -1,165 +0,0 @@
-import sys
-from lxml import etree
-import os
-import glob
-import common
-
-from dha import DeploymentHardwareAdapter
-
-exec_cmd = common.exec_cmd
-err = common.err
-log = common.log
-check_dir_exists = common.check_dir_exists
-check_file_exists = common.check_file_exists
-check_if_root = common.check_if_root
-
-
-class LibvirtEnvironment(object):
-
- def __init__(self, storage_dir, dha_file):
- self.dha = DeploymentHardwareAdapter(dha_file)
- self.storage_dir = storage_dir
- self.parser = etree.XMLParser(remove_blank_text=True)
- self.file_dir = os.path.dirname(os.path.realpath(__file__))
- self.network_dir = '%s/libvirt/networks' % self.file_dir
- self.vm_dir = '%s/libvirt/vms' % self.file_dir
- self.node_ids = self.dha.get_all_node_ids()
- self.fuel_node_id = self.dha.get_fuel_node_id()
- self.net_names = self.collect_net_names()
-
- def create_storage(self, node_id, disk_path, disk_sizes):
- if node_id == self.fuel_node_id:
- disk_size = disk_sizes['fuel']
- else:
- role = self.dha.get_node_role(node_id)
- disk_size = disk_sizes[role]
- exec_cmd('fallocate -l %s %s' % (disk_size, disk_path))
-
- def create_vms(self):
- temp_dir = exec_cmd('mktemp -d')
- disk_sizes = self.dha.get_disks()
- for node_id in self.node_ids:
- vm_name = self.dha.get_node_property(node_id, 'libvirtName')
- vm_template = self.dha.get_node_property(node_id,
- 'libvirtTemplate')
- disk_path = '%s/%s.raw' % (self.storage_dir, vm_name)
- self.create_storage(node_id, disk_path, disk_sizes)
- self.define_vm(vm_name, vm_template, temp_dir, disk_path)
- exec_cmd('rm -fr %s' % temp_dir)
-
- def define_vm(self, vm_name, vm_template, temp_dir, disk_path):
- log('Creating VM %s with disks %s' % (vm_name, disk_path))
- temp_vm_file = '%s/%s' % (temp_dir, vm_name)
- exec_cmd('cp %s/%s %s' % (self.vm_dir, vm_template, temp_vm_file))
- with open(temp_vm_file) as f:
- vm_xml = etree.parse(f)
- names = vm_xml.xpath('/domain/name')
- for name in names:
- name.text = vm_name
- uuids = vm_xml.xpath('/domain/uuid')
- for uuid in uuids:
- uuid.getparent().remove(uuid)
- disks = vm_xml.xpath('/domain/devices/disk')
- for disk in disks:
- sources = disk.xpath('source')
- for source in sources:
- source.set('file', disk_path)
- with open(temp_vm_file, 'w') as f:
- vm_xml.write(f, pretty_print=True, xml_declaration=True)
- exec_cmd('virsh define %s' % temp_vm_file)
-
- def create_networks(self):
- for net_file in glob.glob('%s/*' % self.network_dir):
- exec_cmd('virsh net-define %s' % net_file)
- for net in self.net_names:
- log('Creating network %s' % net)
- exec_cmd('virsh net-autostart %s' % net)
- exec_cmd('virsh net-start %s' % net)
-
- def delete_networks(self):
- for net in self.net_names:
- log('Deleting network %s' % net)
- exec_cmd('virsh net-destroy %s' % net, False)
- exec_cmd('virsh net-undefine %s' % net, False)
-
- def get_net_name(self, net_file):
- with open(net_file) as f:
- net_xml = etree.parse(f)
- name_list = net_xml.xpath('/network/name')
- for name in name_list:
- net_name = name.text
- return net_name
-
- def collect_net_names(self):
- net_list = []
- for net_file in glob.glob('%s/*' % self.network_dir):
- name = self.get_net_name(net_file)
- net_list.append(name)
- return net_list
-
- def delete_vms(self):
- for node_id in self.node_ids:
- vm_name = self.dha.get_node_property(node_id, 'libvirtName')
- r, c = exec_cmd('virsh dumpxml %s' % vm_name, False)
- if c > 0:
- log(r)
- continue
- self.undefine_vm_delete_disk(r, vm_name)
-
- def undefine_vm_delete_disk(self, printout, vm_name):
- disk_files = []
- xml_dump = etree.fromstring(printout, self.parser)
- disks = xml_dump.xpath('/domain/devices/disk')
- for disk in disks:
- sources = disk.xpath('source')
- for source in sources:
- source_file = source.get('file')
- if source_file:
- disk_files.append(source_file)
- log('Deleting VM %s with disks %s' % (vm_name, disk_files))
- exec_cmd('virsh destroy %s' % vm_name, False)
- exec_cmd('virsh undefine %s' % vm_name, False)
- for file in disk_files:
- exec_cmd('rm -f %s' % file)
-
- def setup_environment(self):
- check_if_root()
- check_dir_exists(self.network_dir)
- check_dir_exists(self.vm_dir)
- self.cleanup_environment()
- self.create_vms()
- self.create_networks()
-
- def cleanup_environment(self):
- self.delete_vms()
- self.delete_networks()
-
-
-def usage():
- print '''
- Usage:
- python setup_environment.py <storage_directory> <dha_file>
-
- Example:
- python setup_environment.py /mnt/images dha.yaml
- '''
-
-def parse_arguments():
- if len(sys.argv) != 3:
- log('Incorrect number of arguments')
- usage()
- sys.exit(1)
- storage_dir = sys.argv[-2]
- dha_file = sys.argv[-1]
- check_dir_exists(storage_dir)
- check_file_exists(dha_file)
- return storage_dir, dha_file
-
-def main():
- storage_dir, dha_file = parse_arguments()
-
- virt = LibvirtEnvironment(storage_dir, dha_file)
- virt.setup_environment()
-
-if __name__ == '__main__':
- main() \ No newline at end of file
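(Note: the define_vm() logic removed above rewrites a libvirt domain template with lxml before handing it to `virsh define`. A short sketch of just that XML-rewrite step, assuming the same template layout; the function name and its arguments are illustrative, not part of the tree:

    from lxml import etree

    def customize_domain_xml(template_path, out_path, vm_name, disk_path):
        # Mirrors the removed define_vm(): rename the domain, drop any
        # hard-coded UUID so libvirt generates a fresh one, and point every
        # disk <source> at the per-VM raw image.
        with open(template_path) as f:
            vm_xml = etree.parse(f)
        for name in vm_xml.xpath('/domain/name'):
            name.text = vm_name
        for uuid in vm_xml.xpath('/domain/uuid'):
            uuid.getparent().remove(uuid)
        for source in vm_xml.xpath('/domain/devices/disk/source'):
            source.set('file', disk_path)
        vm_xml.write(out_path, pretty_print=True, xml_declaration=True)

The resulting file is then suitable for `virsh define <out_path>`, exactly as the removed code did with its temp file.)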
diff --git a/fuel/deploy/setup_execution_environment.py b/fuel/deploy/setup_execution_environment.py
new file mode 100644
index 0000000..d97fcde
--- /dev/null
+++ b/fuel/deploy/setup_execution_environment.py
@@ -0,0 +1,36 @@
+import yaml
+import io
+import sys
+import os
+
+import common
+from environments.libvirt_environment import LibvirtEnvironment
+from environments.virtual_fuel import VirtualFuel
+from dea import DeploymentEnvironmentAdapter
+
+exec_cmd = common.exec_cmd
+err = common.err
+log = common.log
+check_dir_exists = common.check_dir_exists
+check_file_exists = common.check_file_exists
+check_if_root = common.check_if_root
+ArgParser = common.ArgParser
+
+class ExecutionEnvironment(object):
+ def __new__(cls, storage_dir, pxe_bridge, dha_path, dea):
+
+ with io.open(dha_path) as yaml_file:
+ dha_struct = yaml.load(yaml_file)
+
+ type = dha_struct['adapter']
+
+ root_dir = os.path.dirname(os.path.realpath(__file__))
+
+ if cls is ExecutionEnvironment:
+ if type == 'libvirt':
+ return LibvirtEnvironment(storage_dir, dha_path, dea, root_dir)
+
+ if type == 'ipmi' or type == 'hp':
+ return VirtualFuel(storage_dir, pxe_bridge, dha_path, root_dir)
+
+ return super(ExecutionEnvironment, cls).__new__(cls)
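(Note: ExecutionEnvironment.__new__ above acts as a factory: it reads the `adapter` field from the DHA file and hands back a LibvirtEnvironment for virtual deployments or a VirtualFuel for bare-metal ipmi/hp deployments. A hedged usage sketch; the paths, bridge name and the DEA construction below are illustrative, and the exact DeploymentEnvironmentAdapter signature lives in dea.py:

    from dea import DeploymentEnvironmentAdapter
    from setup_execution_environment import ExecutionEnvironment

    dea = DeploymentEnvironmentAdapter('dea.yaml')   # illustrative call
    env = ExecutionEnvironment('/mnt/images', 'pxebr', 'dha.yaml', dea)
    # For "adapter: libvirt" the factory returns a LibvirtEnvironment;
    # for "adapter: ipmi" or "adapter: hp" it returns a VirtualFuel.
    env.setup_environment()

Both concrete environments are expected to expose setup_environment(), mirroring the removed setup_environment.py and setup_vfuel.py entry points that this module replaces.)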
diff --git a/fuel/deploy/setup_vfuel.py b/fuel/deploy/setup_vfuel.py
deleted file mode 100644
index 65ee013..0000000
--- a/fuel/deploy/setup_vfuel.py
+++ /dev/null
@@ -1,143 +0,0 @@
-import sys
-from lxml import etree
-import os
-
-import common
-from dha import DeploymentHardwareAdapter
-
-exec_cmd = common.exec_cmd
-err = common.err
-log = common.log
-check_dir_exists = common.check_dir_exists
-check_file_exists = common.check_file_exists
-check_if_root = common.check_if_root
-
-VFUELNET = '''
-iface vfuelnet inet static
- bridge_ports em1
- address 10.40.0.1
- netmask 255.255.255.0
- pre-down iptables -t nat -D POSTROUTING --out-interface p1p1.20 -j MASQUERADE -m comment --comment "vfuelnet"
- pre-down iptables -D FORWARD --in-interface vfuelnet --out-interface p1p1.20 -m comment --comment "vfuelnet"
- post-up iptables -t nat -A POSTROUTING --out-interface p1p1.20 -j MASQUERADE -m comment --comment "vfuelnet"
- post-up iptables -A FORWARD --in-interface vfuelnet --out-interface p1p1.20 -m comment --comment "vfuelnet"
-'''
-VM_DIR = 'baremetal/vm'
-FUEL_DISK_SIZE = '30G'
-IFACE = 'vfuelnet'
-INTERFACE_CONFIG = '/etc/network/interfaces'
-
-class VFuel(object):
-
- def __init__(self, storage_dir, dha_file):
- self.dha = DeploymentHardwareAdapter(dha_file)
- self.storage_dir = storage_dir
- self.parser = etree.XMLParser(remove_blank_text=True)
- self.fuel_node_id = self.dha.get_fuel_node_id()
- self.file_dir = os.path.dirname(os.path.realpath(__file__))
- self.vm_dir = '%s/%s' % (self.file_dir, VM_DIR)
-
- def setup_environment(self):
- check_if_root()
- check_dir_exists(self.vm_dir)
- self.setup_networking()
- self.delete_vm()
- self.create_vm()
-
- def setup_networking(self):
- with open(INTERFACE_CONFIG) as f:
- data = f.read()
- if VFUELNET not in data:
- log('Appending to file %s:\n %s' % (INTERFACE_CONFIG, VFUELNET))
- with open(INTERFACE_CONFIG, 'a') as f:
- f.write('\n%s\n' % VFUELNET)
- if exec_cmd('ip link show | grep %s' % IFACE):
- log('Bring DOWN interface %s' % IFACE)
- exec_cmd('ifdown %s' % IFACE, False)
- log('Bring UP interface %s' % IFACE)
- exec_cmd('ifup %s' % IFACE, False)
-
- def delete_vm(self):
- vm_name = self.dha.get_node_property(self.fuel_node_id, 'libvirtName')
- r, c = exec_cmd('virsh dumpxml %s' % vm_name, False)
- if c > 0:
- log(r)
- return
- self.undefine_vm_delete_disk(r, vm_name)
-
- def undefine_vm_delete_disk(self, printout, vm_name):
- disk_files = []
- xml_dump = etree.fromstring(printout, self.parser)
- disks = xml_dump.xpath('/domain/devices/disk')
- for disk in disks:
- sources = disk.xpath('source')
- for source in sources:
- source_file = source.get('file')
- if source_file:
- disk_files.append(source_file)
- log('Deleting VM %s with disks %s' % (vm_name, disk_files))
- exec_cmd('virsh destroy %s' % vm_name, False)
- exec_cmd('virsh undefine %s' % vm_name, False)
- for file in disk_files:
- exec_cmd('rm -f %s' % file)
-
- def create_vm(self):
- temp_dir = exec_cmd('mktemp -d')
- vm_name = self.dha.get_node_property(self.fuel_node_id, 'libvirtName')
- vm_template = self.dha.get_node_property(self.fuel_node_id,
- 'libvirtTemplate')
- disk_path = '%s/%s.raw' % (self.storage_dir, vm_name)
- exec_cmd('fallocate -l %s %s' % (FUEL_DISK_SIZE, disk_path))
- self.define_vm(vm_name, vm_template, temp_dir, disk_path)
- exec_cmd('rm -fr %s' % temp_dir)
-
- def define_vm(self, vm_name, vm_template, temp_dir, disk_path):
- log('Creating VM %s with disks %s' % (vm_name, disk_path))
- temp_vm_file = '%s/%s' % (temp_dir, vm_name)
- exec_cmd('cp %s/%s %s' % (self.vm_dir, vm_template, temp_vm_file))
- with open(temp_vm_file) as f:
- vm_xml = etree.parse(f)
- names = vm_xml.xpath('/domain/name')
- for name in names:
- name.text = vm_name
- uuids = vm_xml.xpath('/domain/uuid')
- for uuid in uuids:
- uuid.getparent().remove(uuid)
- disks = vm_xml.xpath('/domain/devices/disk')
- for disk in disks:
- sources = disk.xpath('source')
- for source in sources:
- source.set('file', disk_path)
- with open(temp_vm_file, 'w') as f:
- vm_xml.write(f, pretty_print=True, xml_declaration=True)
- exec_cmd('virsh define %s' % temp_vm_file)
-
-
-def usage():
- print '''
- Usage:
- python setup_vfuel.py <storage_directory> <dha_file>
-
- Example:
- python setup_vfuel.py /mnt/images dha.yaml
- '''
-
-def parse_arguments():
- if len(sys.argv) != 3:
- log('Incorrect number of arguments')
- usage()
- sys.exit(1)
- storage_dir = sys.argv[-2]
- dha_file = sys.argv[-1]
- check_dir_exists(storage_dir)
- check_file_exists(dha_file)
- return storage_dir, dha_file
-
-def main():
- storage_dir, dha_file = parse_arguments()
-
- vfuel = VFuel(storage_dir, dha_file)
- vfuel.setup_environment()
-
-if __name__ == '__main__':
- main()
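(Note: the removed setup_networking() above is idempotent: it only appends the vfuelnet stanza to /etc/network/interfaces when it is not already present, then bounces the interface. A small sketch of that pattern under the same assumptions; the function name is illustrative and subprocess stands in for the project's exec_cmd helper:

    import subprocess

    def ensure_iface_stanza(config_file, stanza, iface):
        # Idempotent append, as in the removed setup_networking(): only add
        # the stanza when the config file does not already contain it.
        with open(config_file) as f:
            already_there = stanza in f.read()
        if not already_there:
            with open(config_file, 'a') as f:
                f.write('\n%s\n' % stanza)
        # Bounce the interface; failures are ignored, like the original's
        # exec_cmd(cmd, False) calls.
        subprocess.call(['ifdown', iface])
        subprocess.call(['ifup', iface])

)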
diff --git a/fuel/deploy/ssh_client.py b/fuel/deploy/ssh_client.py
index 9ea227a..8bf87bc 100644
--- a/fuel/deploy/ssh_client.py
+++ b/fuel/deploy/ssh_client.py
@@ -6,6 +6,7 @@ TIMEOUT = 600
log = common.log
err = common.err
+
class SSHClient(object):
def __init__(self, host, username, password):
@@ -18,7 +19,8 @@ class SSHClient(object):
self.client = paramiko.SSHClient()
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.client.connect(self.host, username=self.username,
- password=self.password, timeout=timeout)
+ password=self.password, look_for_keys=False,
+ timeout=timeout)
def close(self):
if self.client is not None:
@@ -60,16 +62,14 @@ class SSHClient(object):
if chan.recv_ready():
data = chan.recv(1024)
while data:
- print data
+ log(data.strip())
data = chan.recv(1024)
if chan.recv_stderr_ready():
error_buff = chan.recv_stderr(1024)
while error_buff:
- print error_buff
+ log(error_buff.strip())
error_buff = chan.recv_stderr(1024)
- exit_status = chan.recv_exit_status()
- log('Exit status %s' % exit_status)
def scp_get(self, remote, local='.', dir=False):
try: