-rw-r--r--build/Makefile6
-rw-r--r--build/bash_completion_apex56
-rwxr-xr-xbuild/build_ovs_nsh.sh22
-rw-r--r--build/nics-template.yaml.jinja236
-rwxr-xr-xbuild/overcloud-full.sh39
-rwxr-xr-xbuild/overcloud-onos.sh8
-rwxr-xr-xbuild/overcloud-opendaylight-sfc.sh11
-rw-r--r--build/rpm_specs/opnfv-apex-common.spec10
-rw-r--r--build/set_perf_images.sh1
-rwxr-xr-xbuild/undercloud.sh2
-rw-r--r--build/variables.sh12
-rw-r--r--ci/PR_revision.log4
-rwxr-xr-xci/deploy.sh53
-rwxr-xr-xci/util.sh18
-rw-r--r--config/deploy/deploy_settings.yaml70
-rw-r--r--config/network/network_settings.yaml302
-rw-r--r--config/network/network_settings_v6.yaml285
-rw-r--r--config/network/network_settings_vlans.yaml287
-rw-r--r--docs/installationprocedure/architecture.rst87
-rw-r--r--docs/installationprocedure/baremetal.rst14
-rw-r--r--docs/installationprocedure/references.rst2
-rw-r--r--docs/installationprocedure/requirements.rst15
-rw-r--r--docs/installationprocedure/virtualinstall.rst2
-rw-r--r--docs/release-notes/release-notes.rst247
-rw-r--r--docs/releasenotes/index.rst (renamed from docs/release-notes/index.rst)0
-rw-r--r--docs/releasenotes/release-notes.rst384
-rw-r--r--lib/common-functions.sh10
-rwxr-xr-xlib/configure-deps-functions.sh26
-rwxr-xr-xlib/overcloud-deploy-functions.sh33
-rwxr-xr-xlib/parse-functions.sh48
-rwxr-xr-xlib/post-install-functions.sh30
-rw-r--r--lib/python/apex/common/constants.py15
-rw-r--r--lib/python/apex/common/utils.py2
-rw-r--r--lib/python/apex/deploy_settings.py2
-rw-r--r--lib/python/apex/inventory.py2
-rw-r--r--lib/python/apex/network_environment.py128
-rw-r--r--lib/python/apex/network_settings.py321
-rwxr-xr-xlib/python/apex_python_utils.py44
-rwxr-xr-xlib/undercloud-functions.sh111
-rwxr-xr-xlib/virtual-setup-functions.sh10
-rw-r--r--tests/config/network_settings_duplicate_nic.yaml115
-rw-r--r--tests/config/network_settings_missing_required_nic.yaml113
-rw-r--r--tests/config/network_settings_nic1_reserved.yaml113
-rw-r--r--tests/config/network_settings_nics_not_specified.yaml107
-rw-r--r--tests/test_apex_deploy_settings.py16
-rw-r--r--tests/test_apex_network_environment.py119
-rw-r--r--tests/test_apex_network_settings.py142
-rw-r--r--tests/test_apex_python_utils_py.py6
48 files changed, 1887 insertions, 1599 deletions
diff --git a/build/Makefile b/build/Makefile
index 89aa647c..b002ed6e 100644
--- a/build/Makefile
+++ b/build/Makefile
@@ -19,9 +19,6 @@ export RPMODL = $(shell pwd)/noarch/opnfv-apex-$(RPMVERS)-$(shell echo ${RELEASE
export RPMONO = $(shell pwd)/noarch/opnfv-apex-onos-$(RPMVERS)-$(shell echo ${RELEASE} | tr -d '_-').noarch.rpm
export RPMSFC = $(shell pwd)/noarch/opnfv-apex-opendaylight-sfc-$(RPMVERS)-$(shell echo ${RELEASE} | tr -d '_-').noarch.rpm
-all_networks="admin_network private_network storage_network external_network api_network"
-
-
.PHONY: all
all: iso
@@ -204,7 +201,7 @@ networking-vpp.noarch.rpm: networking-vpp
mv networking-vpp/dist/*.rpm networking-vpp.noarch.rpm
networking-vpp:
- git clone -b stable https://github.com/naveenjoy/networking-vpp.git
+ git clone -b stable_vlan_rewrite https://github.com/fepan/networking-vpp.git
###############
# UNDERCLOUD #
@@ -400,6 +397,7 @@ iso: iso-clean images rpms $(CENTISO)
cd centos/Packages && yumdownloader ipxe-roms-qemu
cd centos/Packages && curl -O https://radez.fedorapeople.org/python34-markupsafe-0.23-9.el7.centos.x86_64.rpm
cd centos/Packages && curl -O https://radez.fedorapeople.org/python3-jinja2-2.8-5.el7.centos.noarch.rpm
+ cd centos/Packages && curl -O http://artifacts.opnfv.org/apex/dependencies/python3-ipmi-0.3.0-1.noarch.rpm
# regenerate yum repo data
@echo "Generating new yum metadata"
createrepo --update -g ../c7-opnfv-x86_64-comps.xml centos
diff --git a/build/bash_completion_apex b/build/bash_completion_apex
new file mode 100644
index 00000000..b3c963e3
--- /dev/null
+++ b/build/bash_completion_apex
@@ -0,0 +1,56 @@
+# bash/zsh completion support for OPNFV Apex
+##############################################################################
+# Copyright (c) 2016 Dan Radez (Red Hat) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Pieces of this script are derived from the git bash completion script
+
+___main () {
+ local cur prev opts
+ COMPREPLY=()
+ cur="${COMP_WORDS[COMP_CWORD]}"
+ prev="${COMP_WORDS[COMP_CWORD-1]}"
+ opts=" -h $(${COMP_WORDS[0]} -h | grep -Eo '^ [^ ]+')"
+ if [[ ! $opts =~ $prev ]]; then
+ COMPREPLY=($(compgen -W "${opts}" -- ${cur}))
+ fi
+}
+
+# these functions are setup like this in the thought that
+# deploy and util will eventually diverge from each other
+# for now they can use the same main logic so it's just
+# abstracted to another function
+__deploy_main () {
+ ___main
+}
+
+
+__util_main () {
+ ___main
+}
+
+
+__apex_func_wrap () {
+ local cur words cword prev
+ _get_comp_words_by_ref -n =: cur words cword prev
+ $1
+}
+
+# Setup function for bash completion
+__apex_complete () {
+ local wrapper="__apex_wrap${2}"
+ eval "$wrapper () { __apex_func_wrap $2 ; }"
+ complete -o bashdefault -o default -o nospace -F $wrapper $1 2>/dev/null \
+ || complete -o default -o nospace -F $wrapper $1
+}
+
+# run completion setup
+__apex_complete ./deploy.sh __deploy_main
+__apex_complete opnfv-deploy __deploy_main
+__apex_complete ./util.sh __util_main
+__apex_complete opnfv-util __util_main
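The completion script builds its option list at run time from the target command's own -h output, so it stays current as deploy.sh and util.sh gain options. A minimal sketch of exercising it without installing the RPM, assuming the bash-completion package (which provides _get_comp_words_by_ref) is present:

    # Load the completion definitions into the current shell
    source build/bash_completion_apex
    # Confirm the wrapper function is registered for opnfv-deploy
    complete -p opnfv-deploy
    # "opnfv-deploy -<TAB><TAB>" should now offer -h plus every option
    # scraped from the "opnfv-deploy -h" usage text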
diff --git a/build/build_ovs_nsh.sh b/build/build_ovs_nsh.sh
new file mode 100755
index 00000000..834df5bb
--- /dev/null
+++ b/build/build_ovs_nsh.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+##############################################################################
+# Copyright (c) 2016 Tim Rozet (Red Hat) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+set -e
+
+yum -y install rpm-build autoconf automake libtool systemd-units openssl openssl-devel python python-twisted-core python-zope-interface python-six desktop-file-utils groff graphviz procps-ng libcap-ng libcap-ng-devel PyQt4 selinux-policy-devel kernel-devel kernel-headers kernel-tools
+./boot.sh
+libtoolize --force
+aclocal
+autoheader
+automake --force-missing --add-missing
+autoconf
+./configure
+yum -y install rpmdevtools
+make rpm-fedora RPMBUILD_OPT="\"-D kversion `rpm -q kernel | rpmdev-sort | tail -n -1 | sed 's/^kernel-//'`\" --without check"
+make rpm-fedora-kmod RPMBUILD_OPT="\"-D kversion `rpm -q kernel | rpmdev-sort | tail -n -1 | sed 's/^kernel-//'`\""
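The nested quoting above passes a kversion define into rpmbuild so the kmod package is built against the newest installed kernel rather than the kernel currently running inside the image. The derivation in isolation, as a sketch (assumes rpmdevtools provides rpmdev-sort):

    # Newest installed kernel, e.g. "3.10.0-327.36.1.el7.x86_64"
    kversion=$(rpm -q kernel | rpmdev-sort | tail -n 1 | sed 's/^kernel-//')
    echo "building OVS kmod against kernel ${kversion}"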
diff --git a/build/nics-template.yaml.jinja2 b/build/nics-template.yaml.jinja2
index 0680a26f..ee830114 100644
--- a/build/nics-template.yaml.jinja2
+++ b/build/nics-template.yaml.jinja2
@@ -85,16 +85,16 @@ resources:
os_net_config:
network_config:
-
- {%- if vlans['private_network'] is number or vlans['storage_network'] is number or vlans['api_network'] is number or vlans['public_network'] is number %}
+ {%- if nets['tenant']['nic_mapping'][role]['vlan'] is number or nets['storage']['nic_mapping'][role]['vlan'] is number or nets['api']['nic_mapping'][role]['vlan'] is number or nets['external'][0]['nic_mapping'][role]['vlan'] is number %}
type: ovs_bridge
name: {get_input: bridge_name}
members:
-
type: interface
- name: {{ nics[role]['admin_network'] }}
+ name: {{ nets[role]['admin']['members'][0] }}
# force the MAC address of the bridge to this interface
primary: true
- {%- if 'public_network' in enabled_networks and vlans['public_network'] is number %}
+ {%- if nets['external'][0]['enabled'] and nets['external'][0]['nic_mapping'][role]['vlan'] is number %}
-
type: vlan
vlan_id: {get_param: ExternalNetworkVlanID}
@@ -106,7 +106,7 @@ resources:
default: true
next_hop: {get_param: ExternalInterfaceDefaultRoute}
{%- endif %}
- {%- if 'private_network' in enabled_networks and vlans['private_network'] is number %}
+ {%- if nets['tenant']['enabled'] and nets['tenant']['nic_mapping'][role]['vlan'] is number %}
-
type: vlan
vlan_id: {get_param: TenantNetworkVlanID}
@@ -114,7 +114,7 @@ resources:
-
ip_netmask: {get_param: TenantIpSubnet}
{%- endif %}
- {%- if 'storage_network' in enabled_networks and vlans['storage_network'] is number %}
+ {%- if nets['storage']['enabled'] and nets['storage']['nic_mapping'][role]['vlan'] is number %}
-
type: vlan
vlan_id: {get_param: StorageNetworkVlanID}
@@ -122,7 +122,7 @@ resources:
-
ip_netmask: {get_param: StorageIpSubnet}
{%- endif %}
- {%- if 'api_network' in enabled_networks and vlans['api_network'] is number %}
+ {%- if nets['api']['enabled'] and nets['api']['nic_mapping'][role]['vlan'] is number %}
-
type: vlan
vlan_id: {get_param: InternalApiNetworkVlanID}
@@ -132,7 +132,7 @@ resources:
{%- endif %}
{%- else %}
type: interface
- name: {{ nics[role]['admin_network'] }}
+ name: {{ nets['admin']['nic_mapping'][role]['members'][0] }}
{%- endif %}
use_dhcp: false
dns_servers: {get_param: DnsServers}
@@ -153,7 +153,7 @@ resources:
next_hop: {get_param: ControlPlaneDefaultRoute}
{%- endif %}
- {%- if 'private_network' in enabled_networks and vlans['private_network'] == 'native' %}
+ {%- if nets['tenant']['enabled'] and nets['tenant']['nic_mapping'][role]['vlan'] == 'native' %}
{%- if ovs_dpdk_bridge == 'br-phy' and role == 'compute' %}
-
type: ovs_bridge
@@ -165,7 +165,7 @@ resources:
members:
-
type: interface
- name: {{ nics[role]['private_network'] }}
+ name: {{ nets['tenant']['nic_mapping'][role]['members'][0] }}
# force the MAC address of the bridge to this interface
primary: true
-
@@ -175,17 +175,17 @@ resources:
{%- else %}
-
type: interface
- name: {{ nics[role]['private_network'] }}
+ name: {{ nets['tenant']['nic_mapping'][role]['members'][0] }}
use_dhcp: false
addresses:
-
ip_netmask: {get_param: TenantIpSubnet}
{%- endif %}
{%- endif %}
- {%- if 'public_network' in enabled_networks and external_net_type == 'interface' and vlans['public_network'] == 'native' %}
+ {%- if nets['external'][0]['enabled'] and external_net_type == 'interface' and nets['external'][0]['nic_mapping'][role]['vlan'] == 'native' %}
-
type: interface
- name: {{ nics[role]['public_network'] }}
+ name: {{ nets['external'][0]['nic_mapping'][role]['members'][0] }}
{%- if role == 'controller' %}
dns_servers: {get_param: DnsServers}
{%- endif %}
@@ -200,7 +200,7 @@ resources:
{%- endif %}
ip_netmask: 0.0.0.0/0
next_hop: {get_param: ExternalInterfaceDefaultRoute}
- {%- elif 'public_network' in enabled_networks and external_net_type == 'br-ex' and vlans['public_network'] == 'native' %}
+ {%- elif nets['external'][0]['enabled'] and external_net_type == 'br-ex' and nets['external'][0]['nic_mapping'][role]['vlan'] == 'native' %}
-
type: ovs_bridge
name: {get_input: bridge_name}
@@ -208,7 +208,7 @@ resources:
members:
-
type: interface
- name: {{ nics[role]['public_network'] }}
+ name: {{ nets['external'][0]['nic_mapping'][role]['members'][0] }}
# force the MAC address of the bridge to this interface
primary: true
{%- if role == 'controller' %}
@@ -223,19 +223,19 @@ resources:
next_hop: {get_param: ExternalInterfaceDefaultRoute}
{%- endif %}
{%- endif %}
- {%- if 'storage_network' in enabled_networks and vlans['storage_network'] == 'native' %}
+ {%- if nets['storage']['enabled'] and nets['storage']['nic_mapping'][role]['vlan'] == 'native' %}
-
type: interface
- name: {{ nics[role]['storage_network'] }}
+ name: {{ nets['storage']['nic_mapping'][role]['members'][0] }}
use_dhcp: false
addresses:
-
ip_netmask: {get_param: StorageIpSubnet}
{%- endif %}
- {%- if 'api_network' in enabled_networks and vlans['api_network'] == 'native' %}
+ {%- if nets['api']['enabled'] and nets['api']['nic_mapping'][role]['vlan'] == 'native' %}
-
type: interface
- name: {{ nics[role]['api_network'] }}
+ name: {{ nets['api']['nic_mapping'][role]['members'][0] }}
use_dhcp: false
addresses:
-
diff --git a/build/overcloud-full.sh b/build/overcloud-full.sh
index 21667070..88f96851 100755
--- a/build/overcloud-full.sh
+++ b/build/overcloud-full.sh
@@ -154,5 +154,44 @@ LIBGUESTFS_BACKEND=direct virt-customize \
--upload ../puppet-neutron/manifests/plugins/ml2/networking-vpp.pp:/etc/puppet/modules/neutron/manifests/plugins/ml2/ \
-a overcloud-full_build.qcow2
+rm -rf ovs_nsh_patches
+rm -rf ovs
+git clone https://github.com/yyang13/ovs_nsh_patches.git
+git clone https://github.com/openvswitch/ovs.git
+pushd ovs > /dev/null
+git reset --hard 7d433ae57ebb90cd68e8fa948a096f619ac4e2d8
+cp ../ovs_nsh_patches/*.patch ./
+# Hack for build servers that have no git config
+git config user.email "apex@opnfv.com"
+git config user.name "apex"
+git am *.patch
+popd > /dev/null
+tar czf ovs.tar.gz ovs
+
+# Required packages to redirect stdin with virt-customize
+virt_pkg_str="./$libguestfs_pkg "
+wget $virt_uri_base/$libguestfs_pkg
+for package in ${virt_pkgs[@]}; do
+ wget "$virt_uri_base/$package"
+ virt_pkg_str+=" ./$package"
+done
+
+if ! sudo yum -y install ${virt_pkg_str}; then
+ if [ "$(rpm -q libguestfs)" != "$(rpm -qpf $libguestfs_pkg)" ]; then
+ echo "ERROR: Failed to update libguestfs"
+ exit 1
+ fi
+fi
+
+
+
+# BUILD NSH OVS
+LIBGUESTFS_BACKEND=direct virt-customize \
+ --upload ../build_ovs_nsh.sh:/root/ \
+ --upload ovs.tar.gz:/root/ \
+ --run-command "cd /root/ && tar xzf ovs.tar.gz" \
+ --run-command "cd /root/ovs && /root/build_ovs_nsh.sh" \
+ -a overcloud-full_build.qcow2
+
mv -f overcloud-full_build.qcow2 overcloud-full.qcow2
popd > /dev/null
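After this step the NSH-patched OVS RPMs live inside the image under /root/ovs/rpm/rpmbuild/RPMS/x86_64/, which is where the onos and sfc scripts below now install them from instead of uploading them at customize time. A read-only spot check that the artifacts really landed in the image, assuming libguestfs-tools is installed on the build host:

    # List the RPMs built inside the overcloud image without booting it
    LIBGUESTFS_BACKEND=direct virt-ls -a overcloud-full.qcow2 \
        /root/ovs/rpm/rpmbuild/RPMS/x86_64/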
diff --git a/build/overcloud-onos.sh b/build/overcloud-onos.sh
index 7b5e3df7..b695983a 100755
--- a/build/overcloud-onos.sh
+++ b/build/overcloud-onos.sh
@@ -19,12 +19,8 @@ cp -f overcloud-full.qcow2 overcloud-full-onos_build.qcow2
#######################################
# upgrade ovs into ovs 2.5.90 with NSH function
-curl -L -O ${onos_ovs_uri}/${onos_ovs_pkg}
-tar -xzf ${onos_ovs_pkg}
-LIBGUESTFS_BACKEND=direct virt-customize --upload ${ovs_kmod_rpm_name}:/root/ \
- --run-command "yum install -y /root/${ovs_kmod_rpm_name}" \
- --upload ${ovs_rpm_name}:/root/ \
- --run-command "yum upgrade -y /root/${ovs_rpm_name}" \
+LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum install -y /root/ovs/rpm/rpmbuild/RPMS/x86_64/${ovs_kmod_rpm_name}" \
+ --run-command "yum upgrade -y /root/ovs/rpm/rpmbuild/RPMS/x86_64/${ovs_rpm_name}" \
-a overcloud-full-onos_build.qcow2
diff --git a/build/overcloud-opendaylight-sfc.sh b/build/overcloud-opendaylight-sfc.sh
index 612f483d..444d284a 100755
--- a/build/overcloud-opendaylight-sfc.sh
+++ b/build/overcloud-opendaylight-sfc.sh
@@ -19,15 +19,8 @@ pushd images > /dev/null
cp -f overcloud-full-opendaylight.qcow2 overcloud-full-opendaylight-sfc_build.qcow2
# upgrade ovs into ovs 2.5.90 with NSH function
-if ! [[ -f "$ovs_rpm_name" && -f "$ovs_kmod_rpm_name" ]]; then
- curl -L -O ${onos_ovs_uri}/${onos_ovs_pkg}
- tar -xzf ${onos_ovs_pkg}
-fi
-
-LIBGUESTFS_BACKEND=direct virt-customize --upload ${ovs_kmod_rpm_name}:/root/ \
- --run-command "yum install -y /root/${ovs_kmod_rpm_name}" \
- --upload ${ovs_rpm_name}:/root/ \
- --run-command "yum upgrade -y /root/${ovs_rpm_name}" \
+LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum install -y /root/ovs/rpm/rpmbuild/RPMS/x86_64/${ovs_kmod_rpm_name}" \
+ --run-command "yum upgrade -y /root/ovs/rpm/rpmbuild/RPMS/x86_64/${ovs_rpm_name}" \
-a overcloud-full-opendaylight-sfc_build.qcow2
mv overcloud-full-opendaylight-sfc_build.qcow2 overcloud-full-opendaylight-sfc.qcow2
diff --git a/build/rpm_specs/opnfv-apex-common.spec b/build/rpm_specs/opnfv-apex-common.spec
index 8fd241b4..6ad5782f 100644
--- a/build/rpm_specs/opnfv-apex-common.spec
+++ b/build/rpm_specs/opnfv-apex-common.spec
@@ -23,7 +23,7 @@ https://wiki.opnfv.org/apex
%build
rst2html docs/installationprocedure/index.rst docs/installation-instructions.html
-rst2html docs/release-notes/release-notes.rst docs/release-notes.html
+rst2html docs/releasenotes/release-notes.rst docs/release-notes.html
%global __python %{__python3}
@@ -33,6 +33,9 @@ install ci/deploy.sh %{buildroot}%{_bindir}/opnfv-deploy
install ci/clean.sh %{buildroot}%{_bindir}/opnfv-clean
install ci/util.sh %{buildroot}%{_bindir}/opnfv-util
+mkdir -p %{buildroot}%{_sysconfdir}/bash_completion.d/
+install build/bash_completion_apex %{buildroot}%{_sysconfdir}/bash_completion.d/apex
+
mkdir -p %{buildroot}%{_sysconfdir}/opnfv-apex/
install config/deploy/os-nosdn-nofeature-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-nofeature-noha.yaml
install config/deploy/os-nosdn-fdio-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-fdio-noha.yaml
@@ -81,7 +84,7 @@ install lib/installer/domain.xml %{buildroot}%{_var}/opt/opnfv/lib/installer/
mkdir -p %{buildroot}%{_docdir}/opnfv/
install LICENSE.rst %{buildroot}%{_docdir}/opnfv/
install docs/installation-instructions.html %{buildroot}%{_docdir}/opnfv/
-install docs/release-notes/index.rst %{buildroot}%{_docdir}/opnfv/release-notes.rst
+install docs/releasenotes/index.rst %{buildroot}%{_docdir}/opnfv/release-notes.rst
install docs/release-notes.html %{buildroot}%{_docdir}/opnfv/
install config/deploy/deploy_settings.yaml %{buildroot}%{_docdir}/opnfv/deploy_settings.yaml.example
install config/network/network_settings.yaml %{buildroot}%{_docdir}/opnfv/network_settings.yaml.example
@@ -105,6 +108,7 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
%{python3_sitelib}/apex/
%{_var}/opt/opnfv/lib/installer/onos/onos_gw_mac_update.sh
%{_var}/opt/opnfv/lib/installer/domain.xml
+%{_sysconfdir}/bash_completion.d/apex
%{_sysconfdir}/opnfv-apex/os-nosdn-nofeature-noha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-fdio-noha.yaml
%{_sysconfdir}/opnfv-apex/os-nosdn-ovs-noha.yaml
@@ -132,6 +136,8 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
%doc %{_docdir}/opnfv/inventory.yaml.example
%changelog
+* Fri Sep 16 2016 Dan Radez <dradez@redhat.com> - 3.0-13
+- adding bash completion script
* Tue Aug 30 2016 Tim Rozet <trozet@redhat.com> - 3.0-12
- Add clean library.
* Mon Aug 1 2016 Tim Rozet <trozet@redhat.com> - 3.0-11
diff --git a/build/set_perf_images.sh b/build/set_perf_images.sh
index 0025cc75..d91c20ec 100644
--- a/build/set_perf_images.sh
+++ b/build/set_perf_images.sh
@@ -19,6 +19,7 @@ for ROLE in $@; do
KERNEL=$(glance image-show overcloud-full | grep 'kernel_id' | cut -d '|' -f 3 | xargs)
RAMDISK_ID=$(glance image-show ${RAMDISK} | grep id | awk {'print $4'})
glance image-create --name $ROLE-overcloud-full --disk-format qcow2 --file $ROLE-overcloud-full.qcow2 --container-format bare --property ramdisk_id=$RAMDISK_ID --property kernel_id=$KERNEL --is-public True
+ rm -f $ROLE-overcloud-full.qcow2
fi
if [ "$ROLE" == "Controller" ]; then
diff --git a/build/undercloud.sh b/build/undercloud.sh
index a4d008ee..3cc56009 100755
--- a/build/undercloud.sh
+++ b/build/undercloud.sh
@@ -62,7 +62,7 @@ LIBGUESTFS_BACKEND=direct virt-customize \
# Add custom IPA to allow kernel params
wget https://raw.githubusercontent.com/trozet/ironic-python-agent/opnfv_kernel/ironic_python_agent/extensions/image.py
-python3.4 -c 'import py_compile; py_compile.compile("image.py", cfile="image.pyc")'
+python3 -c 'import py_compile; py_compile.compile("image.py", cfile="image.pyc")'
# Add performance image scripts
LIBGUESTFS_BACKEND=direct virt-customize --upload ../build_perf_image.sh:/home/stack \
diff --git a/build/variables.sh b/build/variables.sh
index 990ac836..0308580d 100644
--- a/build/variables.sh
+++ b/build/variables.sh
@@ -39,3 +39,15 @@ honeycomb_pkg='honeycomb-1.0.0-1609.noarch.rpm'
ovs_rpm_name=openvswitch-2.5.90-1.el7.centos.x86_64.rpm
ovs_kmod_rpm_name=openvswitch-kmod-2.5.90-1.el7.centos.x86_64.rpm
+
+virt_uri_base=https://people.redhat.com/~rjones/libguestfs-RHEL-7.3-preview
+libguestfs_pkg='libguestfs-1.32.7-3.el7.x86_64.rpm'
+virt_pkgs=(
+'libguestfs-tools-1.32.7-3.el7.noarch.rpm'
+'libguestfs-tools-c-1.32.7-3.el7.x86_64.rpm'
+'supermin-5.1.16-4.el7.x86_64.rpm'
+'supermin5-5.1.16-4.el7.x86_64.rpm'
+'supermin-helper-5.1.16-4.el7.x86_64.rpm'
+'perl-Sys-Guestfs-1.32.7-3.el7.x86_64.rpm'
+'python-libguestfs-1.32.7-3.el7.x86_64.rpm'
+)
diff --git a/ci/PR_revision.log b/ci/PR_revision.log
index a911d04a..f7b832e6 100644
--- a/ci/PR_revision.log
+++ b/ci/PR_revision.log
@@ -38,3 +38,7 @@
76,Add networking-vpp ML2 mechanism driver
77,Update FDIO to use opendaylight_v2 mechanism driver
78,Fix spelling mistake in specs filter
+79,Fix controller and compute ip array
+80,Change TenantNIC and PublicNIC to be role specific
+81,Fix duplicate NeutronServicePlugins
+82,Fixes neutron sdnvpn config resource
diff --git a/ci/deploy.sh b/ci/deploy.sh
index 0a2eec2e..edc6062a 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -25,8 +25,6 @@ green=$(tput setaf 2 || echo "")
interactive="FALSE"
ping_site="8.8.8.8"
ntp_server="pool.ntp.org"
-net_isolation_enabled="TRUE"
-net_isolation_arg=""
post_config="TRUE"
debug="FALSE"
@@ -36,23 +34,24 @@ declare -A deploy_options_array
declare -a performance_options
declare -A NET_MAP
+APEX_TMP_DIR=$(python3 -c "import tempfile; print(tempfile.mkdtemp())")
SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null -o LogLevel=error)
DEPLOY_OPTIONS=""
CONFIG=${CONFIG:-'/var/opt/opnfv'}
RESOURCES=${RESOURCES:-"$CONFIG/images"}
LIB=${LIB:-"$CONFIG/lib"}
-OPNFV_NETWORK_TYPES="admin_network private_network public_network storage_network api_network"
+OPNFV_NETWORK_TYPES="admin tenant external storage api"
VM_CPUS=4
VM_RAM=8
VM_COMPUTES=1
# Netmap used to map networks to OVS bridge names
-NET_MAP['admin_network']="br-admin"
-NET_MAP['private_network']="br-private"
-NET_MAP['public_network']="br-public"
-NET_MAP['storage_network']="br-storage"
-NET_MAP['api_network']="br-api"
+NET_MAP['admin']="br-admin"
+NET_MAP['tenant']="br-tenant"
+NET_MAP['external']="br-external"
+NET_MAP['storage']="br-storage"
+NET_MAP['api']="br-api"
ext_net_type="interface"
ip_address_family=4
@@ -77,11 +76,11 @@ done
display_usage() {
echo -e "Usage:\n$0 [arguments] \n"
- echo -e " -d|--deploy-settings : Full path to deploy settings yaml file. Optional. Defaults to null"
- echo -e " -i|--inventory : Full path to inventory yaml file. Required only for baremetal"
- echo -e " -n|--net-settings : Full path to network settings file. Optional."
- echo -e " -p|--ping-site : site to use to verify IP connectivity. Optional. Defaults to 8.8.8.8"
- echo -e " -v|--virtual : Virtualize overcloud nodes instead of using baremetal."
+ echo -e " --deploy-settings | -d : Full path to deploy settings yaml file. Optional. Defaults to null"
+ echo -e " --inventory | -i : Full path to inventory yaml file. Required only for baremetal"
+ echo -e " --net-settings | -n : Full path to network settings file. Optional."
+ echo -e " --ping-site | -p : site to use to verify IP connectivity. Optional. Defaults to 8.8.8.8"
+ echo -e " --virtual | -v : Virtualize overcloud nodes instead of using baremetal."
echo -e " --flat : disable Network Isolation and use a single flat network for the underlay network."
echo -e " --no-post-config : disable Post Install configuration."
echo -e " --debug : enable debug output."
@@ -96,7 +95,6 @@ display_usage() {
parse_cmdline() {
echo -e "\n\n${blue}This script is used to deploy the Apex Installer and Provision OPNFV Target System${reset}\n\n"
echo "Use -h to display help"
- sleep 2
while [ "${1:0:1}" = "-" ]
do
@@ -129,12 +127,6 @@ parse_cmdline() {
echo "Executing a Virtual Deployment"
shift 1
;;
- --flat )
- net_isolation_enabled="FALSE"
- net_isolation_arg="--flat"
- echo "Underlay Network Isolation Disabled: using flat configuration"
- shift 1
- ;;
--no-post-config )
post_config="FALSE"
echo "Post install configuration disabled"
@@ -171,10 +163,9 @@ parse_cmdline() {
;;
esac
done
+ sleep 2
- if [[ ! -z "$NETSETS" && "$net_isolation_enabled" == "FALSE" ]]; then
- echo -e "${red}INFO: Single flat network requested. Only admin_network settings will be used!${reset}"
- elif [[ -z "$NETSETS" ]]; then
+ if [[ -z "$NETSETS" ]]; then
echo -e "${red}ERROR: You must provide a network_settings file with -n.${reset}"
exit 1
fi
@@ -185,7 +176,7 @@ parse_cmdline() {
echo -e "${red}ERROR: You should not specify an inventory file with virtual deployments${reset}"
exit 1
else
- INVENTORY_FILE='/tmp/inventory-virt.yaml'
+ INVENTORY_FILE="$APEX_TMP_DIR/inventory-virt.yaml"
fi
elif [[ -z "$INVENTORY_FILE" ]]; then
echo -e "${red}ERROR: You must specify an inventory file for baremetal deployments! Exiting...${reset}"
@@ -205,11 +196,6 @@ parse_cmdline() {
exit 1
fi
- if [[ "$net_isolation_enabled" == "FALSE" && "$post_config" == "TRUE" ]]; then
- echo -e "${blue}INFO: Post Install Configuration will be skipped. It is not supported with --flat${reset}"
- post_config="FALSE"
- fi
-
}
main() {
@@ -225,11 +211,10 @@ main() {
exit 1
fi
#Correct the time on the server prior to launching any VMs
- ntpdate $ntp_server
- if [ $? == 0 ]; then
+ if ntpdate $ntp_server; then
hwclock --systohc
- else
- echo -e "${red} ERROR: ntpdate failed to update the time on the server. ${reset}"
+ else
+ echo "${blue}WARNING: ntpdate failed to update the time on the server. ${reset}"
fi
setup_undercloud_vm
if [ "$virtual" == "TRUE" ]; then
@@ -247,7 +232,7 @@ main() {
fi
fi
if [[ "${deploy_options_array['sdn_controller']}" == 'onos' ]]; then
- if ! onos_update_gw_mac ${public_network_cidr} ${public_network_gateway}; then
+ if ! onos_update_gw_mac ${external_cidr} ${external_gateway}; then
echo -e "${red}ERROR:ONOS Post Install Configuration Failed, Exiting.${reset}"
exit 1
else
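With the --flat option removed, a network settings file is now mandatory for every deploy. A representative invocation, as a sketch: the deploy settings path matches what the opnfv-apex-common spec above installs under /etc/opnfv-apex/, and the network settings file is a site-edited copy of the packaged example from the opnfv doc directory:

    # Copy and adapt the example network settings, then deploy virtually
    cp /usr/share/doc/opnfv/network_settings.yaml.example network_settings.yaml
    opnfv-deploy -v -n network_settings.yaml \
        -d /etc/opnfv-apex/os-nosdn-nofeature-noha.yaml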
diff --git a/ci/util.sh b/ci/util.sh
index 8c09278a..480858d0 100755
--- a/ci/util.sh
+++ b/ci/util.sh
@@ -23,19 +23,19 @@ resolve_cmd() {
display_usage() {
echo -e "Usage:\n$0 subcommand [ arguments ]\n"
echo -e "Arguments:\n"
- echo -e " undercloud [ user [ command ] ] Connect to Undercloud VM as user and optionally execute a command\n"
- echo -e " user Optional: Defaults to 'stack'\n"
- echo -e " command Optional: Defaults to none\n"
+ echo -e " undercloud [ user [ command ] ] Connect to Undercloud VM as user and optionally execute a command"
+ echo -e " user Optional: Defaults to 'stack'"
+ echo -e " command Optional: Defaults to none"
echo -e ""
- echo -e " opendaylight Connect to OpenDaylight Karaf console\n"
+ echo -e " opendaylight Connect to OpenDaylight Karaf console"
echo -e ""
- echo -e " overcloud [ node [ command ] ] Connect to an Overcloud node and optionally execute a command\n"
- echo -e " node Required: in format controller|compute<number>. Example: controller0\n"
- echo -e " command Optional: Defaults to none\n"
+ echo -e " overcloud [ node [ command ] ] Connect to an Overcloud node and optionally execute a command"
+ echo -e " node Required: in format controller|compute<number>. Example: controller0"
+ echo -e " command Optional: Defaults to none"
echo -e ""
- echo -e " debug-stack Print parsed deployment failures to stdout \n"
+ echo -e " debug-stack Print parsed deployment failures to stdout"
echo -e ""
- echo -e " mock-detached on | off Add firewall rules to the jump host to mock a detached deployment \n"
+ echo -e " mock-detached on | off Add firewall rules to the jump host to mock a detached deployment\n"
}
##translates the command line argument
diff --git a/config/deploy/deploy_settings.yaml b/config/deploy/deploy_settings.yaml
index 82cc0811..e7821f18 100644
--- a/config/deploy/deploy_settings.yaml
+++ b/config/deploy/deploy_settings.yaml
@@ -1,10 +1,80 @@
+# The only global parameter at this time is ha_enabled, which will use
+# the tripleo ha architecture described here:
+# https://github.com/beekhof/osp-ha-deploy/blob/master/HA-keepalived.md
+# with 3 controllers by default
+#
+# If ha_enabled is false, there will only be one controller.
global_params:
ha_enabled: true
deploy_options:
+ # Which SDN controller to use. Valid options are 'opendaylight', 'onos',
+ # 'opendaylight-external', 'opencontrail' or false. A value of false will
+ # use Neutron's OVS ML2 controller.
sdn_controller: opendaylight
+
+ # Which version of ODL to use. This is only valid if 'opendaylight' was used
+ # above. If 'Boron' is specified, ODL Boron will be used. If no value is specified,
+ # Lithium will be used.
+ #odl_version: Boron
+
+ # Whether to configure ODL L3 support. This will disable the Neutron L3 Agent and
+ # use ODL instead.
sdn_l3: false
+
+ # Whether to install and configure Tacker (VNF Manager)
tacker: true
+
+ # Whether to configure Congress (policy as a service) datasources
+ # Note: Congress is already installed by default
congress: false
+
+ # Whether to configure ODL or ONOS with Service Function Chaining support. This
+ # requires the opnfv-apex-opendaylight-sfc package to be installed, since it
+ # uses a different overcloud image.
sfc: false
+
+ # Whether to configure ODL with SDNVPN support.
vpn: false
+
+ # Which dataplane to use for overcloud tenant networks. Valid options are
+ # 'ovs', 'ovs_dpdk' and 'fdio'.
+ dataplane: ovs
+
+ # Whether to install and configure fdio functionality in the overcloud
+ # The dataplane should be specified as fdio if this is set to true
+ vpp: false
+
+ # Whether to run vsperf after the install has completed
+ #vsperf: false
+
+ # Set performance options on specific roles. The valid roles are 'Compute', 'Controller'
+ # and 'Storage', and the valid sections are 'kernel' and 'nova'
+ #performance:
+ # Controller:
+ # kernel:
+ # # In this example, these three settings will be passed to the kernel boot line.
+ # # Any key/value pair can be entered here, so care should be taken to ensure that machines
+ # # do not fail to boot.
+ # #
+ # # isolcpus is generally used to push host processes off a particular core,
+ # # so that it can be dedicated to a specific process. On control nodes
+ # # this could be an ovs_dpdk process.
+ # isolcpus: 1
+ # # Hugepages are required for ovs_dpdk support.
+ # hugepage: 2M
+ # # intel_iommu is also required for ovs_dpdk support.
+ # intel_iommu: 'on'
+ # Compute:
+ # nova:
+ # # This is currently the only available option in the nova section. It will
+ # # add the provided string to vcpu_pin_set in nova.conf. This is used to pin
+ # # guest VMs to a set of CPU cores, and is described in more detail here:
+ # # http://docs.openstack.org/mitaka/config-reference/compute/config-options.html
+ # libvirtpin: 1
+ # kernel:
+ # # On compute nodes, isolcpus is usually used to reserve cores for use either by VMs
+ # # or ovs_dpdk
+ # isolcpus: 0
+ # hugepage: 2M
+ # intel_iommu: 'on'
diff --git a/config/network/network_settings.yaml b/config/network/network_settings.yaml
index f7680643..ab9ed962 100644
--- a/config/network/network_settings.yaml
+++ b/config/network/network_settings.yaml
@@ -1,118 +1,220 @@
# This configuration file defines Network Environment for a
# Baremetal Deployment of OPNFV. It contains default values
-# for 4 following networks:
+# for the following 5 networks:
#
# - admin
-# - private*
-# - public
+# - tenant*
+# - external*
# - storage*
-#
+# - api*
# *) optional networks
#
-# Any values missing from this configuration file will be
-# auto-detected by deployment script from the existing network
-# configuration of the jumphost.
-#
# Optional networks will be consolidated with the admin network
-# if not explicitely configured.
+# if not explicitly configured.
#
# See short description of the networks in the comments below.
#
+# "admin" is the short name for Control Plane Network.
+# This network should be IPv4 even if it is an IPv6 deployment, since
+# IPv6 does not have PXE boot support.
+# During OPNFV deployment it is used for node provisioning which will require
+# PXE booting as well as running a DHCP server on this network. Be sure to
+# disable any other DHCP/TFTP server on this network.
+#
+# "tenant" is the network used for tenant traffic.
+#
+# "external" is the network which should have internet or external
+# connectivity. External OpenStack networks will be configured to egress this
+# network. There can be multiple external networks, but only one may be
+# designated as "public", on which the OpenStack public APIs are registered.
+#
+# "storage" is the network for storage I/O.
+#
+# "api" is an optional network for splitting out OpenStack service API
+# communication. This should be used for IPv6 deployments.
-# DNS Servers for all nodes, comma delimited list
-dns_servers: ["8.8.8.8", "8.8.4.4"]
-# Domain name to use for undercloud/overcloud nodes
-domain_name: 'opnfvapex.com'
+# Metadata for the network configuration
+network-config-metadata:
+ title: LF-POD-1 Network config
+ version: 0.1
+ created: Mon Dec 28 2015
+ comment: None
-# "admin" is the short name for Control Plane Network.
-# During OPNFV deployment it is used for node provisioning so
-# PXE boot should be enabled for the related interfaces on all
-# the nodes in the OPNFV cluster. After the deployment this
-# network is used as the OpenStack management network which
-# carries e.g. communication between its internal components.
-#
-admin_network:
- enabled: true
- network_type: bridged
- bridged_interface: ''
- bond_interfaces: ''
- compute_interface: nic1
- controller_interface: nic1
- usable_ip_range: 192.0.2.11,192.0.2.99
- gateway: 192.0.2.1
- provisioner_ip: 192.0.2.1
- cidr: 192.0.2.0/24
- dhcp_range: 192.0.2.2,192.0.2.10
- introspection_range: 192.0.2.100,192.0.2.120
+# DNS Settings
+dns-domain: opnfvlf.org
+dns-search: opnfvlf.org
+dns_nameservers:
+ - 8.8.8.8
+ - 8.8.4.4
+# NTP servers
+ntp:
+ - 0.se.pool.ntp.org
+ - 1.se.pool.ntp.org
+# Syslog server
+syslog:
+ server: 10.128.1.24
+ transport: 'tcp'
-# "private" is an optional network used as underlying physical
-# network for virtual provider and tenant networks created by
-# users. Traffic between virtual machines is carried by this
-# network.
-#
-private_network:
- enabled: true
- vlan: native
- cidr: 11.0.0.0/24
- compute_interface: nic2
- controller_interface: nic2
+networks: # Network configurations
+ admin: # Admin configuration (pxe and jumpstart)
+ enabled: true
+ installer_vm: # Network settings for the Installer VM on admin network
+ nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ members:
+ - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
+ vlan: native # VLAN tag to use for this network on Installer VM, native means none
+ ip: 192.0.2.1 # IP to assign to Installer VM on this network
+ usable_ip_range:
+ - 192.0.2.11
+ - 192.0.2.99 # Usable ip range, if empty entire range is usable
+ gateway: 192.0.2.1 # Gateway (only needed when the external network is disabled)
+ cidr: 192.0.2.0/24 # Subnet in CIDR format 192.168.1.0/24
+ dhcp_range:
+ - 192.0.2.2
+ - 192.0.2.10 # DHCP range for the admin network, if empty it will be automatically provisioned
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic1
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: bond
+ members:
+ - nic1
+ #
+ tenant: # Tenant network configuration
+ enabled: true
+ cidr: 11.0.0.0/24 # Subnet in CIDR format 192.168.1.0/24
+ mtu: 64000 # Tenant network MTU
+ overlay_id_range: 2,65535 # Tenant network Overlay segmentation ID range:
+ # VNI, VLAN-ID, etc.
+ segmentation_type: vxlan # Tenant network segmentation type:
+ # vlan, vxlan, gre
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ vlan: native # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic2 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: interface
+ vlan: native
+ members:
+ - nic2 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ #
+ external: # Can contain 1 or more external networks
+ - public: # "public" network will be the network the installer VM attaches to
+ enabled: true
+ mtu: 64000 # Public network MTU
+ installer_vm: # Network settings for the Installer VM on the external network (only valid on the 'public' external network)
+ nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ vlan: native
+ members:
+ - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
+ ip: 192.168.37.1 # IP to assign to Installer VM on this network
+ cidr: 192.168.37.0/24
+ gateway: 192.168.37.1
+ floating_ip_range:
+ - 192.168.37.200
+ - 192.168.37.220 # Range to allocate to floating IPs for the public network with Neutron
+ usable_ip_range:
+ - 192.168.37.10
+ - 192.168.37.199 # Usable IP range on the public network, usually this is a shared subnet
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ vlan: native # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic3
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: interface
+ vlan: native
+ members:
+ - nic3
+ external_overlay: # External network to be created in OpenStack by Services tenant
+ name: Public_internet
+ type: flat
+ gateway: 192.168.37.1
+ - private_cloud: # another external network
+ enabled: false
+ mtu: 64000
+ installer_vm: # Network settings for the Installer VM on the external network (only valid on the 'public' external network)
+ nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ vlan: 101
+ members:
+ - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
+ ip: 192.168.38.1 # IP to assign to Installer VM on this network
+ cidr: 192.168.38.0/24
+ gateway: 192.168.38.1
+ floating_ip_range:
+ - 192.168.38.200
+ - 192.168.38.220 # Range to allocate to floating IPs for this network with Neutron
+ usable_ip_range:
+ - 192.168.38.10
+ - 192.168.38.199 # Usable IP range on this network, usually a shared subnet
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ vlan: 101 # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic3 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: interface
+ vlan: 101
+ members:
+ - nic3
+ external_overlay: # External network to be created in OpenStack by Services tenant
+ name: private_cloud
+ type: vlan
+ segmentation_id: 101
+ gateway: 192.168.38.1
+ #
+ storage: # Storage network configuration
+ enabled: true
+ cidr: 12.0.0.0/24 # Subnet in CIDR format
+ mtu: 64000 # Storage network MTU
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: bond # Physical interface type (interface or bond)
+ vlan: native # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic4 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: bond
+ vlan: native
+ members:
+ - nic4
+ #
+ api: # API network configuration
+ enabled: false
+ cidr: fd00:fd00:fd00:4000::/64 # Subnet in CIDR format
+ vlan: 13 # VLAN tag to use for Overcloud hosts on this network
+ mtu: 64000 # API network MTU
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: bond # Physical interface type (interface or bond)
+ vlan: native # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic5 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: bond
+ vlan: native
+ members:
+ - nic5
-# "public" network is used for external connectivity.
-# The external network provides Internet access for virtual
-# machines. If floating IP range is defined for this network,
-# floating IP addresses can be used for accessing virtual
-# machines from outside of OPNFV cluster. Also external REST
-# API calls use this network.
-#
-public_network:
- enabled: true
- network_type: ''
- bridged_interface: ''
- compute_interface: nic3
- controller_interface: nic3
- vlan: native
- cidr: 192.168.37.0/24
- gateway: 192.168.37.1
- floating_ip_range: 192.168.37.200,192.168.37.220
- usable_ip_range: 192.168.37.10,192.168.37.199
- provisioner_ip: 192.168.37.1
+# JOID specific settings
+joid:
-# "storage" is an optional network used by storage backends.
-# You can configure this network in order to reduce load on
-# Control Plane Network.
-#
-storage_network:
- enabled: true
- vlan: native
- cidr: 12.0.0.0/24
- compute_interface: nic4
- controller_interface: nic4
+# Compass specific settings
+compass:
-#admin_network:
-# enabled: true
-# network_type: bridged #Indicates if this network will be bridged to an interface, or to a bond
-# bridged_interface: '' #Interface to bridge to for installer VM
-# bond_interfaces: '' #Interfaces to create bond with for installer VM
-# compute_interface: nic4 #Interface used for this network on the compute node. Can either be logical nic name like "nic1" or real name like "eth1"
-# controller_interface: nic4 #Interface used for this network on the controller node. Can either be logical nic name like "nic1" or real name like "eth1"
-# vlan: native #VLAN tag to use, native means none
-# usable_ip_range: 192.0.2.11,192.0.2.99 #Usable ip range, if empty entire range is usable, ex. 192.168.1.10,192.168.1.20
-# gateway: 192.0.2.1 #Gateway (only needed when public_network is disabled), if empty it is auto-detected
-# provisioner_ip: 192.0.2.1 #installer VM IP, if empty it is the next available IP in the admin subnet
-# cidr: 192.0.2.0/24 #subnet in CIDR format 192.168.1.0/24, if empty it will be auto-detected
-# dhcp_range: 192.0.2.2,192.0.2.10 #dhcp range for the admin network, if empty it will be automatically provisioned
-# introspection_range: 192.0.2.100,192.0.2.120 #Range used for introspection phase (examining nodes)
-#private_network:
-# enabled: false #If disabled, internal api traffic will collapse to admin_network
-#public_network:
-# enabled: true #If disabled, public_network traffic will collapse to admin network
-# network_type: ''
-# bridged_interface: ''
-# cidr: 192.168.37.0/24
-# gateway: 192.168.37.1
-# floating_ip_range: 192.168.37.200,192.168.37.220 #Range to allocate to floating IPs for the public network with Neutron
-# usable_ip_range: 192.168.37.10,192.168.37.199 #Usable IP range on the public network, usually this is a shared subnet
-# provisioner_ip: 192.168.37.1
-#storage_network:
-# enabled: false #If disabled, storage_network traffic will collapse to admin network
+# Apex specific settings
+apex:
+ networks:
+ admin:
+ introspection_range:
+ - 192.0.2.100
+ - 192.0.2.120 # Range used for introspection phase (examining nodes)
+# Fuel specific settings
+fuel:
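The settings tree is now much deeper (networks → network name → nic_mapping → role), so a malformed indent can otherwise fail late in a deploy. A quick parse check of the file before deploying, as a sketch assuming python3 with PyYAML is available on the jump host:

    # Fail fast on YAML syntax errors in the new settings layout
    python3 -c 'import sys, yaml; yaml.safe_load(open(sys.argv[1])); print("OK")' \
        config/network/network_settings.yaml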
diff --git a/config/network/network_settings_v6.yaml b/config/network/network_settings_v6.yaml
index dd2d066e..bfce3ab4 100644
--- a/config/network/network_settings_v6.yaml
+++ b/config/network/network_settings_v6.yaml
@@ -1,141 +1,184 @@
# This configuration file defines Network Environment for a
# Baremetal Deployment of OPNFV. It contains default values
-# for 4 following networks:
+# for the following 5 networks:
#
# - admin
-# - private*
-# - public
+# - tenant*
+# - external*
# - storage*
# - api*
-#
# *) optional networks
#
-# Any values missing from this configuration file will be
-# auto-detected by deployment script from the existing network
-# configuration of the jumphost.
-#
# Optional networks will be consolidated with the admin network
-# if not explicitely configured.
+# if not explicitly configured.
#
# See short description of the networks in the comments below.
#
-
-# DNS Servers for all nodes, comma delimited list
-dns_servers: ["8.8.8.8", "8.8.4.4"]
-
# "admin" is the short name for Control Plane Network.
-# During OPNFV deployment it is used for node provisioning so
-# PXE boot should be enabled for the related interfaces on all
-# the nodes in the OPNFV cluster. After the deployment this
-# network is used as the OpenStack management network which
-# carries e.g. communication between its internal components.
-# This network must be IPv4 currently.
-domain_name: 'opnfvapex.com'
-
-admin_network:
- enabled: true
- network_type: bridged
- bridged_interface: ''
- bond_interfaces: ''
- compute_interface: nic1
- controller_interface: nic1
- vlan: native
- usable_ip_range: 192.0.2.11,192.0.2.99
- gateway: 192.0.2.1
- provisioner_ip: 192.0.2.1
- cidr: 192.0.2.0/24
- dhcp_range: 192.0.2.2,192.0.2.10
- introspection_range: 192.0.2.100,192.0.2.120
-
-# "private" is an optional network used as underlying physical
-# network for virtual provider and tenant networks created by
-# users. Traffic between virtual machines is carried by this
-# network.
-# This network must be IPv4 currently.
+# This network should be IPv4 even if it is an IPv6 deployment, since
+# IPv6 does not have PXE boot support.
+# During OPNFV deployment it is used for node provisioning which will require
+# PXE booting as well as running a DHCP server on this network. Be sure to
+# disable any other DHCP/TFTP server on this network.
+#
+# "tenant" is the network used for tenant traffic.
+#
+# "external" is the network which should have internet or external
+# connectivity. External OpenStack networks will be configured to egress this
+# network. There can be multiple external networks, but only one may be
+# designated as "public", on which the OpenStack public APIs are registered.
+#
+# "storage" is the network for storage I/O.
#
-private_network:
- enabled: true
- cidr: 11.0.0.0/24
- vlan: native
- cidr: 11.0.0.0/24
- compute_interface: nic2
- controller_interface: nic2
+# "api" is an optional network for splitting out OpenStack service API
+# communication. This should be used for IPv6 deployments.
-# "public" network is used for external connectivity.
-# The external network provides Internet access for virtual
-# machines. If floating IP range is defined for this network,
-# floating IP addresses can be used for accessing virtual
-# machines from outside of OPNFV cluster. Also external REST
-# API calls use this network.
-#
-public_network:
- enabled: true
- network_type: ''
- bridged_interface: ''
- compute_interface: nic3
- controller_interface: nic3
- vlan: native
- cidr: 2001:db8::/64
- gateway: 2001:db8::1
- provisioner_ip: 2001:db8::1
+# Metadata for the network configuration
+network-config-metadata:
+ title: LF-POD-1 Network config
+ version: 0.1
+ created: Mon Dec 28 2015
+ comment: None
-# "storage" is an optional network used by storage backends.
-# You can configure this network in order to reduce load on
-# Control Plane Network.
-#
-storage_network:
- enabled: true
- cidr: fd00:fd00:fd00:2000::/64
- vlan: native
- compute_interface: nic4
- controller_interface: nic4
+# DNS Settings
+dns-domain: opnfvlf.org
+dns-search: opnfvlf.org
+dns_nameservers:
+ - 8.8.8.8
+ - 8.8.4.4
+# NTP servers
+ntp:
+ - 0.se.pool.ntp.org
+ - 1.se.pool.ntp.org
+# Syslog server
+syslog:
+ server: 10.128.1.24
+ transport: 'tcp'
+networks: # Network configurations
+ admin: # Admin configuration (pxe and jumpstart)
+ enabled: true
+ installer_vm: # Network settings for the Installer VM on admin network
+ nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ members:
+ - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
+ vlan: native # VLAN tag to use for this network on Installer VM, native means none
+ ip: 192.0.2.1 # IP to assign to Installer VM on this network
+ usable_ip_range:
+ - 192.0.2.11
+ - 192.0.2.99 # Usable ip range, if empty entire range is usable
+ gateway: 192.0.2.1 # Gateway (only needed when the external network is disabled)
+ cidr: 192.0.2.0/24 # Subnet in CIDR format 192.168.1.0/24
+ dhcp_range:
+ - 192.0.2.2
+ - 192.0.2.10 # DHCP range for the admin network, if empty it will be automatically provisioned
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic1
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: bond
+ members:
+ - nic1
+ #
+ tenant: # Tenant network configuration
+ enabled: true
+ cidr: 11.0.0.0/24 # Subnet in CIDR format 192.168.1.0/24
+ mtu: 64000 # Tenant network MTU
+ overlay_id_range: 2,65535 # Tenant network Overlay segmentation ID range:
+ # VNI, VLAN-ID, etc.
+ segmentation_type: vxlan # Tenant network segmentation type:
+ # vlan, vxlan, gre
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ vlan: native # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic2 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: interface
+ vlan: native
+ members:
+ - nic2 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ #
+ external: # Can contain 1 or more external networks
+ - public: # "public" network will be the network the installer VM attaches to
+ enabled: true
+ mtu: 64000 # Public network MTU
+ installer_vm: # Network settings for the Installer VM on the external network (only valid on the 'public' external network)
+ nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ vlan: native
+ members:
+ - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
+ ip: 2001:db8::1 # IP to assign to Installer VM on this network
+ cidr: 2001:db8::0/64
+ gateway: 2001:db8::1
+ floating_ip_range:
+ - 2001:db8:0:0:0:0:0:2
+ - 2001:db8:0:0:ffff:ffff:ffff:ffff
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ vlan: native # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic3
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: interface
+ vlan: native
+ members:
+ - nic3
+ external_overlay: # External network to be created in OpenStack by Services tenant
+ name: Public_internet
+ type: flat
+ gateway: 2001:db8::1
+ #
+ storage: # Storage network configuration
+ enabled: true
+ cidr: fd00:fd00:fd00:2000::/64 # Subnet in CIDR format
+ mtu: 64000 # Storage network MTU
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: bond # Physical interface type (interface or bond)
+ vlan: native # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic4 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: bond
+ vlan: native
+ members:
+ - nic4
+ #
+ api: # API network configuration
+ enabled: true
+ cidr: fd00:fd00:fd00:4000::/64 # Subnet in CIDR format
+ vlan: 13 # VLAN tag to use for Overcloud hosts on this network
+ mtu: 64000 # API network MTU
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: bond # Physical interface type (interface or bond)
+ vlan: native # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic5 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: bond
+ vlan: native
+ members:
+ - nic5
-# "api" is an optional network used by internal openstack api services.
-api_network:
- enabled: true
- cidr: fd00:fd00:fd00:4000::/64
- vlan: native
- compute_interface: nic5
- controller_interface: nic5
+# JOID specific settings
+joid:
-#public_network:
-# enabled: true #If disabled, public_network traffic will collapse to admin network
-# network_type: ''
-# bridged_interface: ''
-# cidr: 192.168.37.0/24
-# gateway: 192.168.37.1
-# floating_ip_range: 192.168.37.200,192.168.37.220 #Range to allocate to floating IPs for the public network with Neutron
-# usable_ip_range: 192.168.37.10,192.168.37.199 #Usable IP range on the public network, usually this is a shared subnet
-# provisioner_ip: 192.168.37.1
-#
-#private_network:
-# enabled: false #If disabled, underlay traffic will collapse to admin_network
-# ipv6: true #This flag is only needed if cidr is not provided, and bridged_interface
-# is used for address auto detection.
-#
-#storage_network:
-# enabled: false #If disabled, storage_network traffic will collapse to admin network
-# ipv6: true #This flag is only needed if cidr is not provided, and bridged_interface
-# is used for address auto detection.
-#
-#api_network:
-# enabled: false #If disabled, api_network traffic will collapse to admin network
-# ipv6: true #This flag is only needed if cidr is not provided, and bridged_interface
-# is used for address auto detection.
-#
-#General behavior description (today's behavior, not necessarily optimal):
-# - If a network has cidr field defined, no auto detection will be done. All missing fields will be generated according to CIDR.
-# - If cidr is not specified, bridged_interface field must be specified. IP detection will be done on the system.
-# In this case, an optional ipv6 field can be specified to indicate what kind of IP discovery takes place.
-# - It is assumed the supplied cidr has enough address to generate all field today.
-# - If a field is specified, no auto generation (from cidr) or auto detection (from bridged_interface) will be performed.
-# It is assumed the value specified is correct.
-# - Any networks can be specified to be IPv6, but only private, storage and public SHOULD. No check is performed to validate this.
-#
-#Other changes
-# - All IP addresses can now be IPv4 or IPv6, we will detect the correct family and configure accordingly.
-# Note that if any network is specified to be IPv6, we consider it a IPv6 deployment. IPv6 deployment does require additional
-# configurations in resulting network environment yaml, such as enabling ipv6 support for NOVA, RABBITMQ, etc.
-#
\ No newline at end of file
+# Compass specific settings
+compass:
+
+# Apex specific settings
+apex:
+ networks:
+ admin:
+ introspection_range:
+ - 192.0.2.100
+ - 192.0.2.120 # Range used for introspection phase (examining nodes)
+# Fuel specific settings
+fuel:
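
Before re-running a deploy, a quick syntax check of a hand-edited settings file
can catch YAML mistakes early; a minimal sketch, assuming PyYAML is installed on
the jumphost and the file lives under /etc/opnfv-apex/ as the install docs suggest:

    # Fails with a traceback on malformed YAML, prints OK otherwise
    python3 -c 'import sys, yaml; yaml.safe_load(open(sys.argv[1])); print("OK")' \
        /etc/opnfv-apex/network_settings_v6.yaml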
diff --git a/config/network/network_settings_vlans.yaml b/config/network/network_settings_vlans.yaml
index 40d6183e..beeae477 100644
--- a/config/network/network_settings_vlans.yaml
+++ b/config/network/network_settings_vlans.yaml
@@ -1,102 +1,219 @@
# This configuration file defines Network Environment for a
# Baremetal Deployment of OPNFV. It contains default values
-# for 4 following networks:
+# for the following 5 networks:
#
# - admin
-# - private*
-# - public
+# - tenant*
+# - external*
# - storage*
-#
+# - api*
# *) optional networks
#
-# Any values missing from this configuration file will be
-# auto-detected by deployment script from the existing network
-# configuration of the jumphost.
-#
# Optional networks will be consolidated with the admin network
-# if not explicitely configured.
+# if not explicitly configured.
#
# See short description of the networks in the comments below.
#
-
# "admin" is the short name for Control Plane Network.
-# During OPNFV deployment it is used for node provisioning so
-# PXE boot should be enabled for the related interfaces on all
-# the nodes in the OPNFV cluster. After the deployment this
-# network is used as the OpenStack management network which
-# carries e.g. communication between its internal components.
+# This network should be IPv4 even if it is an IPv6 deployment, since
+# IPv6 does not have PXE boot support.
+# During OPNFV deployment it is used for node provisioning, which requires
+# PXE booting as well as running a DHCP server on this network. Be sure to
+# disable any other DHCP/TFTP server on this network.
#
-admin_network:
- enabled: true
- network_type: bridged
- bridged_interface: ''
- bond_interfaces: ''
- usable_ip_range: 192.0.2.11,192.0.2.99
- gateway: 192.0.2.1
- provisioner_ip: 192.0.2.1
- cidr: 192.0.2.0/24
- dhcp_range: 192.0.2.2,192.0.2.10
- introspection_range: 192.0.2.100,192.0.2.120
-
-# "private" is an optional network used as underlying physical
-# network for virtual provider and tenant networks created by
-# users. Traffic between virtual machines is carried by this
-# network.
+# "tenant" is the network used for tenant traffic.
#
-private_network:
- enabled: true
- vlan: 400
- cidr: 11.0.0.0/24
-
-# "public" network is used for external connectivity.
-# The external network provides Internet access for virtual
-# machines. If floating IP range is defined for this network,
-# floating IP addresses can be used for accessing virtual
-# machines from outside of OPNFV cluster. Also external REST
-# API calls use this network.
+# "external" is the network which should have internet or external
+# connectivity. External OpenStack networks will be configured to egress this
+# network. There can be multiple external networks, but only one may be
+# assigned as "public", which the OpenStack public APIs will register on.
#
-public_network:
- enabled: true
- network_type: ''
- bridged_interface: ''
- vlan: 500
- cidr: 192.168.37.0/24
- gateway: 192.168.37.1
- floating_ip_range: 192.168.37.200,192.168.37.220
- usable_ip_range: 192.168.37.10,192.168.37.199
- provisioner_ip: 192.168.37.1
-
-# "storage" is an optional network used by storage backends.
-# You can configure this network in order to reduce load on
-# Control Plane Network.
+# "storage" is the network for storage I/O.
#
-storage_network:
- enabled: true
- vlan: 200
- cidr: 12.0.0.0/24
+# "api" is an optional network for splitting out OpenStack service API
+# communication. This should be used for IPv6 deployments.
+
+
+#Meta data for the network configuration
+network-config-metadata:
+ title: LF-POD-1 Network config
+ version: 0.1
+ created: Mon Dec 28 2015
+ comment: None
+
+# DNS Settings
+dns-domain: opnfvlf.org
+dns-search: opnfvlf.org
+dns_nameservers:
+ - 8.8.8.8
+ - 8.8.4.4
+# NTP servers
+ntp:
+ - 0.se.pool.ntp.org
+ - 1.se.pool.ntp.org
+# Syslog server
+syslog:
+ server: 10.128.1.24
+ transport: 'tcp'
+
+networks: # Network configurations
+  admin: # Admin configuration (pxe and jumpstart)
+ enabled: true
+ installer_vm: # Network settings for the Installer VM on admin network
+ nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ members:
+ - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
+ vlan: native # VLAN tag to use for this network on Installer VM, native means none
+ ip: 192.0.2.1 # IP to assign to Installer VM on this network
+ usable_ip_range:
+ - 192.0.2.11
+ - 192.0.2.99 # Usable ip range, if empty entire range is usable
+    gateway: 192.0.2.1 # Gateway (only needed when the external network is disabled)
+ cidr: 192.0.2.0/24 # Subnet in CIDR format 192.168.1.0/24
+ dhcp_range:
+ - 192.0.2.2
+ - 192.0.2.10 # DHCP range for the admin network, if empty it will be automatically provisioned
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic1
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: bond
+ members:
+ - nic1
+ #
+ tenant: # Tenant network configuration
+ enabled: true
+ cidr: 11.0.0.0/24 # Subnet in CIDR format 192.168.1.0/24
+ mtu: 64000 # Tenant network MTU
+ overlay_id_range: 2,65535 # Tenant network Overlay segmentation ID range:
+ # VNI, VLAN-ID, etc.
+ segmentation_type: vxlan # Tenant network segmentation type:
+ # vlan, vxlan, gre
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ vlan: 401 # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic1 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: interface
+ vlan: 401
+ members:
+ - nic1 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ #
+ external: # Can contain 1 or more external networks
+ - public: # "public" network will be the network the installer VM attaches to
+ enabled: true
+ mtu: 64000 # Public network MTU
+      installer_vm: # Network settings for the Installer VM on the external network (note only valid on 'public' external network)
+ nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ vlan: 501
+ members:
+ - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
+ ip: 192.168.37.12 # IP to assign to Installer VM on this network
+ cidr: 192.168.37.0/24
+ gateway: 192.168.37.1
+ floating_ip_range:
+ - 192.168.37.200
+ - 192.168.37.220 # Range to allocate to floating IPs for the public network with Neutron
+ usable_ip_range:
+ - 192.168.37.10
+ - 192.168.37.199 # Usable IP range on the public network, usually this is a shared subnet
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ vlan: 501 # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic1
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: interface
+ vlan: 501
+ members:
+ - nic1
+ external_overlay: # External network to be created in OpenStack by Services tenant
+ name: Public_internet
+ type: flat
+ gateway: 192.168.37.1
+ - private_cloud: # another external network
+ enabled: false
+ mtu: 64000
+      installer_vm: # Network settings for the Installer VM on the external network (note only valid on 'public' external network)
+ nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ vlan: 501
+ members:
+ - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
+ ip: 192.168.38.12 # IP to assign to Installer VM on this network
+ cidr: 192.168.38.0/24
+ gateway: 192.168.38.1
+ floating_ip_range:
+ - 192.168.38.200
+ - 192.168.38.220 # Range to allocate to floating IPs for the public network with Neutron
+ usable_ip_range:
+ - 192.168.38.10
+ - 192.168.38.199 # Usable IP range on the public network, usually this is a shared subnet
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ vlan: 502 # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - eth1 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: interface
+ vlan: 502
+ members:
+ - eth1
+ external_overlay: # External network to be created in OpenStack by Services tenant
+ name: private_cloud
+ type: vlan
+ segmentation_id: 101
+ gateway: 192.168.38.1
+ #
+ storage: # Storage network configuration
+ enabled: true
+ cidr: 12.0.0.0/24 # Subnet in CIDR format
+    mtu: 64000 # Storage network MTU
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: bond # Physical interface type (interface or bond)
+ vlan: 201 # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic4 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: bond
+ vlan: 201
+ members:
+ - nic4
+ #
+ api: # API network configuration
+ enabled: false
+ cidr: fd00:fd00:fd00:4000::/64 # Subnet in CIDR format
+    mtu: 64000 # API network MTU
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: bond # Physical interface type (interface or bond)
+ vlan: 101 # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic5 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: bond
+ vlan: 101
+ members:
+ - nic5
+
+# JOID specific settings
+joid:
+
+# Compass specific settings
+compass:
-#admin_network:
-# enabled: true
-# network_type: bridged #Indicates if this network will be bridged to an interface, or to a bond
-# bridged_interface: '' #Interface to bridge to for installer VM
-# bond_interfaces: '' #Interfaces to create bond with for installer VM
-# vlan: native #VLAN tag to use, native means none
-# usable_ip_range: 192.0.2.11,192.0.2.99 #Usable ip range, if empty entire range is usable, ex. 192.168.1.10,192.168.1.20
-# gateway: 192.0.2.1 #Gateway (only needed when public_network is disabled), if empty it is auto-detected
-# provisioner_ip: 192.0.2.1 #installer VM IP, if empty it is the next available IP in the admin subnet
-# cidr: 192.0.2.0/24 #subnet in CIDR format 192.168.1.0/24, if empty it will be auto-detected
-# dhcp_range: 192.0.2.2,192.0.2.10 #dhcp range for the admin network, if empty it will be automatically provisioned
-# introspection_range: 192.0.2.100,192.0.2.120 #Range used for introspection phase (examining nodes)
-#private_network:
-# enabled: false #If disabled, internal api traffic will collapse to admin_network
-#public_network:
-# enabled: true #If disabled, public_network traffic will collapse to admin network
-# network_type: ''
-# bridged_interface: ''
-# cidr: 192.168.37.0/24
-# gateway: 192.168.37.1
-# floating_ip_range: 192.168.37.200,192.168.37.220 #Range to allocate to floating IPs for the public network with Neutron
-# usable_ip_range: 192.168.37.10,192.168.37.199 #Usable IP range on the public network, usually this is a shared subnet
-# provisioner_ip: 192.168.37.1
-#storage_network:
-# enabled: false #If disabled, storage_network traffic will collapse to admin network
+# Apex specific settings
+apex:
+ networks:
+ admin:
+ introspection_range:
+ - 192.0.2.100
+ - 192.0.2.120 # Range used for introspection phase (examining nodes)
+# Fuel specific settings
+fuel:
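
Because this variant pins tenant, external, storage and api traffic to VLAN tags
(401, 501, 201 and 101 above), it is worth verifying that the switch actually
trunks those tags to the nodes before deploying; a hedged sketch using tcpdump
on the jumphost (em1 is the installer_vm interface assumed above):

    # Show up to five tagged tenant-VLAN frames arriving on the trunk
    sudo tcpdump -i em1 -nn -e vlan 401 -c 5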
diff --git a/docs/installationprocedure/architecture.rst b/docs/installationprocedure/architecture.rst
index c63da27c..c2b38d00 100644
--- a/docs/installationprocedure/architecture.rst
+++ b/docs/installationprocedure/architecture.rst
@@ -23,6 +23,64 @@ The overcloud is OPNFV. Configuration will be passed into undercloud and
the undercloud will use OpenStack's orchestration component, named Heat, to
execute a deployment that will provision the target OPNFV nodes.
+Apex High Availability Architecture
+===================================
+
+Undercloud
+----------
+
+The undercloud is not highly available. End users do not depend on the
+undercloud; it is used only for management purposes.
+
+Overcloud
+---------
+
+Apex will deploy three control nodes in an HA deployment. Each of these nodes
+will run the following services:
+
+- Stateless OpenStack services
+- MariaDB / Galera
+- RabbitMQ
+- OpenDaylight
+- HA Proxy
+- Pacemaker & VIPs
+
+Stateless OpenStack services
+  All running stateless OpenStack services are load balanced by HA Proxy.
+ Pacemaker monitors the services and ensures that they are running.
+
+Stateful OpenStack services
+ All running stateful OpenStack services are load balanced by HA Proxy.
+  They are monitored by Pacemaker in an active/passive failover configuration.
+
+MariaDB / Galera
+ The MariaDB database is replicated across the control nodes using Galera.
+  Pacemaker is responsible for the proper start-up of the Galera cluster. HA
+  Proxy provides an active/passive failover methodology for connections to
+  the database.
+
+RabbitMQ
+  The message bus is managed by Pacemaker to ensure proper start-up and the
+  establishment of clustering across cluster members.
+
+OpenDaylight
+ OpenDaylight is currently installed on all three control nodes but only
+ started on the first control node. OpenDaylight's HA capabilities are not yet
+ mature enough to be enabled.
+
+HA Proxy
+ HA Proxy is monitored by Pacemaker to ensure it is running across all nodes
+ and available to balance connections.
+
+Pacemaker & VIPs
+  Pacemaker has relationships and constraints set up to ensure proper service
+  start-up order and that the Virtual IPs associated with specific services
+  are running on the proper host.
+
+VM migration is configured, and VMs can be evacuated as needed or as invoked
+by tools such as Heat as part of a monitored stack deployment in the overcloud.
+
+
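
For readers who want to see the relationships described above on a live
deployment, the cluster state can be inspected from any controller with
standard Pacemaker tooling; a minimal sketch (run as root):

    pcs status       # resources, VIPs and node membership at a glance
    pcs constraint   # ordering/colocation rules between the services above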
OPNFV Scenario Architecture
===========================
@@ -47,31 +105,32 @@ the installer as a (-d) deploy setting. Read further in the Apex documentation
to learn more about invoking the deploy command. Below is quick reference
matrix for OPNFV scenarios supported in Apex. Please refer to the respective
OPNFV Docs documentation for each scenario in order to see a full scenario
-description. The following scenarios correspond to a supported <Scenario>.yaml
-deploy settings file:
+description. Also, please refer to the release notes for information about
+known issues per scenario. The following scenarios correspond to a supported
+<Scenario>.yaml deploy settings file:
+-------------------------+------------+-----------------+
-| **Scenario** | **Owner** | **Known Issues**|
+| **Scenario** | **Owner** | **Supported** |
+-------------------------+------------+-----------------+
-| os-nosdn-nofeature-ha | Apex | |
+| os-nosdn-nofeature-ha | Apex | Yes |
+-------------------------+------------+-----------------+
-| os-nosdn-nofeature-noha | Apex | |
+| os-nosdn-nofeature-noha | Apex | Yes |
+-------------------------+------------+-----------------+
-| os-nosdn-ovs-noha | OVS for NFV| |
+| os-nosdn-ovs-noha | OVS for NFV| Yes |
+-------------------------+------------+-----------------+
-| os-nosdn-fdio-noha | FDS | |
+| os-nosdn-fdio-noha | FDS | Yes |
+-------------------------+------------+-----------------+
-| os-odl_l2-nofeature-ha | Apex | |
+| os-odl_l2-nofeature-ha | Apex | Yes |
+-------------------------+------------+-----------------+
-| os-odl_l3-nofeature-ha | Apex | APEX-112 |
+| os-odl_l3-nofeature-ha | Apex | Yes |
+-------------------------+------------+-----------------+
-| os-odl_l2-sfc-noha | SFC | |
+| os-odl_l2-sfc-noha | SFC | Yes |
+-------------------------+------------+-----------------+
-| os-odl_l2-bgpvpn-noha | SDNVPN | |
+| os-odl_l2-bgpvpn-ha | SDNVPN | No |
+-------------------------+------------+-----------------+
-| os-odl_l2-fdio-noha | FDS | |
+| os-odl_l2-fdio-noha | FDS | Yes |
+-------------------------+------------+-----------------+
-| os-onos-nofeature-ha | ONOSFW | |
+| os-onos-nofeature-ha | ONOSFW | Yes |
+-------------------------+------------+-----------------+
-| os-onos-sfc-ha | ONOSFW | |
+| os-onos-sfc-ha | ONOSFW | Yes |
+-------------------------+------------+-----------------+
diff --git a/docs/installationprocedure/baremetal.rst b/docs/installationprocedure/baremetal.rst
index 8507b445..2de6e8a8 100644
--- a/docs/installationprocedure/baremetal.rst
+++ b/docs/installationprocedure/baremetal.rst
@@ -137,6 +137,12 @@ Install Bare Metal Jumphost
no longer carry them and they will not need special handling for
installation.
+   Python 3.4 is also required; it needs to be installed if you are using
+   the CentOS 7 base image:
+
+ ``sudo yum install epel-release``
+ ``sudo yum install python34``
+
To install these RPMs download them to the local disk on your CentOS 7
install and pass the file names directly to yum:
``sudo yum install python34-markupsafe-<version>.rpm
@@ -199,7 +205,8 @@ Edit the 2 settings files in /etc/opnfv-apex/. These files have comments to
help you customize them.
1. deploy_settings.yaml
- This file includes basic configuration options deployment.
+   This file includes basic configuration options for the deployment, and
+   also documents all available options.
Alternatively, there are pre-built deploy_settings files available in
(``/etc/opnfv-apex/``). These files are named with the naming convention
os-sdn_controller-enabled_feature-[no]ha.yaml. These files can be used in
@@ -223,11 +230,10 @@ You are now ready to deploy OPNFV using Apex!
Follow the steps below to execute:
1. Execute opnfv-deploy
- ``sudo opnfv-deploy [ --flat ] -n network_settings.yaml
+ ``sudo opnfv-deploy -n network_settings.yaml
-i inventory.yaml -d deploy_settings.yaml``
If you need more information about the options that can be passed to
- opnfv-deploy use ``opnfv-deploy --help`` --flat collapses all networks to a
- single nic, only uses the admin network from the network settings file. -n
+ opnfv-deploy use ``opnfv-deploy --help``. -n
network_settings.yaml allows you to customize your networking topology.
2. Wait while deployment is executed.
diff --git a/docs/installationprocedure/references.rst b/docs/installationprocedure/references.rst
index 5ff2a542..a63a8421 100644
--- a/docs/installationprocedure/references.rst
+++ b/docs/installationprocedure/references.rst
@@ -18,6 +18,8 @@ OPNFV
`OPNFV Apex project page <https://wiki.opnfv.org/apex>`_
+`OPNFV Apex release notes <http://artifacts.opnfv.org/apex/colorado/docs/releasenotes/release-notes.html#references>`_
+
OpenStack
---------
diff --git a/docs/installationprocedure/requirements.rst b/docs/installationprocedure/requirements.rst
index d54d584b..1b3fe87d 100644
--- a/docs/installationprocedure/requirements.rst
+++ b/docs/installationprocedure/requirements.rst
@@ -46,14 +46,13 @@ Network requirements include:
deployment. The External network is where public internet access would
reside if available.
-\* *These networks can be combined with each other or all combined on the
- Control Plane network.*
-\* *Non-External networks will be consolidated to the Control Plane network
- if not specifically configured.*
-\*\* *Internal API network, by default, is collapsed with provisioning in IPv4
- deployments, this is not possible with the current lack of PXE boot
- support and therefore the API network is required to be its own
- network in an IPv6 deployment.*
+\*These networks can be combined with each other or all combined on the
+Control Plane network.
+
+\*\*The Internal API network, by default, is collapsed with provisioning in
+IPv4 deployments. This is not possible in IPv6 deployments given the current
+lack of PXE boot support over IPv6, and therefore the API network is required
+to be its own network in an IPv6 deployment.
Bare Metal Node Requirements
----------------------------
diff --git a/docs/installationprocedure/virtualinstall.rst b/docs/installationprocedure/virtualinstall.rst
index 01971893..d2c81abe 100644
--- a/docs/installationprocedure/virtualinstall.rst
+++ b/docs/installationprocedure/virtualinstall.rst
@@ -49,7 +49,7 @@ environment will deploy with the following architecture:
Follow the steps below to execute:
1. ``sudo opnfv-deploy -v [ --virtual-computes n ]
- [ --virtual-cpus n ] [ --virtual-ram n ] [ --flat ]
+ [ --virtual-cpus n ] [ --virtual-ram n ]
-n network_settings.yaml -i inventory.yaml -d deploy_settings.yaml``
2. It will take approximately 45 minutes to an hour to stand up undercloud,
diff --git a/docs/release-notes/release-notes.rst b/docs/release-notes/release-notes.rst
deleted file mode 100644
index 080d2043..00000000
--- a/docs/release-notes/release-notes.rst
+++ /dev/null
@@ -1,247 +0,0 @@
-==========================================================================
-OPNFV Release Notes for the Colorado release of OPNFV Apex deployment tool
-==========================================================================
-
-
-.. contents:: Table of Contents
- :backlinks: none
-
-
-Abstract
-========
-
-This document provides the release notes for Colorado release with the Apex
-deployment toolchain.
-
-License
-=======
-
-All Apex and "common" entities are protected by the Apache License
-( http://www.apache.org/licenses/ )
-
-
-Version history
-===============
-
-
-+-------------+-----------+-----------------+----------------------+
-| **Date** | **Ver.** | **Authors** | **Comment** |
-| | | | |
-+-------------+-----------+-----------------+----------------------+
-| 2016-08-11 | 2.0.0 | Dan Radez | Updates for Colorado |
-+-------------+-----------+-----------------+----------------------+
-| 2015-09-17 | 1.0.0 | Dan Radez | Rewritten for |
-| | | | RDO Manager update |
-+-------------+-----------+-----------------+----------------------+
-
-Important notes
-===============
-
-This is the OPNFV Colorado release that implements the deploy stage of the
-OPNFV CI pipeline via Apex.
-
-Apex is based on RDO's Triple-O installation tool chain.
-More information at http://rdoproject.org
-
-Carefully follow the installation-instructions which guide a user on how to
-deploy OPNFV using Apex installer.
-
-Summary
-=======
-
-Colorado release with the Apex deployment toolchain will establish an OPNFV
-target system on a Pharos compliant lab infrastructure. The current definition
-of an OPNFV target system is and OpenStack Liberty combined with OpenDaylight
-Beryllium. The system is deployed with OpenStack High Availability (HA) for
-most OpenStack services. OpenDaylight is deployed in non-HA form as HA support
-is not availble for OpenDaylight at the time of the Colorado release. Ceph
-storage is used as Cinder backend, and is the only supported storage for
-Colorado. Ceph is setup as 3 OSDs and 3 Monitors, one OSD+Mon per Controller
-node.
-
-- Documentation is built by Jenkins
-- .iso image is built by Jenkins
-- .rpm packages are built by Jenkins
-- Jenkins deploys a Colorado release with the Apex deployment toolchain
- baremetal, which includes 3 control+network nodes, and 2 compute nodes.
-
-Release Data
-============
-
-+--------------------------------------+--------------------------------------+
-| **Project** | apex |
-| | |
-+--------------------------------------+--------------------------------------+
-| **Repo/tag** | apex/colorado.1.0 |
-| | |
-+--------------------------------------+--------------------------------------+
-| **Release designation** | colorado.1.0 |
-| | |
-+--------------------------------------+--------------------------------------+
-| **Release date** | 2016-09-14 |
-| | |
-+--------------------------------------+--------------------------------------+
-| **Purpose of the delivery** | OPNFV Colorado release |
-| | |
-+--------------------------------------+--------------------------------------+
-
-Version change
---------------
-
-Module version changes
-~~~~~~~~~~~~~~~~~~~~~~
-This is the first tracked version of the Colorado release with the Apex
-deployment toolchain. It is based on following upstream versions:
-
-- OpenStack (Mitaka release)
-
-- OpenDaylight (Beryllium release)
-
-- CentOS 7
-
-Document version changes
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-This is the first tracked version of Colorado release with the Apex
-deployment toolchain.
-The following documentation is provided with this release:
-
-- OPNFV Installation instructions for the Colorado release with the Apex
- deployment toolchain - ver. 1.0.0
-- OPNFV Release Notes for the Colorado release with the Apex deployment
- toolchain - ver. 1.0.0 (this document)
-
-Feature additions
-~~~~~~~~~~~~~~~~~
-
-+--------------------------------------+--------------------------------------+
-| **JIRA REFERENCE** | **SLOGAN** |
-| | |
-+--------------------------------------+--------------------------------------+
-| JIRA: APEX-32 | Build.sh integration of RDO Manager |
-| | |
-+--------------------------------------+--------------------------------------+
-| JIRA: APEX-6 | Deploy.sh integration of RDO Manager |
-| | |
-+--------------------------------------+--------------------------------------+
-| JIRA: APEX-34 | Migrate and update Release |
-| | Documentation for Colorado |
-+--------------------------------------+--------------------------------------+
-
-Bug corrections
-~~~~~~~~~~~~~~~
-
-**JIRA TICKETS:**
-
-+--------------------------------------+--------------------------------------+
-| **JIRA REFERENCE** | **SLOGAN** |
-| | |
-+--------------------------------------+--------------------------------------+
-| | |
-| | |
-+--------------------------------------+--------------------------------------+
-
-Deliverables
-------------
-
-Software deliverables
-~~~~~~~~~~~~~~~~~~~~~
-Apex .iso file
-Apex overcloud .rpm (opnfv-apex)
-Apex undercloud .rpm (opnfv-apex-undercloud)
-Apex common .rpm (opnfv-apex-common)
-build.sh - Builds the above artifacts
-opnfv-deploy - Automatically deploys Target OPNFV System
-opnfv-clean - Automatically resets a Target OPNFV Deployment
-
-Documentation deliverables
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-- OPNFV Installation instructions for the Colorado release with the Apex
- deployment toolchain - ver. 1.0.0
-- OPNFV Release Notes for the Colorado release with the Apex deployment
- toolchain - ver. 1.0.0 (this document)
-
-Known Limitations, Issues and Workarounds
-=========================================
-
-System Limitations
-------------------
-
-**Max number of blades:** 1 Apex undercloud, 3 Controllers, 20 Compute blades
-
-**Min number of blades:** 1 Apex undercloud, 1 Controller, 1 Compute blade
-
-**Storage:** Ceph is the only supported storage configuration.
-
-**Min master requirements:** At least 16GB of RAM
-
-
-Known issues
-------------
-
-**JIRA TICKETS:**
-
-+--------------------------------------+--------------------------------------+
-| **JIRA REFERENCE** | **SLOGAN** |
-| | |
-+--------------------------------------+--------------------------------------+
-| JIRA: APEX-89 | Deploy Ceph OSDs on the compute |
-| | nodes also |
-+--------------------------------------+--------------------------------------+
-| JIRA: APEX-27 | OpenContrail Support |
-| | |
-+--------------------------------------+--------------------------------------+
-| JIRA: APEX-30 | Support for VLAN tagged network |
-| | deployment architecture |
-+--------------------------------------+--------------------------------------+
-| JIRA: APEX-100 | DNS1 and DNS2 not handled in |
-| | nic bridging |
-+--------------------------------------+--------------------------------------+
-| JIRA: APEX-47 | Integrate Tacker as part of SFC |
-| | Experimental Feature |
-+--------------------------------------+--------------------------------------+
-| JIRA: APEX-84 | --flat option no longer working |
-| | |
-+--------------------------------------+--------------------------------------+
-| JIRA: APEX-51 | Integrate SDNVPN as a deploy option |
-| | |
-+--------------------------------------+--------------------------------------+
-| JIRA: APEX-99 | Syntax error when |
-| | running opnfv-deploy |
-+--------------------------------------+--------------------------------------+
-| JIRA: APEX-86 | Compute node count configurable |
-| | for virtual deployments |
-+--------------------------------------+--------------------------------------+
-| JIRA: APEX-141 | Adding VSPERF support |
-| | |
-+--------------------------------------+--------------------------------------+
-
-Workarounds
------------
-**-**
-
-
-Test Result
-===========
-
-The Colorado release with the Apex deployment toolchain has undergone QA
-test runs with the following results:
-
-+--------------------------------------+--------------------------------------+
-| **TEST-SUITE** | **Results:** |
-| | |
-+--------------------------------------+--------------------------------------+
-| **-** | **-** |
-+--------------------------------------+--------------------------------------+
-
-
-References
-==========
-
-For more information on the OPNFV Colorado release, please see:
-
-http://wiki.opnfv.org/releases/Colorado
-
-:Authors: Tim Rozet (trozet@redhat.com)
-:Authors: Dan Radez (dradez@redhat.com)
-:Version: 1.0.0
diff --git a/docs/release-notes/index.rst b/docs/releasenotes/index.rst
index 1f723960..1f723960 100644
--- a/docs/release-notes/index.rst
+++ b/docs/releasenotes/index.rst
diff --git a/docs/releasenotes/release-notes.rst b/docs/releasenotes/release-notes.rst
new file mode 100644
index 00000000..52b676ec
--- /dev/null
+++ b/docs/releasenotes/release-notes.rst
@@ -0,0 +1,384 @@
+==========================================================================
+OPNFV Release Notes for the Colorado release of OPNFV Apex deployment tool
+==========================================================================
+
+
+.. contents:: Table of Contents
+ :backlinks: none
+
+
+Abstract
+========
+
+This document provides the release notes for Colorado release with the Apex
+deployment toolchain.
+
+License
+=======
+
+All Apex and "common" entities are protected by the Apache License
+( http://www.apache.org/licenses/ )
+
+
+Version History
+===============
+
+
++-------------+-----------+-----------------+----------------------+
+| **Date** | **Ver.** | **Authors** | **Comment** |
+| | | | |
++-------------+-----------+-----------------+----------------------+
+| 2016-09-20 | 2.1.0 | Tim Rozet | More updates for |
+| | | | Colorado |
++-------------+-----------+-----------------+----------------------+
+| 2016-08-11 | 2.0.0 | Dan Radez | Updates for Colorado |
++-------------+-----------+-----------------+----------------------+
+| 2015-09-17 | 1.0.0 | Dan Radez | Rewritten for |
+| | | | RDO Manager update |
++-------------+-----------+-----------------+----------------------+
+
+Important Notes
+===============
+
+This is the OPNFV Colorado release that implements the deploy stage of the
+OPNFV CI pipeline via Apex.
+
+Apex is based on RDO's Triple-O installation tool chain.
+More information at http://rdoproject.org
+
+Carefully follow the installation instructions, which guide a user on how to
+deploy OPNFV using the Apex installer.
+
+Summary
+=======
+
+Colorado release with the Apex deployment toolchain will establish an OPNFV
+target system on a Pharos compliant lab infrastructure. The current definition
+of an OPNFV target system is OpenStack Mitaka combined with an SDN
+controller, such as OpenDaylight. The system is deployed with OpenStack High
+Availability (HA) for most OpenStack services. SDN controllers are deployed
+only on the first controller (see HAIssues_ for known HA SDN issues). Ceph
+storage is used as Cinder backend, and is the only supported storage for
+Colorado. Ceph is set up as 3 OSDs and 3 Monitors, one OSD+Mon per Controller
+node in an HA setup. Apex also supports non-HA deployments, which deploy a
+single controller and n compute nodes. Furthermore, Apex is capable of
+deploying scenarios in a bare metal or virtual fashion. Virtual deployments
+use multiple VMs on the jump host and internal networking to simulate a
+bare metal deployment.
+
+- Documentation is built by Jenkins
+- .iso image is built by Jenkins
+- .rpm packages are built by Jenkins
+- Jenkins deploys a Colorado release with the Apex deployment toolchain
+ bare metal, which includes 3 control+network nodes, and 2 compute nodes.
+
+Release Data
+============
+
++--------------------------------------+--------------------------------------+
+| **Project** | apex |
+| | |
++--------------------------------------+--------------------------------------+
+| **Repo/tag** | apex/colorado.1.0 |
+| | |
++--------------------------------------+--------------------------------------+
+| **Release designation** | colorado.1.0 |
+| | |
++--------------------------------------+--------------------------------------+
+| **Release date** | 2016-09-22 |
+| | |
++--------------------------------------+--------------------------------------+
+| **Purpose of the delivery** | OPNFV Colorado release |
+| | |
++--------------------------------------+--------------------------------------+
+
+Version change
+--------------
+
+Module version changes
+~~~~~~~~~~~~~~~~~~~~~~
+This is the first tracked version of the Colorado release with the Apex
+deployment toolchain. It is based on following upstream versions:
+
+- OpenStack (Mitaka release)
+
+- OpenDaylight (Beryllium/Boron releases)
+
+- CentOS 7
+
+Document Version Changes
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+This is the first tracked version of Colorado release with the Apex
+deployment toolchain.
+The following documentation is provided with this release:
+
+- OPNFV Installation instructions for the Colorado release with the Apex
+ deployment toolchain - ver. 1.0.0
+- OPNFV Release Notes for the Colorado release with the Apex deployment
+ toolchain - ver. 1.0.0 (this document)
+
+Feature Additions
+~~~~~~~~~~~~~~~~~
+
++--------------------------------------+--------------------------------------+
+| **JIRA REFERENCE** | **SLOGAN** |
+| | |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-107 | OpenDaylight HA - OVSDB Clustering |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-108 | Migrate to OpenStack Mitaka |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-30 | Support VLAN tagged deployments |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-105 | Enable Huge Page Configuration |
+| | Options |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-111 | Allow RAM to be specified for |
+| | Control/Compute in Virtual |
+| | Deployments |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-119 | Enable OVS DPDK as a deployment |
+| | Scenario in Apex |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-126 | Tacker Service deployed by Apex |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-135 | Congress Service deployed by Apex |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-127 | Nova Instance CPU Pinning |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-130 | IPv6 Underlay Deployment |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-133 | FDIO with Honeycomb Agent |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-141 | Integrate VSPERF into Apex |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-172 | Enable ONOS SFC |
++--------------------------------------+--------------------------------------+
+
+Bug Corrections
+~~~~~~~~~~~~~~~
+
+**JIRA TICKETS:**
+
++--------------------------------------+--------------------------------------+
+| **JIRA REFERENCE** | **SLOGAN** |
+| | |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-86 | Need ability to specify number of |
+| | compute nodes |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-180 | Baremetal deployment error: Failed to|
+| | mount root partition /dev/sda on |
+| | /mnt/rootfs |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-161 | Heat autoscaling stack creation fails|
+| | for non-admin users |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-198 | Missing NAT iptables rule for public |
+| | network in instack VM |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-147 | Installer doesn't generate/distribute|
+| | SSH keys between compute nodes |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-109 | ONOS routes local subnet traffic to |
+| | GW |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-146 | Swift service present in available |
+| | endpoints |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-160 | Enable force_metadata to support |
+| | subnets with VM as the router |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-114 | OpenDaylight GUI is not available |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-100 | DNS1 and DNS2 should be handled in |
+| | nic bridging |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-155 | NIC Metric value not used when |
+| | bridging NICs |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-136 | 2 network deployment fails |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-89 | Deploy Ceph OSDs on compute nodes |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-137                       | Added arping as a dependency for     |
+| | ONOS deployments |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-121 | VM Storage deletion intermittently |
+| | fails |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-182 | Nova services not correctly deployed |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-153 | brbm bridge not created in jumphost |
++--------------------------------------+--------------------------------------+
+
+Deliverables
+------------
+
+Software Deliverables
+~~~~~~~~~~~~~~~~~~~~~
+- Apex .iso file
+- Apex overcloud .rpm (opnfv-apex) - For nosdn and OpenDaylight Scenarios
+- Apex overcloud onos .rpm (opnfv-apex-onos) - ONOS Scenarios
+- Apex overcloud ODL SFC .rpm (opnfv-apex-opendaylight-sfc) - ODL SFC Scenario
+- Apex undercloud .rpm (opnfv-apex-undercloud)
+- Apex common .rpm (opnfv-apex-common)
+- build.sh - Builds the above artifacts
+- opnfv-deploy - Automatically deploys Target OPNFV System
+- opnfv-clean - Automatically resets a Target OPNFV Deployment
+- opnfv-util - Utility to connect to or debug Overcloud nodes + OpenDaylight
+
+Documentation Deliverables
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+- OPNFV Installation instructions for the Colorado release with the Apex
+ deployment toolchain - ver. 1.0.0
+- OPNFV Release Notes for the Colorado release with the Apex deployment
+ toolchain - ver. 1.0.0 (this document)
+
+Known Limitations, Issues and Workarounds
+=========================================
+
+System Limitations
+------------------
+
+**Max number of blades:** 1 Apex undercloud, 3 Controllers, 20 Compute blades
+
+**Min number of blades:** 1 Apex undercloud, 1 Controller, 1 Compute blade
+
+**Storage:** Ceph is the only supported storage configuration.
+
+**Min master requirements:** At least 16GB of RAM for baremetal jumphost,
+24GB for virtual deployments (noHA).
+
+
+Known Issues
+------------
+
+**JIRA TICKETS:**
+
++--------------------------------------+--------------------------------------+
+| **JIRA REFERENCE** | **SLOGAN** |
+| | |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-203 | Swift proxy enabled and fails in noha|
+| | deployments |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-215 | Keystone services not configured and |
+| | the error is silently ignored (VLAN |
+| | Deployments) |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-208 | Need ability to specify which NIC to |
+| | place VLAN on |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-254 | Add dynamic hugepages configuration |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-138 | Unclear error message when interface |
+| | set to dhcp |
++--------------------------------------+--------------------------------------+
+
+
+Workarounds
+-----------
+**-**
+
+Scenario specific release notes
+===============================
+
+Scenario os-odl_l3-nofeature known issues
+-----------------------------------------
+
+* `APEX-112 <https://jira.opnfv.org/browse/APEX-112>`_:
+ ODL routes local subnet traffic to GW
+
+Scenario os-odl_l2-nofeature known issues
+-----------------------------------------
+
+* `APEX-149 <https://jira.opnfv.org/browse/APEX-149>`_:
+ Openflow rules are populated very slowly
+
+Scenario os-odl_l2-bgpvpn known issues
+--------------------------------------
+
+* `APEX-278 <https://jira.opnfv.org/browse/APEX-278>`_:
+ Duplicate neutron config class declaration for SDNVPN
+
+Scenario os-onos-nofeature/os-onos-sfc known issues
+----------------------------------------------------
+
+* `APEX-281 <https://jira.opnfv.org/browse/APEX-281>`_:
+ ONOS sometimes fails to provide addresses to instances
+
+Scenario os-odl_l2-sfc-noha known issues
+----------------------------------------
+
+* `APEX-275 <https://jira.opnfv.org/browse/APEX-275>`_:
+ Metadata fails in Boron
+
+Scenario os-nosdn-ovs known issues
+----------------------------------
+
+* `APEX-274 <https://jira.opnfv.org/browse/APEX-274>`_:
+ OVS DPDK scenario does not create vhost user ports
+
+Scenario os-odl_l2-fdio-noha known issues
+-----------------------------------------
+
+* `FDS-16 <https://jira.opnfv.org/browse/FDS-16>`_:
+ Security group configuration through nova leads
+ to vhostuser port connection issues
+* `FDS-62 <https://jira.opnfv.org/browse/FDS-62>`_:
+ APEX - Increase number of files MariaDB can open
+* `FDS-79 <https://jira.opnfv.org/browse/FDS-79>`_:
+  Sometimes (especially in bulk create/delete operations,
+  when multiple networks/ports are created within a short time)
+  OpenDaylight doesn't accept creation requests
+* `FDS-80 <https://jira.opnfv.org/browse/FDS-80>`_:
+ After launching a VM it stayed forever in BUILD status.
+  Also further operations related to this VM (volume attachment etc.)
+ caused problems
+* `FDS-81 <https://jira.opnfv.org/browse/FDS-81>`_:
+  After functest finishes, there are two bds on the computes and
+  none on the controller
+* `FDS-82 <https://jira.opnfv.org/browse/FDS-82>`_:
+  Nova list shows no VMs but there are some on the computes in a paused state
+* `APEX-217 <https://jira.opnfv.org/browse/APEX-217>`_:
+ qemu not configured with correct group:user
+
+.. _HAIssues:
+
+General HA scenario known issues
+--------------------------------
+
+* `COPPER-22 <https://jira.opnfv.org/browse/COPPER-22>`_:
+ Congress service HA deployment is not yet supported/verified.
+* `APEX-276 <https://jira.opnfv.org/browse/APEX-276>`_:
+ ODL HA unstable and crashes frequently
+
+Test Result
+===========
+
+The Colorado release with the Apex deployment toolchain has undergone QA
+test runs with the following results:
+
++--------------------------------------+--------------------------------------+
+| **TEST-SUITE** | **Results:** |
+| | |
++--------------------------------------+--------------------------------------+
+| **-** | **-** |
++--------------------------------------+--------------------------------------+
+
+
+References
+==========
+
+For more information on the OPNFV Colorado release, please see:
+
+http://wiki.opnfv.org/releases/Colorado
+
+:Authors: Tim Rozet (trozet@redhat.com)
+:Authors: Dan Radez (dradez@redhat.com)
+:Version: 2.1.0
diff --git a/lib/common-functions.sh b/lib/common-functions.sh
index 6941093c..2d113450 100644
--- a/lib/common-functions.sh
+++ b/lib/common-functions.sh
@@ -33,12 +33,12 @@ function find_ip {
af=$2
fi
- python3.4 -B $LIB/python/apex_python_utils.py find-ip -i $1 -af $af
+ python3 -B $LIB/python/apex_python_utils.py find-ip -i $1 -af $af
}
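
For context, a hedged usage sketch of the helper above (the interface name is
illustrative, and $LIB must already point at the Apex lib directory):

    find_ip em1 4    # print the IPv4 address configured on em1
    find_ip em1 6    # same lookup for the IPv6 address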
##attach interface to OVS and set the network config correctly
##params: bride to attach to, interface to attach, network type (optional)
-##public indicates attaching to a public interface
+##external indicates attaching to an external interface
function attach_interface_to_ovs {
local bridge interface
local if_ip if_mask if_gw if_file ovs_file if_prefix
@@ -72,15 +72,15 @@ function attach_interface_to_ovs {
if [ -z "$if_mask" ]; then
# we can look for PREFIX here, then convert it to NETMASK
- if_prefix=$(sed -n 's/^PREFIX=\(.*\)$/\1/p' ${if_file})
+ if_prefix=$(sed -n 's/^PREFIX=[^0-9]*\([0-9][0-9]*\)[^0-9]*$/\1/p' ${if_file})
if_mask=$(prefix2mask ${if_prefix})
fi
if [[ -z "$if_ip" || -z "$if_mask" ]]; then
echo "ERROR: IPADDR or NETMASK/PREFIX missing for ${interface}"
return 1
- elif [[ -z "$if_gw" && "$3" == "public_network" ]]; then
- echo "ERROR: GATEWAY missing for ${interface}, which is public"
+ elif [[ -z "$if_gw" && "$3" == "external" ]]; then
+ echo "ERROR: GATEWAY missing for ${interface}, which is external"
return 1
fi
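
The tightened PREFIX pattern above matters when an ifcfg file quotes the value;
a quick before/after sketch:

    $ echo 'PREFIX="24"' | sed -n 's/^PREFIX=\(.*\)$/\1/p'
    "24"    # old pattern keeps the quotes, which breaks prefix2mask
    $ echo 'PREFIX="24"' | sed -n 's/^PREFIX=[^0-9]*\([0-9][0-9]*\)[^0-9]*$/\1/p'
    24      # new pattern extracts a clean numeric prefix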
diff --git a/lib/configure-deps-functions.sh b/lib/configure-deps-functions.sh
index 2c5411e3..1d238f87 100755
--- a/lib/configure-deps-functions.sh
+++ b/lib/configure-deps-functions.sh
@@ -33,13 +33,9 @@ function configure_deps {
systemctl status libvirtd || systemctl start libvirtd
systemctl status openvswitch || systemctl start openvswitch
- # If flat we only use admin network
- if [[ "$net_isolation_enabled" == "FALSE" ]]; then
- virsh_enabled_networks="admin_network"
- enabled_network_list="admin_network"
- # For baremetal we only need to create/attach Undercloud to admin and public
- elif [ "$virtual" == "FALSE" ]; then
- virsh_enabled_networks="admin_network public_network"
+ # For baremetal we only need to create/attach Undercloud to admin and external
+ if [ "$virtual" == "FALSE" ]; then
+ virsh_enabled_networks="admin external"
else
virsh_enabled_networks=$enabled_network_list
fi
@@ -54,7 +50,7 @@ function configure_deps {
for network in ${enabled_network_list}; do
echo "${blue}INFO: Creating Virsh Network: $network & OVS Bridge: ${NET_MAP[$network]}${reset}"
ovs-vsctl list-br | grep "^${NET_MAP[$network]}$" > /dev/null || ovs-vsctl add-br ${NET_MAP[$network]}
- virsh net-list --all | grep $network > /dev/null || (cat > ${libvirt_dir}/apex-virsh-net.xml && virsh net-define ${libvirt_dir}/apex-virsh-net.xml) << EOF
+ virsh net-list --all | grep " $network " > /dev/null || (cat > ${libvirt_dir}/apex-virsh-net.xml && virsh net-define ${libvirt_dir}/apex-virsh-net.xml) << EOF
<network>
<name>$network</name>
<forward mode='bridge'/>
@@ -62,7 +58,7 @@ function configure_deps {
<virtualport type='openvswitch'/>
</network>
EOF
- if ! (virsh net-list --all | grep $network > /dev/null); then
+ if ! (virsh net-list --all | grep " $network " > /dev/null); then
echo "${red}ERROR: unable to create network: ${network}${reset}"
exit 1;
fi
@@ -76,7 +72,7 @@ EOF
# bridge interfaces to correct OVS instances for baremetal deployment
for network in ${enabled_network_list}; do
- if [[ "$network" != "admin_network" && "$network" != "public_network" ]]; then
+ if [[ "$network" != "admin" && "$network" != "external" ]]; then
continue
fi
this_interface=$(eval echo \${${network}_bridged_interface})
@@ -95,11 +91,17 @@ EOF
done
else
for network in ${OPNFV_NETWORK_TYPES}; do
+ if ! ovs-vsctl --may-exist add-br ${NET_MAP[$network]}; then
+      echo -e "${red}ERROR: Failed to create ovs bridge ${NET_MAP[$network]}${reset}"
+ exit 1
+ fi
echo "${blue}INFO: Creating Virsh Network: $network${reset}"
- virsh net-list --all | grep $network > /dev/null || (cat > ${libvirt_dir}/apex-virsh-net.xml && virsh net-define ${libvirt_dir}/apex-virsh-net.xml) << EOF
+ virsh net-list --all | grep " $network " > /dev/null || (cat > ${libvirt_dir}/apex-virsh-net.xml && virsh net-define ${libvirt_dir}/apex-virsh-net.xml) << EOF
<network ipv6='yes'>
<name>$network</name>
+<forward mode='bridge'/>
<bridge name='${NET_MAP[$network]}'/>
+<virtualport type='openvswitch'/>
</network>
EOF
if ! (virsh net-list --all | grep $network > /dev/null); then
@@ -112,7 +114,7 @@ EOF
done
echo -e "${blue}INFO: Bridges set: ${reset}"
- brctl show
+ ovs-vsctl list-br
fi
echo -e "${blue}INFO: virsh networks set: ${reset}"
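
The added spaces in the grep patterns above are easy to miss but are the point
of the change: with networks renamed to short words like "admin", a bare grep
would also match longer names. A small illustration:

    $ printf ' admin_network   active\n' | grep admin       # false positive
     admin_network   active
    $ printf ' admin_network   active\n' | grep " admin "   # no match, as intended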
diff --git a/lib/overcloud-deploy-functions.sh b/lib/overcloud-deploy-functions.sh
index e278a68e..2066f15a 100755
--- a/lib/overcloud-deploy-functions.sh
+++ b/lib/overcloud-deploy-functions.sh
@@ -110,7 +110,11 @@ EOF
-a overcloud-full.qcow2
fi
else
+ sudo sed -i '/NeutronOVSDataPathType:/c\ NeutronOVSDataPathType: netdev' /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum install -y /root/dpdk_rpms/*" \
+ --run-command "sed -i '/RuntimeDirectoryMode=.*/d' /usr/lib/systemd/system/openvswitch-nonetwork.service" \
+ --run-command "printf \"%s\\n\" RuntimeDirectoryMode=0775 Group=qemu UMask=0002 >> /usr/lib/systemd/system/openvswitch-nonetwork.service" \
+ --run-command "sed -i 's/\\(^\\s\\+\\)\\(start_daemon "$OVS_VSWITCHD_PRIORITY"\\)/\\1umask 0002 \\&\\& \\2/' /usr/share/openvswitch/scripts/ovs-ctl" \
-a overcloud-full.qcow2
fi
EOI
@@ -178,8 +182,20 @@ EOI
# set NIC heat params and resource registry
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-sed -i '/TenantNIC:/c\ TenantNIC: '${private_network_compute_interface} opnfv-environment.yaml
-sed -i '/PublicNIC:/c\ PublicNIC: '${public_network_compute_interface} opnfv-environment.yaml
+if [ -n "${private_network_compute_interface}" ]; then
+ sudo sed -i '/ComputeTenantNIC:/c\ ComputeTenantNIC: '${private_network_compute_interface} /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
+fi
+if [ -n "${private_network_controller_interface}" ]; then
+ sudo sed -i '/ControllerTenantNIC:/c\ ControllerTenantNIC: '${private_network_controller_interface} /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
+fi
+# TODO: PublicNIC is not used today; however, in the future we'll bind the public NIC to DPDK as well for certain scenarios.
+# At that time, we'll need to make sure the public network is enabled.
+if [ -n "${public_network_compute_interface}" ]; then
+ sudo sed -i '/ComputePublicNIC:/c\ ComputePublicNIC: '${public_network_compute_interface} /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
+fi
+if [ -n "${public_network_controller_interface}" ]; then
+ sudo sed -i '/ControllerPublicNIC:/c\ ControllerPublicNIC: '${public_network_controller_interface} /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
+fi
EOI
DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml"
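
To make the substitutions above concrete, a hedged check after they run (the
file path comes from the commands above; expected keys are illustrative):

    # Confirm the DPDK datapath and NIC bindings landed in numa.yaml
    grep -E 'NeutronOVSDataPathType|TenantNIC|PublicNIC' \
        /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml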
@@ -188,6 +204,10 @@ EOI
# make sure ceph is installed
DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml"
+ #DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml"
+ DEPLOY_OPTIONS+=" -e network-environment.yaml"
+
+
# get number of nodes available in inventory
num_control_nodes=$(ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "grep -c profile:control /home/stack/instackenv.json")
num_compute_nodes=$(ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "grep -c profile:compute /home/stack/instackenv.json")
@@ -217,14 +237,7 @@ EOI
DEPLOY_OPTIONS+=" --compute-scale ${num_compute_nodes}"
fi
- if [[ "$net_isolation_enabled" == "TRUE" ]]; then
- #DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml"
- DEPLOY_OPTIONS+=" -e network-environment.yaml"
- fi
-
- if [[ "$ha_enabled" == "True" ]] || [[ "$net_isolation_enabled" == "TRUE" ]]; then
- DEPLOY_OPTIONS+=" --ntp-server $ntp_server"
- fi
+ DEPLOY_OPTIONS+=" --ntp-server $ntp_server"
DEPLOY_OPTIONS+=" --control-flavor control --compute-flavor compute"
if [[ "$virtual" == "TRUE" ]]; then
diff --git a/lib/parse-functions.sh b/lib/parse-functions.sh
index 40cdb826..84da75c5 100755
--- a/lib/parse-functions.sh
+++ b/lib/parse-functions.sh
@@ -10,46 +10,6 @@
# Parser functions used by OPNFV Apex
-##translates yaml into variables
-##params: filename, prefix (ex. "config_")
-##usage: parse_yaml opnfv_ksgen_settings.yml "config_"
-parse_yaml() {
- local prefix=$2
- local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034')
- sed -ne "s|^\($s\)\($w\)$s:$s\"\(.*\)\"$s\$|\1$fs\2$fs\3|p" \
- -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $1 |
- awk -F$fs '{
- indent = length($1)/2;
- vname[indent] = $2;
- for (i in vname) {if (i > indent) {delete vname[i]}}
- if (length($3) > 0) {
- vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
- printf("%s%s%s=%s\n", "'$prefix'",vn, $2, $3);
- }
- }'
-}
-
-##parses variable from a string with '='
-##and removes global prefix
-##params: string, prefix
-##usage: parse_setting_var 'deploy_myvar=2' 'deploy_'
-parse_setting_var() {
- local mystr=$1
- local prefix=$2
- if echo $mystr | grep -E "^.+\=" > /dev/null; then
- echo $(echo $mystr | grep -Eo "^.+\=" | tr -d '=' | sed 's/^'"$prefix"'//')
- else
- return 1
- fi
-}
-##parses value from a string with '='
-##params: string
-##usage: parse_setting_value
-parse_setting_value() {
- local mystr=$1
- echo $(echo $mystr | grep -Eo "\=.*$" | tr -d '=')
-}
-
##parses network settings yaml into globals
parse_network_settings() {
local output parse_ext
@@ -65,7 +25,7 @@ parse_network_settings() {
done
fi
- if output=$(python3 -B $LIB/python/apex_python_utils.py parse-net-settings -s $NETSETS $net_isolation_arg -e $CONFIG/network-environment.yaml $parse_ext); then
+ if output=$(python3 -B $LIB/python/apex_python_utils.py parse-net-settings -s $NETSETS -td $APEX_TMP_DIR -e $CONFIG/network-environment.yaml $parse_ext); then
echo -e "${blue}${output}${reset}"
eval "$output"
else
@@ -74,11 +34,7 @@ parse_network_settings() {
fi
if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
- if [ "$net_isolation_enabled" == "FALSE" ]; then
- echo -e "${red}ERROR: flat network is not supported with ovs-dpdk ${reset}"
- exit 1
- fi
- if [[ ! $enabled_network_list =~ "private_network" ]]; then
+ if [[ ! $enabled_network_list =~ "tenant" ]]; then
echo -e "${red}ERROR: tenant network is not enabled for ovs-dpdk ${reset}"
exit 1
fi
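
With the shell-side parse_yaml helpers removed, all settings parsing now flows
through the Python utility; a standalone invocation sketch for debugging (the
paths are assumptions based on this repo's layout):

    # Emits the shell variable assignments that parse_network_settings eval's
    python3 -B lib/python/apex_python_utils.py parse-net-settings \
        -s config/network/network_settings.yaml \
        -td /tmp/apex -e network-environment.yaml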
diff --git a/lib/post-install-functions.sh b/lib/post-install-functions.sh
index 7e7db5ca..d21b8366 100755
--- a/lib/post-install-functions.sh
+++ b/lib/post-install-functions.sh
@@ -11,9 +11,9 @@
##Post configuration after install
##params: none
function configure_post_install {
- local opnfv_attach_networks ovs_ip ip_range net_cidr tmp_ip af public_network_ipv6
- public_network_ipv6=False
- opnfv_attach_networks="admin_network public_network"
+ local opnfv_attach_networks ovs_ip ip_range net_cidr tmp_ip af external_network_ipv6
+ external_network_ipv6=False
+ opnfv_attach_networks="admin external"
echo -e "${blue}INFO: Post Install Configuration Running...${reset}"
@@ -46,8 +46,8 @@ EOI
af=4
else
af=6
- if [ "$network" == "public_network" ]; then
- public_network_ipv6=True
+ if [ "$network" == "external" ]; then
+ external_network_ipv6=True
fi
#enable ipv6 on bridge interface
echo 0 > /proc/sys/net/ipv6/conf/${NET_MAP[$network]}/disable_ipv6
@@ -87,15 +87,15 @@ EOI
source overcloudrc
set -o errexit
echo "Configuring Neutron external network"
-if [[ -n "$public_network_vlan" && "$public_network_vlan" != 'native' ]]; then
- neutron net-create external --router:external=True --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') --provider:network_type vlan --provider:segmentation_id ${public_network_vlan} --provider:physical_network datacentre
+if [[ -n "$external_nic_mapping_compute_vlan" && "$external_nic_mapping_compute_vlan" != 'native' ]]; then
+ neutron net-create external --router:external=True --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') --provider:network_type vlan --provider:segmentation_id ${external_nic_mapping_compute_vlan} --provider:physical_network datacentre
else
neutron net-create external --router:external=True --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }')
fi
-if [ "$public_network_ipv6" == "True" ]; then
- neutron subnet-create --name external-net --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') external --ip_version 6 --ipv6_ra_mode slaac --ipv6_address_mode slaac --gateway ${public_network_gateway} --allocation-pool start=${public_network_floating_ip_range%%,*},end=${public_network_floating_ip_range##*,} ${public_network_cidr}
+if [ "$external_network_ipv6" == "True" ]; then
+ neutron subnet-create --name external-net --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') external --ip_version 6 --ipv6_ra_mode slaac --ipv6_address_mode slaac --gateway ${external_gateway} --allocation-pool start=${external_floating_ip_range%%,*},end=${external_floating_ip_range##*,} ${external_cidr}
else
- neutron subnet-create --name external-net --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') --disable-dhcp external --gateway ${public_network_gateway} --allocation-pool start=${public_network_floating_ip_range%%,*},end=${public_network_floating_ip_range##*,} ${public_network_cidr}
+ neutron subnet-create --name external-net --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') --disable-dhcp external --gateway ${external_gateway} --allocation-pool start=${external_floating_ip_range%%,*},end=${external_floating_ip_range##*,} ${external_cidr}
fi
echo "Removing sahara endpoint and service"
@@ -142,14 +142,14 @@ if [ "${deploy_options_array['congress']}" == 'True' ]; then
fi
EOI
- # for virtual, we NAT public network through Undercloud
+ # for virtual, we NAT external network through Undercloud
# same goes for baremetal if only jumphost has external connectivity
- if [ "$virtual" == "TRUE" ] || ! test_overcloud_connectivity && [ "$public_network_ipv6" != "True" ]; then
- if ! configure_undercloud_nat ${public_network_cidr}; then
- echo -e "${red}ERROR: Unable to NAT undercloud with external net: ${public_network_cidr}${reset}"
+ if [ "$virtual" == "TRUE" ] || ! test_overcloud_connectivity && [ "$external_network_ipv6" != "True" ]; then
+ if ! configure_undercloud_nat ${external_cidr}; then
+ echo -e "${red}ERROR: Unable to NAT undercloud with external net: ${external_cidr}${reset}"
exit 1
else
- echo -e "${blue}INFO: Undercloud VM has been setup to NAT Overcloud public network${reset}"
+ echo -e "${blue}INFO: Undercloud VM has been setup to NAT Overcloud external network${reset}"
fi
fi
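
Note the IPv6/IPv4 split above: the renamed external_* variables drive an external subnet created with SLAAC for IPv6, versus DHCP disabled for IPv4. A small sketch of that branching as data, with all argument names and values illustrative rather than taken from the patch:

    # Illustrative only: the subnet arguments the two branches above select.
    def external_subnet_args(ipv6, gateway, pool_start, pool_end, cidr):
        args = {'name': 'external-net', 'gateway': gateway,
                'allocation_pool': (pool_start, pool_end), 'cidr': cidr}
        if ipv6:
            args.update(ip_version=6, ipv6_ra_mode='slaac',
                        ipv6_address_mode='slaac')
        else:
            args.update(ip_version=4, enable_dhcp=False)
        return args
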
diff --git a/lib/python/apex/common/constants.py b/lib/python/apex/common/constants.py
index dfb6267b..db0a9fd1 100644
--- a/lib/python/apex/common/constants.py
+++ b/lib/python/apex/common/constants.py
@@ -7,12 +7,15 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-ADMIN_NETWORK = 'admin_network'
-PRIVATE_NETWORK = 'private_network'
-PUBLIC_NETWORK = 'public_network'
-STORAGE_NETWORK = 'storage_network'
-API_NETWORK = 'api_network'
-OPNFV_NETWORK_TYPES = [ADMIN_NETWORK, PRIVATE_NETWORK, PUBLIC_NETWORK,
+ADMIN_NETWORK = 'admin'
+TENANT_NETWORK = 'tenant'
+EXTERNAL_NETWORK = 'external'
+STORAGE_NETWORK = 'storage'
+API_NETWORK = 'api'
+CONTROLLER = 'controller'
+COMPUTE = 'compute'
+
+OPNFV_NETWORK_TYPES = [ADMIN_NETWORK, TENANT_NETWORK, EXTERNAL_NETWORK,
STORAGE_NETWORK, API_NETWORK]
DNS_SERVERS = ["8.8.8.8", "8.8.4.4"]
COMPUTE = 'compute'
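
The network identifiers drop the _network suffix here, and private/public become tenant/external to match upstream TripleO naming. A hypothetical migration helper for old-style settings keys, assuming only the legacy names visible in the removed lines above:

    # Hypothetical helper, not part of this patch: map legacy settings keys
    # to the short network names defined above.
    LEGACY_NAME_MAP = {
        'admin_network': 'admin',
        'private_network': 'tenant',
        'public_network': 'external',
        'storage_network': 'storage',
        'api_network': 'api',
    }

    def modernize_keys(settings):
        """Return a copy of a settings dict with legacy network keys renamed."""
        return {LEGACY_NAME_MAP.get(k, k): v for k, v in settings.items()}
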
diff --git a/lib/python/apex/common/utils.py b/lib/python/apex/common/utils.py
index fe34096d..d623638c 100644
--- a/lib/python/apex/common/utils.py
+++ b/lib/python/apex/common/utils.py
@@ -19,5 +19,5 @@ def str2bool(var):
def parse_yaml(yaml_file):
with open(yaml_file) as f:
- parsed_dict = yaml.load(f)
+ parsed_dict = yaml.safe_load(f)
return parsed_dict
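
Switching from yaml.load to yaml.safe_load closes an arbitrary-code-execution hole: the default loader will construct arbitrary Python objects from !!python tags, while safe_load only builds plain scalars, lists, and dicts. A minimal illustration (the payload string is an example, not from Apex):

    import yaml

    payload = "!!python/object/apply:os.system ['echo pwned']"

    try:
        yaml.safe_load(payload)  # raises instead of running os.system
    except yaml.YAMLError as exc:
        print('rejected:', exc)
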
diff --git a/lib/python/apex/deploy_settings.py b/lib/python/apex/deploy_settings.py
index b70efdac..c27eecf9 100644
--- a/lib/python/apex/deploy_settings.py
+++ b/lib/python/apex/deploy_settings.py
@@ -40,7 +40,7 @@ class DeploySettings(dict):
init_dict = {}
if type(filename) is str:
with open(filename, 'r') as deploy_settings_file:
- init_dict = yaml.load(deploy_settings_file)
+ init_dict = yaml.safe_load(deploy_settings_file)
else:
# assume input is a dict to build from
init_dict = filename
diff --git a/lib/python/apex/inventory.py b/lib/python/apex/inventory.py
index f4a33b28..aa219680 100644
--- a/lib/python/apex/inventory.py
+++ b/lib/python/apex/inventory.py
@@ -24,7 +24,7 @@ class Inventory(dict):
init_dict = {}
if type(source) is str:
with open(source, 'r') as network_settings_file:
- yaml_dict = yaml.load(network_settings_file)
+ yaml_dict = yaml.safe_load(network_settings_file)
# collapse node identifiers from the structure
init_dict['nodes'] = list(map(lambda n: n[1],
yaml_dict['nodes'].items()))
diff --git a/lib/python/apex/network_environment.py b/lib/python/apex/network_environment.py
index 175f408f..5cb2d0cd 100644
--- a/lib/python/apex/network_environment.py
+++ b/lib/python/apex/network_environment.py
@@ -10,10 +10,12 @@
import yaml
import re
from .common.constants import (
+ CONTROLLER,
+ COMPUTE,
ADMIN_NETWORK,
- PRIVATE_NETWORK,
+ TENANT_NETWORK,
STORAGE_NETWORK,
- PUBLIC_NETWORK,
+ EXTERNAL_NETWORK,
API_NETWORK,
CONTROLLER_PRE,
COMPUTE_PRE,
@@ -56,116 +58,112 @@ class NetworkEnvironment(dict):
"""
def __init__(self, net_settings, filename, compute_pre_config=False,
controller_pre_config=False):
+ """
+ Create Network Environment according to Network Settings
+ """
init_dict = {}
if type(filename) is str:
with open(filename, 'r') as net_env_fh:
- init_dict = yaml.load(net_env_fh)
+ init_dict = yaml.safe_load(net_env_fh)
super().__init__(init_dict)
try:
- enabled_networks = net_settings.enabled_network_list
+ enabled_nets = net_settings.enabled_network_list
except:
raise NetworkEnvException('Invalid Network Setting object')
self._set_tht_dir()
- enabled_networks = net_settings.get_enabled_networks()
+ nets = net_settings['networks']
- admin_cidr = net_settings[ADMIN_NETWORK]['cidr']
+ admin_cidr = nets[ADMIN_NETWORK]['cidr']
admin_prefix = str(admin_cidr.prefixlen)
self[param_def]['ControlPlaneSubnetCidr'] = admin_prefix
self[param_def]['ControlPlaneDefaultRoute'] = \
- net_settings[ADMIN_NETWORK]['provisioner_ip']
- public_cidr = net_settings[PUBLIC_NETWORK]['cidr']
- self[param_def]['ExternalNetCidr'] = str(public_cidr)
- if net_settings[PUBLIC_NETWORK]['vlan'] != 'native':
- self[param_def]['NeutronExternalNetworkBridge'] = '""'
- self[param_def]['ExternalNetworkVlanID'] = \
- net_settings[PUBLIC_NETWORK]['vlan']
- public_range = \
- net_settings[PUBLIC_NETWORK]['usable_ip_range'].split(',')
- self[param_def]['ExternalAllocationPools'] = \
- [{'start':
- public_range[0],
- 'end': public_range[1]
- }]
- self[param_def]['ExternalInterfaceDefaultRoute'] = \
- net_settings[PUBLIC_NETWORK]['gateway']
+ nets[ADMIN_NETWORK]['installer_vm']['ip']
self[param_def]['EC2MetadataIp'] = \
- net_settings[ADMIN_NETWORK]['provisioner_ip']
+ nets[ADMIN_NETWORK]['installer_vm']['ip']
self[param_def]['DnsServers'] = net_settings['dns_servers']
- if public_cidr.version == 6:
- postfix = '/external_v6.yaml'
+ if EXTERNAL_NETWORK in enabled_nets:
+ external_cidr = nets[EXTERNAL_NETWORK][0]['cidr']
+ self[param_def]['ExternalNetCidr'] = str(external_cidr)
+ if type(nets[EXTERNAL_NETWORK][0]['installer_vm']['vlan']) is int:
+ self[param_def]['NeutronExternalNetworkBridge'] = '""'
+ self[param_def]['ExternalNetworkVlanID'] = \
+ nets[EXTERNAL_NETWORK][0]['installer_vm']['vlan']
+ external_range = nets[EXTERNAL_NETWORK][0]['usable_ip_range']
+ self[param_def]['ExternalAllocationPools'] = \
+ [{'start': str(external_range[0]),
+ 'end': str(external_range[1])}]
+ self[param_def]['ExternalInterfaceDefaultRoute'] = \
+ nets[EXTERNAL_NETWORK][0]['gateway']
+
+ if external_cidr.version == 6:
+ postfix = '/external_v6.yaml'
+ else:
+ postfix = '/external.yaml'
else:
- postfix = '/external.yaml'
+ postfix = '/noop.yaml'
# apply resource registry update for EXTERNAL_RESOURCES
self._config_resource_reg(EXTERNAL_RESOURCES, postfix)
- if PRIVATE_NETWORK in enabled_networks:
- priv_range = net_settings[PRIVATE_NETWORK][
- 'usable_ip_range'].split(',')
+ if TENANT_NETWORK in enabled_nets:
+ tenant_range = nets[TENANT_NETWORK]['usable_ip_range']
self[param_def]['TenantAllocationPools'] = \
- [{'start':
- priv_range[0],
- 'end': priv_range[1]
- }]
- priv_cidr = net_settings[PRIVATE_NETWORK]['cidr']
- self[param_def]['TenantNetCidr'] = str(priv_cidr)
- if priv_cidr.version == 6:
+ [{'start': str(tenant_range[0]),
+ 'end': str(tenant_range[1])}]
+ tenant_cidr = nets[TENANT_NETWORK]['cidr']
+ self[param_def]['TenantNetCidr'] = str(tenant_cidr)
+ if tenant_cidr.version == 6:
postfix = '/tenant_v6.yaml'
else:
postfix = '/tenant.yaml'
- if net_settings[PRIVATE_NETWORK]['vlan'] != 'native':
- self[param_def]['TenantNetworkVlanID'] = \
- net_settings[PRIVATE_NETWORK]['vlan']
+
+ tenant_vlan = self._get_vlan(nets[TENANT_NETWORK])
+ if type(tenant_vlan) is int:
+ self[param_def]['TenantNetworkVlanID'] = tenant_vlan
else:
postfix = '/noop.yaml'
# apply resource registry update for TENANT_RESOURCES
self._config_resource_reg(TENANT_RESOURCES, postfix)
- if STORAGE_NETWORK in enabled_networks:
- storage_range = net_settings[STORAGE_NETWORK][
- 'usable_ip_range'].split(',')
+ if STORAGE_NETWORK in enabled_nets:
+ storage_range = nets[STORAGE_NETWORK]['usable_ip_range']
self[param_def]['StorageAllocationPools'] = \
- [{'start':
- storage_range[0],
- 'end':
- storage_range[1]
- }]
- storage_cidr = net_settings[STORAGE_NETWORK]['cidr']
+ [{'start': str(storage_range[0]),
+ 'end': str(storage_range[1])}]
+ storage_cidr = nets[STORAGE_NETWORK]['cidr']
self[param_def]['StorageNetCidr'] = str(storage_cidr)
if storage_cidr.version == 6:
postfix = '/storage_v6.yaml'
else:
postfix = '/storage.yaml'
- if net_settings[STORAGE_NETWORK]['vlan'] != 'native':
- self[param_def]['StorageNetworkVlanID'] = \
- net_settings[STORAGE_NETWORK]['vlan']
+ storage_vlan = self._get_vlan(nets[STORAGE_NETWORK])
+ if type(storage_vlan) is int:
+ self[param_def]['StorageNetworkVlanID'] = storage_vlan
else:
postfix = '/noop.yaml'
# apply resource registry update for STORAGE_RESOURCES
self._config_resource_reg(STORAGE_RESOURCES, postfix)
- if API_NETWORK in enabled_networks:
- api_range = net_settings[API_NETWORK][
- 'usable_ip_range'].split(',')
+ if API_NETWORK in enabled_nets:
+ api_range = nets[API_NETWORK]['usable_ip_range']
self[param_def]['InternalApiAllocationPools'] = \
- [{'start': api_range[0],
- 'end': api_range[1]
- }]
- api_cidr = net_settings[API_NETWORK]['cidr']
+ [{'start': str(api_range[0]),
+ 'end': str(api_range[1])}]
+ api_cidr = nets[API_NETWORK]['cidr']
self[param_def]['InternalApiNetCidr'] = str(api_cidr)
if api_cidr.version == 6:
postfix = '/internal_api_v6.yaml'
else:
postfix = '/internal_api.yaml'
- if net_settings[API_NETWORK]['vlan'] != 'native':
- self[param_def]['InternalApiNetworkVlanID'] = \
- net_settings[API_NETWORK]['vlan']
+ api_vlan = self._get_vlan(nets[API_NETWORK])
+ if type(api_vlan) is int:
+ self[param_def]['InternalApiNetworkVlanID'] = api_vlan
else:
postfix = '/noop.yaml'
@@ -184,6 +182,14 @@ class NetworkEnvironment(dict):
for flag in IPV6_FLAGS:
self[param_def][flag] = True
+ def _get_vlan(self, network):
+ if type(network['nic_mapping'][CONTROLLER]['vlan']) is int:
+ return network['nic_mapping'][CONTROLLER]['vlan']
+ elif type(network['nic_mapping'][COMPUTE]['vlan']) is int:
+ return network['nic_mapping'][COMPUTE]['vlan']
+ else:
+ return 'native'
+
def _set_tht_dir(self):
self.tht_dir = None
for key, prefix in TENANT_RESOURCES.items():
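
_get_vlan resolves a network's VLAN ID from the new nic_mapping structure, preferring the controller role and falling back to compute; anything non-integer collapses to 'native'. A standalone sketch of the same logic, with invented input values:

    # Shape assumed from the settings consumed above; values are made up.
    network = {
        'nic_mapping': {
            'controller': {'members': ['nic3'], 'vlan': 'native'},
            'compute': {'members': ['nic3'], 'vlan': 101},
        }
    }

    def get_vlan(network):
        # mirrors NetworkEnvironment._get_vlan
        if type(network['nic_mapping']['controller']['vlan']) is int:
            return network['nic_mapping']['controller']['vlan']
        elif type(network['nic_mapping']['compute']['vlan']) is int:
            return network['nic_mapping']['compute']['vlan']
        return 'native'

    print(get_vlan(network))  # -> 101, so e.g. TenantNetworkVlanID gets set
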
diff --git a/lib/python/apex/network_settings.py b/lib/python/apex/network_settings.py
index ca91b8cf..006d18c3 100644
--- a/lib/python/apex/network_settings.py
+++ b/lib/python/apex/network_settings.py
@@ -10,20 +10,21 @@
import yaml
import logging
import ipaddress
+
+from copy import copy
+
from . import ip_utils
-from .common.utils import str2bool
+from .common import utils
from .common.constants import (
+ CONTROLLER,
+ COMPUTE,
+ ROLES,
+ DOMAIN_NAME,
+ DNS_SERVERS,
ADMIN_NETWORK,
- PRIVATE_NETWORK,
- PUBLIC_NETWORK,
- STORAGE_NETWORK,
- API_NETWORK,
+ EXTERNAL_NETWORK,
OPNFV_NETWORK_TYPES,
- DNS_SERVERS,
- DOMAIN_NAME,
- ROLES,
- COMPUTE,
- CONTROLLER)
+)
class NetworkSettings(dict):
@@ -38,15 +39,14 @@ class NetworkSettings(dict):
for deploy.sh consumption. This object will later be used directly as
deployment scripts move to Python.
"""
- def __init__(self, filename, network_isolation):
+ def __init__(self, filename):
init_dict = {}
if type(filename) is str:
with open(filename, 'r') as network_settings_file:
- init_dict = yaml.load(network_settings_file)
+ init_dict = yaml.safe_load(network_settings_file)
else:
# assume input is a dict to build from
init_dict = filename
-
super().__init__(init_dict)
if 'apex' in self:
@@ -63,52 +63,56 @@ class NetworkSettings(dict):
# merge the apex specific config into the first class settings
merge(self, copy(self['apex']))
- self.network_isolation = network_isolation
self.enabled_network_list = []
self.nics = {COMPUTE: {}, CONTROLLER: {}}
self.nics_specified = {COMPUTE: False, CONTROLLER: False}
self._validate_input()
+ def get_network(self, network):
+ if network == EXTERNAL_NETWORK and self['networks'][network]:
+ return self['networks'][network][0]
+ else:
+ return self['networks'][network]
+
def _validate_input(self):
"""
Validates the network settings file and populates all fields.
NetworkSettingsException will be raised if validation fails.
"""
- if ADMIN_NETWORK not in self or \
- not str2bool(self[ADMIN_NETWORK].get(
- 'enabled')):
- raise NetworkSettingsException("You must enable admin_network "
- "and configure it explicitly or "
- "use auto-detection")
- if self.network_isolation and \
- (PUBLIC_NETWORK not in self or not
- str2bool(self[PUBLIC_NETWORK].get(
- 'enabled'))):
- raise NetworkSettingsException("You must enable public_network "
+ if not self['networks'].get(ADMIN_NETWORK, {}).get('enabled', False):
+ raise NetworkSettingsException("You must enable admin network "
"and configure it explicitly or "
"use auto-detection")
for network in OPNFV_NETWORK_TYPES:
- if network in self:
- if str2bool(self[network].get('enabled')):
+ if network in self['networks']:
+ _network = self.get_network(network)
+ if _network.get('enabled', True):
logging.info("{} enabled".format(network))
self._config_required_settings(network)
+                nicmap = _network['nic_mapping']
+ iface = nicmap[CONTROLLER]['members'][0]
self._config_ip_range(network=network,
- setting='usable_ip_range',
+ interface=iface,
+ ip_range='usable_ip_range',
start_offset=21, end_offset=21)
- self._config_optional_settings(network)
self.enabled_network_list.append(network)
self._validate_overcloud_nic_order(network)
+ # TODO self._config_optional_settings(network)
else:
logging.info("{} disabled, will collapse with "
- "admin_network".format(network))
+ "admin network".format(network))
else:
logging.info("{} is not in specified, will collapse with "
- "admin_network".format(network))
+ "admin network".format(network))
+ if 'dns-domain' not in self:
+            self['dns-domain'] = DOMAIN_NAME
self['dns_servers'] = self.get('dns_servers', DNS_SERVERS)
- self['domain_name'] = self.get('domain_name', DOMAIN_NAME)
def _validate_overcloud_nic_order(self, network):
"""
@@ -116,42 +120,35 @@ class NetworkSettings(dict):
for network
If nic order is specified in a network for a profile, it should be
- specified for every network with that profile other than admin_network
+ specified for every network with that profile other than admin network
Duplicate nic names are also not allowed across different networks
:param network: network to detect if nic order present
:return: None
"""
-
for role in ROLES:
- interface = role+'_interface'
- nic_index = self.get_enabled_networks().index(network) + 1
- if interface in self[network]:
- if any(y == self[network][interface] for x, y in
- self.nics[role].items()):
- raise NetworkSettingsException("Duplicate {} already "
- "specified for "
- "another network"
- .format(self[network]
- [interface]))
- self.nics[role][network] = self[network][interface]
+ _network = self.get_network(network)
+ _nicmap = _network.get('nic_mapping', {})
+ _role = _nicmap.get(role, {})
+ interfaces = _role.get('members', [])
+
+ if interfaces:
+ interface = interfaces[0]
+ if type(_role.get('vlan', 'native')) is not int and \
+ any(y == interface for x, y in self.nics[role].items()):
+ raise NetworkSettingsException(
+ "Duplicate {} already specified for "
+ "another network".format(interface))
+ self.nics[role][network] = interface
self.nics_specified[role] = True
logging.info("{} nic order specified for network {"
"}".format(role, network))
- elif self.nics_specified[role]:
- logging.error("{} nic order not specified for network {"
- "}".format(role, network))
- raise NetworkSettingsException("Must specify {} for all "
- "enabled networks (other than "
- " admin) or not specify it for "
- "any".format(interface))
else:
- logging.info("{} nic order not specified for network {"
- "}. Will use logical default "
- "nic{}".format(interface, network, nic_index))
- self.nics[role][network] = 'nic' + str(nic_index)
- nic_index += 1
+ raise NetworkSettingsException(
+ "Interface members are not supplied for {} network "
+ "for the {} role. Please add nic assignments"
+ "".format(network, role))
def _config_required_settings(self, network):
"""
@@ -164,85 +161,93 @@ class NetworkSettings(dict):
given NIC in the system. The resulting config in settings object will
be an ipaddress.network object, replacing the NIC name.
"""
+ _network = self.get_network(network)
# if vlan not defined then default it to native
if network is not ADMIN_NETWORK:
- if 'vlan' not in self[network]:
- self[network]['vlan'] = 'native'
+ for role in ROLES:
+ if 'vlan' not in _network['nic_mapping'][role]:
+ _network['nic_mapping'][role]['vlan'] = 'native'
- cidr = self[network].get('cidr')
- nic_name = self[network].get('bridged_interface')
+ cidr = _network.get('cidr')
if cidr:
- cidr = ipaddress.ip_network(self[network]['cidr'])
- self[network]['cidr'] = cidr
+ cidr = ipaddress.ip_network(_network['cidr'])
+ _network['cidr'] = cidr
logging.info("{}_cidr: {}".format(network, cidr))
- return 0
- elif nic_name:
+ elif 'installer_vm' in _network:
+ ucloud_if_list = _network['installer_vm']['members']
# If cidr is not specified, we need to know if we should find
# IPv6 or IPv4 address on the interface
- if str2bool(self[network].get('ipv6')):
- address_family = 6
- else:
- address_family = 4
- nic_interface = ip_utils.get_interface(nic_name, address_family)
- if nic_interface:
- self[network]['bridged_interface'] = nic_interface
+ ip = ipaddress.ip_address(_network['installer_vm']['ip'])
+ nic_if = ip_utils.get_interface(ucloud_if_list[0], ip.version)
+ if nic_if:
+ ucloud_if_list = [nic_if]
logging.info("{}_bridged_interface: {}".
- format(network, nic_interface))
- return 0
+ format(network, nic_if))
else:
- raise NetworkSettingsException("Auto detection failed for {}: "
- "Unable to find valid ip for "
- "interface {}"
- .format(network, nic_name))
+ raise NetworkSettingsException(
+ "Auto detection failed for {}: Unable to find valid "
+ "ip for interface {}".format(network, ucloud_if_list[0]))
else:
- raise NetworkSettingsException("Auto detection failed for {}: "
- "either bridge_interface or cidr "
- "must be specified"
- .format(network))
+ raise NetworkSettingsException(
+ "Auto detection failed for {}: either installer_vm "
+ "members or cidr must be specified".format(network))
- def _config_ip_range(self, network, setting, start_offset=None,
- end_offset=None, count=None):
+ # undercloud settings
+ if network == ADMIN_NETWORK:
+ provisioner_ip = _network['installer_vm']['ip']
+ iface = _network['installer_vm']['members'][0]
+ if not provisioner_ip:
+ _network['installer_vm']['ip'] = self._gen_ip(network, 1)
+ self._config_ip_range(network=network, interface=iface,
+ ip_range='dhcp_range',
+ start_offset=2, count=9)
+ self._config_ip_range(network=network, interface=iface,
+ ip_range='introspection_range',
+ start_offset=11, count=9)
+ elif network == EXTERNAL_NETWORK:
+ provisioner_ip = _network['installer_vm']['ip']
+ iface = _network['installer_vm']['members'][0]
+ if not provisioner_ip:
+ _network['installer_vm']['ip'] = self._gen_ip(network, 1)
+ self._config_ip_range(network=network, interface=iface,
+ ip_range='floating_ip_range',
+ end_offset=2, count=20)
+
+            gateway = _network.get('gateway')
+ interface = _network['installer_vm']['ip']
+ self._config_gateway(network, gateway, interface)
+
+ def _config_ip_range(self, network, ip_range, interface=None,
+ start_offset=None, end_offset=None, count=None):
"""
Configures IP range for a given setting.
-
If the setting is already specified, no change will be made.
-
The spec for start_offset, end_offset and count are identical to
ip_utils.get_ip_range.
"""
- ip_range = self[network].get(setting)
- interface = self[network].get('bridged_interface')
-
- if not ip_range:
- cidr = self[network].get('cidr')
- ip_range = ip_utils.get_ip_range(start_offset=start_offset,
- end_offset=end_offset,
- count=count,
- cidr=cidr,
- interface=interface)
- self[network][setting] = ip_range
-
- logging.info("{}_{}: {}".format(network, setting, ip_range))
-
- def _config_ip(self, network, setting, offset):
+ _network = self.get_network(network)
+ if ip_range not in _network:
+ cidr = _network.get('cidr')
+ _ip_range = ip_utils.get_ip_range(start_offset=start_offset,
+ end_offset=end_offset,
+ count=count,
+ cidr=cidr,
+ interface=interface)
+ _network[ip_range] = _ip_range.split(',')
+
+ logging.info("Config IP Range: {} {}".format(network, ip_range))
+
+ def _gen_ip(self, network, offset):
"""
- Configures IP for a given setting.
-
- If the setting is already specified, no change will be made.
-
- The spec for offset is identical to ip_utils.get_ip
+        Generate an IP at the given offset within the given network
"""
- ip = self[network].get(setting)
- interface = self[network].get('bridged_interface')
-
- if not ip:
- cidr = self[network].get('cidr')
- ip = ip_utils.get_ip(offset, cidr, interface)
- self[network][setting] = ip
-
- logging.info("{}_{}: {}".format(network, setting, ip))
+ _network = self.get_network(network)
+ cidr = _network.get('cidr')
+ ip = ip_utils.get_ip(offset, cidr)
+ logging.info("Config IP: {} {}".format(network, ip))
+ return ip
def _config_optional_settings(self, network):
"""
@@ -257,42 +262,41 @@ class NetworkSettings(dict):
- gateway
"""
if network == ADMIN_NETWORK:
- self._config_ip(network, 'provisioner_ip', 1)
- self._config_ip_range(network=network, setting='dhcp_range',
+ self._config_ip(network, None, 'provisioner_ip', 1)
+ self._config_ip_range(network=network,
+ ip_range='dhcp_range',
start_offset=2, count=9)
self._config_ip_range(network=network,
- setting='introspection_range',
+ ip_range='introspection_range',
start_offset=11, count=9)
- elif network == PUBLIC_NETWORK:
- self._config_ip(network, 'provisioner_ip', 1)
+ elif network == EXTERNAL_NETWORK:
+ self._config_ip(network, None, 'provisioner_ip', 1)
self._config_ip_range(network=network,
- setting='floating_ip_range',
+ ip_range='floating_ip_range',
end_offset=2, count=20)
self._config_gateway(network)
- def _config_gateway(self, network):
+ def _config_gateway(self, network, gateway, interface):
"""
Configures gateway setting for a given network.
If cidr is specified, we always use the first address in the address
space for gateway. Otherwise, we detect the system gateway.
"""
- gateway = self[network].get('gateway')
- interface = self[network].get('bridged_interface')
-
+ _network = self.get_network(network)
if not gateway:
- cidr = self[network].get('cidr')
+ cidr = _network.get('cidr')
if cidr:
- gateway = ip_utils.get_ip(1, cidr)
+ _gateway = ip_utils.get_ip(1, cidr)
else:
- gateway = ip_utils.find_gateway(interface)
+ _gateway = ip_utils.find_gateway(interface)
- if gateway:
- self[network]['gateway'] = gateway
+            if _gateway:
+                gateway = _network['gateway'] = _gateway
else:
raise NetworkSettingsException("Failed to set gateway")
- logging.info("{}_gateway: {}".format(network, gateway))
+ logging.info("Config Gateway: {} {}".format(network, gateway))
def dump_bash(self, path=None):
"""
@@ -301,45 +305,50 @@ class NetworkSettings(dict):
If optional path is provided, bash string will be written to the file
instead of stdout.
"""
+ def flatten(name, obj, delim=','):
+ """
+ flatten lists to delim separated strings
+            flatten dicts to underscored key names and string values
+ """
+ if type(obj) is list:
+ return "{}=\'{}\'\n".format(name,
+ delim.join(map(lambda x: str(x),
+ obj)))
+ elif type(obj) is dict:
+ flat_str = ''
+ for k in obj:
+ flat_str += flatten("{}_{}".format(name, k), obj[k])
+ return flat_str
+ elif type(obj) is str:
+ return "{}='{}'\n".format(name, obj)
+ else:
+ return "{}={}\n".format(name, str(obj))
+
bash_str = ''
for network in self.enabled_network_list:
- for key, value in self[network].items():
- bash_str += "{}_{}={}\n".format(network, key, value)
- bash_str += "enabled_network_list='{}'\n" \
- .format(' '.join(self.enabled_network_list))
- bash_str += "ip_addr_family={}\n".format(self.get_ip_addr_family())
- dns_list = ""
- for dns_server in self['dns_servers']:
- dns_list = dns_list + "{} ".format(dns_server)
- dns_list = dns_list.strip()
- bash_str += "dns_servers=\'{}\'\n".format(dns_list)
- bash_str += "domain_name=\'{}\'\n".format(self['domain_name'])
+ _network = self.get_network(network)
+ bash_str += flatten(network, _network)
+ bash_str += flatten('enabled_network_list',
+ self.enabled_network_list, ' ')
+ bash_str += flatten('ip_addr_family', self.get_ip_addr_family())
+ bash_str += flatten('dns_servers', self['dns_servers'], ' ')
+ bash_str += flatten('domain_name', self['dns-domain'], ' ')
if path:
with open(path, 'w') as file:
file.write(bash_str)
else:
print(bash_str)
- def get_ip_addr_family(self):
+ def get_ip_addr_family(self,):
"""
Returns IP address family for current deployment.
If any enabled network has IPv6 CIDR, the deployment is classified as
IPv6.
"""
- for network in self.enabled_network_list:
- cidr = ipaddress.ip_network(self[network]['cidr'])
- if cidr.version == 6:
- return 6
-
- return 4
-
- def get_enabled_networks(self):
- """
- Getter for enabled network list
- :return: list of enabled networks
- """
- return self.enabled_network_list
+ return max([
+ ipaddress.ip_network(self.get_network(n)['cidr']).version
+ for n in self.enabled_network_list])
class NetworkSettingsException(Exception):
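
The new flatten helper in dump_bash is what ties the nested networks dict to the flat bash variables the shell side reads (for example admin_installer_vm_ip and admin_dhcp_range in undercloud-functions.sh below). A self-contained sketch of the same flattening, with a trimmed, illustrative input:

    def flatten(name, obj, delim=','):
        # same behavior as the nested helper in dump_bash above
        if type(obj) is list:
            return "{}='{}'\n".format(name, delim.join(str(x) for x in obj))
        elif type(obj) is dict:
            return ''.join(flatten('{}_{}'.format(name, k), v)
                           for k, v in obj.items())
        elif type(obj) is str:
            return "{}='{}'\n".format(name, obj)
        return '{}={}\n'.format(name, obj)

    print(flatten('admin', {'installer_vm': {'ip': '192.0.2.1'},
                            'dhcp_range': ['192.0.2.2', '192.0.2.10']}))
    # admin_installer_vm_ip='192.0.2.1'
    # admin_dhcp_range='192.0.2.2,192.0.2.10'
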
diff --git a/lib/python/apex_python_utils.py b/lib/python/apex_python_utils.py
index ebc49dc5..b0ebb270 100755
--- a/lib/python/apex_python_utils.py
+++ b/lib/python/apex_python_utils.py
@@ -14,8 +14,6 @@ import logging
import os
import yaml
-from copy import copy
-
from jinja2 import Environment
from jinja2 import FileSystemLoader
@@ -35,15 +33,14 @@ def parse_net_settings(args):
Args:
- file: string
file to network_settings.yaml file
- - network_isolation: bool
- enable or disable network_isolation
"""
- settings = NetworkSettings(args.net_settings_file,
- args.network_isolation)
+ settings = NetworkSettings(args.net_settings_file)
net_env = NetworkEnvironment(settings, args.net_env_file,
args.compute_pre_config,
args.controller_pre_config)
- dump_yaml(dict(net_env), '/tmp/network-environment.yaml')
+ target = args.target_dir.split('/')
+ target.append('network-environment.yaml')
+ dump_yaml(dict(net_env), '/'.join(target))
settings.dump_bash()
@@ -106,25 +103,15 @@ def build_nic_template(args):
"""
template_dir, template = args.template.rsplit('/', 1)
- netsets = NetworkSettings(args.net_settings_file,
- args.network_isolation)
- env = Environment(loader=FileSystemLoader(template_dir))
+ netsets = NetworkSettings(args.net_settings_file)
+ env = Environment(loader=FileSystemLoader(template_dir), autoescape=True)
template = env.get_template(template)
- # gather vlan values into a dict
- net_list = copy(netsets.enabled_network_list)
- net_list.remove(ADMIN_NETWORK)
- vlans_vals = map(lambda x: netsets[x]['vlan'], net_list)
- vlans = dict(zip(net_list, vlans_vals))
- nics = netsets.nics
-
- print(template.render(enabled_networks=netsets.enabled_network_list,
+ print(template.render(nets=netsets['networks'],
role=args.role,
- vlans=vlans,
+ external_net_af=netsets.get_ip_addr_family(),
external_net_type=args.ext_net_type,
- external_net_af=args.address_family,
- ovs_dpdk_bridge=args.ovs_dpdk_bridge,
- nics=nics))
+ ovs_dpdk_bridge=args.ovs_dpdk_bridge))
def get_parser():
@@ -141,13 +128,15 @@ def get_parser():
default='network-settings.yaml',
dest='net_settings_file',
help='path to network settings file')
- net_settings.add_argument('--flat', action='store_false',
- default=True, dest='network_isolation',
- help='disable network isolation')
net_settings.add_argument('-e', '--net-env-file',
default="network-environment.yaml",
dest='net_env_file',
help='path to network environment file')
+ net_settings.add_argument('-td', '--target-dir',
+ default="/tmp",
+ dest='target_dir',
+                              help='directory to write the '
+ 'network-environment.yaml file')
net_settings.add_argument('--compute-pre-config',
default=False,
action='store_true',
@@ -182,15 +171,10 @@ def get_parser():
default='network-settings.yaml',
dest='net_settings_file',
help='path to network settings file')
- nic_template.add_argument('--flat', action='store_false',
- default=True, dest='network_isolation',
- help='disable network isolation')
nic_template.add_argument('-e', '--ext-net-type', default='interface',
dest='ext_net_type',
choices=['interface', 'br-ex'],
help='External network type')
- nic_template.add_argument('-af', '--address-family', type=int, default=4,
- dest='address_family', help='IP address family')
nic_template.add_argument('-d', '--ovs-dpdk-bridge',
default=None, dest='ovs_dpdk_bridge',
help='OVS DPDK Bridge Name')
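
build_nic_template now derives VLANs, NIC assignments, and the address family from the NetworkSettings object instead of the removed --flat and -af CLI flags. A hedged sketch of the new render call, with file paths and the import location assumed for illustration:

    from jinja2 import Environment, FileSystemLoader
    from apex.network_settings import NetworkSettings  # module path assumed

    netsets = NetworkSettings('network_settings.yaml')
    env = Environment(loader=FileSystemLoader('config/network'),
                      autoescape=True)
    template = env.get_template('nics-template.yaml.jinja2')
    print(template.render(nets=netsets['networks'],
                          role='controller',
                          external_net_af=netsets.get_ip_addr_family(),
                          external_net_type='br-ex',
                          ovs_dpdk_bridge=None))
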
diff --git a/lib/undercloud-functions.sh b/lib/undercloud-functions.sh
index 177fe443..98552f29 100755
--- a/lib/undercloud-functions.sh
+++ b/lib/undercloud-functions.sh
@@ -12,9 +12,9 @@
##params: none
function setup_undercloud_vm {
if ! virsh list --all | grep undercloud > /dev/null; then
- undercloud_nets="default admin_network"
- if [[ $enabled_network_list =~ "public_network" ]]; then
- undercloud_nets+=" public_network"
+ undercloud_nets="default admin"
+ if [[ $enabled_network_list =~ "external" ]]; then
+ undercloud_nets+=" external"
fi
define_vm undercloud hd 30 "$undercloud_nets" 4 12288
@@ -120,32 +120,31 @@ function configure_undercloud {
local controller_nic_template compute_nic_template
echo
echo "Copying configuration files to Undercloud"
- if [[ "$net_isolation_enabled" == "TRUE" ]]; then
- echo -e "${blue}Network Environment set for Deployment: ${reset}"
- cat /tmp/network-environment.yaml
- scp ${SSH_OPTIONS[@]} /tmp/network-environment.yaml "stack@$UNDERCLOUD":
-
- # check for ODL L3/ONOS
- if [ "${deploy_options_array['sdn_l3']}" == 'True' ]; then
- ext_net_type=br-ex
- fi
-
- if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
- ovs_dpdk_bridge='br-phy'
- else
- ovs_dpdk_bridge=''
- fi
-
- if ! controller_nic_template=$(python3.4 -B $LIB/python/apex_python_utils.py nic-template -r controller -s $NETSETS $net_isolation_arg -t $CONFIG/nics-template.yaml.jinja2 -e "br-ex" -af $ip_addr_family); then
- echo -e "${red}ERROR: Failed to generate controller NIC heat template ${reset}"
- exit 1
- fi
+ echo -e "${blue}Network Environment set for Deployment: ${reset}"
+ cat $APEX_TMP_DIR/network-environment.yaml
+ scp ${SSH_OPTIONS[@]} $APEX_TMP_DIR/network-environment.yaml "stack@$UNDERCLOUD":
- if ! compute_nic_template=$(python3.4 -B $LIB/python/apex_python_utils.py nic-template -r compute -s $NETSETS $net_isolation_arg -t $CONFIG/nics-template.yaml.jinja2 -e $ext_net_type -af $ip_addr_family -d "$ovs_dpdk_bridge"); then
- echo -e "${red}ERROR: Failed to generate compute NIC heat template ${reset}"
- exit 1
- fi
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
+ # check for ODL L3/ONOS
+ if [ "${deploy_options_array['sdn_l3']}" == 'True' ]; then
+ ext_net_type=br-ex
+ fi
+
+ if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
+ ovs_dpdk_bridge='br-phy'
+ else
+ ovs_dpdk_bridge=''
+ fi
+
+ if ! controller_nic_template=$(python3 -B $LIB/python/apex_python_utils.py nic-template -r controller -s $NETSETS -t $CONFIG/nics-template.yaml.jinja2 -e "br-ex"); then
+ echo -e "${red}ERROR: Failed to generate controller NIC heat template ${reset}"
+ exit 1
+ fi
+
+ if ! compute_nic_template=$(python3 -B $LIB/python/apex_python_utils.py nic-template -r compute -s $NETSETS -t $CONFIG/nics-template.yaml.jinja2 -e $ext_net_type -d "$ovs_dpdk_bridge"); then
+ echo -e "${red}ERROR: Failed to generate compute NIC heat template ${reset}"
+ exit 1
+ fi
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
mkdir nics/
cat > nics/controller.yaml << EOF
$controller_nic_template
@@ -154,7 +153,6 @@ cat > nics/compute.yaml << EOF
$compute_nic_template
EOF
EOI
- fi
# ensure stack user on Undercloud machine has an ssh key
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "if [ ! -e ~/.ssh/id_rsa.pub ]; then ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa; fi"
@@ -189,25 +187,24 @@ EOI
echo "Running undercloud configuration."
echo "Logging undercloud configuration to undercloud:/home/stack/apex-undercloud-install.log"
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
-if [[ "$net_isolation_enabled" == "TRUE" ]]; then
- sed -i 's/#local_ip/local_ip/' undercloud.conf
- sed -i 's/#network_gateway/network_gateway/' undercloud.conf
- sed -i 's/#network_cidr/network_cidr/' undercloud.conf
- sed -i 's/#dhcp_start/dhcp_start/' undercloud.conf
- sed -i 's/#dhcp_end/dhcp_end/' undercloud.conf
- sed -i 's/#inspection_iprange/inspection_iprange/' undercloud.conf
- sed -i 's/#undercloud_debug/undercloud_debug/' undercloud.conf
-
- openstack-config --set undercloud.conf DEFAULT local_ip ${admin_network_provisioner_ip}/${admin_network_cidr##*/}
- openstack-config --set undercloud.conf DEFAULT network_gateway ${admin_network_provisioner_ip}
- openstack-config --set undercloud.conf DEFAULT network_cidr ${admin_network_cidr}
- openstack-config --set undercloud.conf DEFAULT dhcp_start ${admin_network_dhcp_range%%,*}
- openstack-config --set undercloud.conf DEFAULT dhcp_end ${admin_network_dhcp_range##*,}
- openstack-config --set undercloud.conf DEFAULT inspection_iprange ${admin_network_introspection_range}
- openstack-config --set undercloud.conf DEFAULT undercloud_debug false
- openstack-config --set undercloud.conf DEFAULT undercloud_hostname "undercloud.${domain_name}"
-
-fi
+sed -i 's/#local_ip/local_ip/' undercloud.conf
+sed -i 's/#network_gateway/network_gateway/' undercloud.conf
+sed -i 's/#network_cidr/network_cidr/' undercloud.conf
+sed -i 's/#dhcp_start/dhcp_start/' undercloud.conf
+sed -i 's/#dhcp_end/dhcp_end/' undercloud.conf
+sed -i 's/#inspection_iprange/inspection_iprange/' undercloud.conf
+sed -i 's/#undercloud_debug/undercloud_debug/' undercloud.conf
+
+openstack-config --set undercloud.conf DEFAULT local_ip ${admin_installer_vm_ip}/${admin_cidr##*/}
+openstack-config --set undercloud.conf DEFAULT network_gateway ${admin_installer_vm_ip}
+openstack-config --set undercloud.conf DEFAULT network_cidr ${admin_cidr}
+openstack-config --set undercloud.conf DEFAULT dhcp_start ${admin_dhcp_range%%,*}
+openstack-config --set undercloud.conf DEFAULT dhcp_end ${admin_dhcp_range##*,}
+openstack-config --set undercloud.conf DEFAULT inspection_iprange ${admin_introspection_range}
+openstack-config --set undercloud.conf DEFAULT undercloud_debug false
+openstack-config --set undercloud.conf DEFAULT undercloud_hostname "undercloud.${domain_name}"
+sudo openstack-config --set /etc/ironic/ironic.conf disk_utils iscsi_verify_attempts 30
+sudo openstack-config --set /etc/ironic/ironic.conf disk_partitioner check_device_max_retries 40
sudo sed -i '/CephClusterFSID:/c\\ CephClusterFSID: \\x27$(cat /proc/sys/kernel/random/uuid)\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
sudo sed -i '/CephMonKey:/c\\ CephMonKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
@@ -252,22 +249,22 @@ EOI
# configure external network
ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" << EOI
-if [[ "$public_network_vlan" != "native" ]]; then
- cat <<EOF > /etc/sysconfig/network-scripts/ifcfg-vlan${public_network_vlan}
-DEVICE=vlan${public_network_vlan}
+if [[ "$external_installer_vm_vlan" != "native" ]]; then
+ cat <<EOF > /etc/sysconfig/network-scripts/ifcfg-vlan${external_installer_vm_vlan}
+DEVICE=vlan${external_installer_vm_vlan}
ONBOOT=yes
DEVICETYPE=ovs
TYPE=OVSIntPort
BOOTPROTO=static
-IPADDR=${public_network_provisioner_ip}
-PREFIX=${public_network_cidr##*/}
+IPADDR=${external_installer_vm_ip}
+PREFIX=${external_cidr##*/}
OVS_BRIDGE=br-ctlplane
-OVS_OPTIONS="tag=${public_network_vlan}"
+OVS_OPTIONS="tag=${external_installer_vm_vlan}"
EOF
- ifup vlan${public_network_vlan}
+ ifup vlan${external_installer_vm_vlan}
else
- if ! ip a s eth2 | grep ${public_network_provisioner_ip} > /dev/null; then
- ip a a ${public_network_provisioner_ip}/${public_network_cidr##*/} dev eth2
+ if ! ip a s eth2 | grep ${external_installer_vm_ip} > /dev/null; then
+ ip a a ${external_installer_vm_ip}/${external_cidr##*/} dev eth2
ip link set up dev eth2
fi
fi
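
When the installer VM's external interface is tagged, the block above writes an OVS internal-port config for vlan<ID> on br-ctlplane; otherwise the IP lands directly on eth2. A sketch of the ifcfg rendering, with all values illustrative:

    def render_external_ifcfg(vlan, ip, prefix):
        # mirrors the heredoc above; 'native' means untagged
        if vlan == 'native':
            return None  # address is added directly to eth2 instead
        return '\n'.join([
            'DEVICE=vlan{}'.format(vlan),
            'ONBOOT=yes',
            'DEVICETYPE=ovs',
            'TYPE=OVSIntPort',
            'BOOTPROTO=static',
            'IPADDR={}'.format(ip),
            'PREFIX={}'.format(prefix),
            'OVS_BRIDGE=br-ctlplane',
            'OVS_OPTIONS="tag={}"'.format(vlan),
        ])
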
diff --git a/lib/virtual-setup-functions.sh b/lib/virtual-setup-functions.sh
index 61dc6799..8aaa3594 100755
--- a/lib/virtual-setup-functions.sh
+++ b/lib/virtual-setup-functions.sh
@@ -23,7 +23,7 @@ function setup_virtual_baremetal {
ramsize=$(($2*1024))
fi
#start by generating the opening yaml for the inventory-virt.yaml file
- cat > /tmp/inventory-virt.yaml << EOF
+ cat > $APEX_TMP_DIR/inventory-virt.yaml << EOF
nodes:
EOF
@@ -51,8 +51,8 @@ EOF
fi
fi
if ! virsh list --all | grep baremetal${i} > /dev/null; then
- define_vm baremetal${i} network 41 'admin_network' $vcpus $ramsize
- for n in private_network public_network storage_network api_network; do
+ define_vm baremetal${i} network 41 'admin' $vcpus $ramsize
+ for n in tenant external storage api; do
if [[ $enabled_network_list =~ $n ]]; then
echo -n "$n "
virsh attach-interface --domain baremetal${i} --type network --source $n --model virtio --config
@@ -62,9 +62,9 @@ EOF
echo "Found baremetal${i} VM, using existing VM"
fi
#virsh vol-list default | grep baremetal${i} 2>&1> /dev/null || virsh vol-create-as default baremetal${i}.qcow2 41G --format qcow2
- mac=$(virsh domiflist baremetal${i} | grep admin_network | awk '{ print $5 }')
+ mac=$(virsh domiflist baremetal${i} | grep admin | awk '{ print $5 }')
- cat >> /tmp/inventory-virt.yaml << EOF
+ cat >> $APEX_TMP_DIR/inventory-virt.yaml << EOF
node${i}:
mac_address: "$mac"
ipmi_ip: 192.168.122.1
diff --git a/tests/config/network_settings_duplicate_nic.yaml b/tests/config/network_settings_duplicate_nic.yaml
deleted file mode 100644
index 24dd5ca0..00000000
--- a/tests/config/network_settings_duplicate_nic.yaml
+++ /dev/null
@@ -1,115 +0,0 @@
-# This configuration file defines Network Environment for a
-# Baremetal Deployment of OPNFV. It contains default values
-# for 4 following networks:
-#
-# - admin
-# - private*
-# - public
-# - storage*
-#
-# *) optional networks
-#
-# Any values missing from this configuration file will be
-# auto-detected by deployment script from the existing network
-# configuration of the jumphost.
-#
-# Optional networks will be consolidated with the admin network
-# if not explicitely configured.
-#
-# See short description of the networks in the comments below.
-#
-
-# DNS Servers for all nodes, comma delimited list
-dns_servers: ["8.8.8.8", "8.8.4.4"]
-
-# "admin" is the short name for Control Plane Network.
-# During OPNFV deployment it is used for node provisioning so
-# PXE boot should be enabled for the related interfaces on all
-# the nodes in the OPNFV cluster. After the deployment this
-# network is used as the OpenStack management network which
-# carries e.g. communication between its internal components.
-#
-admin_network:
- enabled: true
- network_type: bridged
- bridged_interface: ''
- bond_interfaces: ''
- compute_interface: eth1
- controller_interface: eth2
- usable_ip_range: 192.0.2.11,192.0.2.99
- gateway: 192.0.2.1
- provisioner_ip: 192.0.2.1
- cidr: 192.0.2.0/24
- dhcp_range: 192.0.2.2,192.0.2.10
- introspection_range: 192.0.2.100,192.0.2.120
-
-# "private" is an optional network used as underlying physical
-# network for virtual provider and tenant networks created by
-# users. Traffic between virtual machines is carried by this
-# network.
-#
-private_network:
- enabled: true
- vlan: native
- cidr: 11.0.0.0/24
- compute_interface: enp0s4
- controller_interface: nic3
-
-# "public" network is used for external connectivity.
-# The external network provides Internet access for virtual
-# machines. If floating IP range is defined for this network,
-# floating IP addresses can be used for accessing virtual
-# machines from outside of OPNFV cluster. Also external REST
-# API calls use this network.
-#
-public_network:
- enabled: true
- network_type: ''
- bridged_interface: ''
- compute_interface: eth1
- controller_interface: enp0s3
- vlan: native
- cidr: 192.168.37.0/24
- gateway: 192.168.37.1
- floating_ip_range: 192.168.37.200,192.168.37.220
- usable_ip_range: 192.168.37.10,192.168.37.199
- provisioner_ip: 192.168.37.1
-
-# "storage" is an optional network used by storage backends.
-# You can configure this network in order to reduce load on
-# Control Plane Network.
-#
-storage_network:
- enabled: true
- vlan: native
- cidr: 12.0.0.0/24
- compute_interface: eth5
- controller_interface: eth6
-
-#admin_network:
-# enabled: true
-# network_type: bridged #Indicates if this network will be bridged to an interface, or to a bond
-# bridged_interface: '' #Interface to bridge to for installer VM
-# bond_interfaces: '' #Interfaces to create bond with for installer VM
-# compute_interface: nic4 #Interface used for this network on the compute node. Can either be logical nic name like "nic1" or real name like "eth1"
-# controller_interface: nic4 #Interface used for this network on the controller node. Can either be logical nic name like "nic1" or real name like "eth1"
-# vlan: native #VLAN tag to use, native means none
-# usable_ip_range: 192.0.2.11,192.0.2.99 #Usable ip range, if empty entire range is usable, ex. 192.168.1.10,192.168.1.20
-# gateway: 192.0.2.1 #Gateway (only needed when public_network is disabled), if empty it is auto-detected
-# provisioner_ip: 192.0.2.1 #installer VM IP, if empty it is the next available IP in the admin subnet
-# cidr: 192.0.2.0/24 #subnet in CIDR format 192.168.1.0/24, if empty it will be auto-detected
-# dhcp_range: 192.0.2.2,192.0.2.10 #dhcp range for the admin network, if empty it will be automatically provisioned
-# introspection_range: 192.0.2.100,192.0.2.120 #Range used for introspection phase (examining nodes)
-#private_network:
-# enabled: false #If disabled, internal api traffic will collapse to admin_network
-#public_network:
-# enabled: true #If disabled, public_network traffic will collapse to admin network
-# network_type: ''
-# bridged_interface: ''
-# cidr: 192.168.37.0/24
-# gateway: 192.168.37.1
-# floating_ip_range: 192.168.37.200,192.168.37.220 #Range to allocate to floating IPs for the public network with Neutron
-# usable_ip_range: 192.168.37.10,192.168.37.199 #Usable IP range on the public network, usually this is a shared subnet
-# provisioner_ip: 192.168.37.1
-#storage_network:
-# enabled: false #If disabled, storage_network traffic will collapse to admin network
diff --git a/tests/config/network_settings_missing_required_nic.yaml b/tests/config/network_settings_missing_required_nic.yaml
deleted file mode 100644
index 18886278..00000000
--- a/tests/config/network_settings_missing_required_nic.yaml
+++ /dev/null
@@ -1,113 +0,0 @@
-# This configuration file defines Network Environment for a
-# Baremetal Deployment of OPNFV. It contains default values
-# for 4 following networks:
-#
-# - admin
-# - private*
-# - public
-# - storage*
-#
-# *) optional networks
-#
-# Any values missing from this configuration file will be
-# auto-detected by deployment script from the existing network
-# configuration of the jumphost.
-#
-# Optional networks will be consolidated with the admin network
-# if not explicitely configured.
-#
-# See short description of the networks in the comments below.
-#
-
-# DNS Servers for all nodes, comma delimited list
-dns_servers: ["8.8.8.8", "8.8.4.4"]
-
-# "admin" is the short name for Control Plane Network.
-# During OPNFV deployment it is used for node provisioning so
-# PXE boot should be enabled for the related interfaces on all
-# the nodes in the OPNFV cluster. After the deployment this
-# network is used as the OpenStack management network which
-# carries e.g. communication between its internal components.
-#
-admin_network:
- enabled: true
- network_type: bridged
- bridged_interface: ''
- bond_interfaces: ''
- compute_interface: eth1
- controller_interface: eth2
- usable_ip_range: 192.0.2.11,192.0.2.99
- gateway: 192.0.2.1
- provisioner_ip: 192.0.2.1
- cidr: 192.0.2.0/24
- dhcp_range: 192.0.2.2,192.0.2.10
- introspection_range: 192.0.2.100,192.0.2.120
-
-# "private" is an optional network used as underlying physical
-# network for virtual provider and tenant networks created by
-# users. Traffic between virtual machines is carried by this
-# network.
-#
-private_network:
- enabled: true
- vlan: native
- cidr: 11.0.0.0/24
- compute_interface: enp0s4
- controller_interface: nic3
-
-# "public" network is used for external connectivity.
-# The external network provides Internet access for virtual
-# machines. If floating IP range is defined for this network,
-# floating IP addresses can be used for accessing virtual
-# machines from outside of OPNFV cluster. Also external REST
-# API calls use this network.
-#
-public_network:
- enabled: true
- network_type: ''
- bridged_interface: ''
- vlan: native
- cidr: 192.168.37.0/24
- gateway: 192.168.37.1
- floating_ip_range: 192.168.37.200,192.168.37.220
- usable_ip_range: 192.168.37.10,192.168.37.199
- provisioner_ip: 192.168.37.1
-
-# "storage" is an optional network used by storage backends.
-# You can configure this network in order to reduce load on
-# Control Plane Network.
-#
-storage_network:
- enabled: true
- vlan: native
- cidr: 12.0.0.0/24
- compute_interface: eth5
- controller_interface: eth6
-
-#admin_network:
-# enabled: true
-# network_type: bridged #Indicates if this network will be bridged to an interface, or to a bond
-# bridged_interface: '' #Interface to bridge to for installer VM
-# bond_interfaces: '' #Interfaces to create bond with for installer VM
-# compute_interface: nic4 #Interface used for this network on the compute node. Can either be logical nic name like "nic1" or real name like "eth1"
-# controller_interface: nic4 #Interface used for this network on the controller node. Can either be logical nic name like "nic1" or real name like "eth1"
-# vlan: native #VLAN tag to use, native means none
-# usable_ip_range: 192.0.2.11,192.0.2.99 #Usable ip range, if empty entire range is usable, ex. 192.168.1.10,192.168.1.20
-# gateway: 192.0.2.1 #Gateway (only needed when public_network is disabled), if empty it is auto-detected
-# provisioner_ip: 192.0.2.1 #installer VM IP, if empty it is the next available IP in the admin subnet
-# cidr: 192.0.2.0/24 #subnet in CIDR format 192.168.1.0/24, if empty it will be auto-detected
-# dhcp_range: 192.0.2.2,192.0.2.10 #dhcp range for the admin network, if empty it will be automatically provisioned
-# introspection_range: 192.0.2.100,192.0.2.120 #Range used for introspection phase (examining nodes)
-#private_network:
-# enabled: false #If disabled, internal api traffic will collapse to admin_network
-#public_network:
-# enabled: true #If disabled, public_network traffic will collapse to admin network
-# network_type: ''
-# bridged_interface: ''
-# cidr: 192.168.37.0/24
-# gateway: 192.168.37.1
-# floating_ip_range: 192.168.37.200,192.168.37.220 #Range to allocate to floating IPs for the public network with Neutron
-# usable_ip_range: 192.168.37.10,192.168.37.199 #Usable IP range on the public network, usually this is a shared subnet
-# provisioner_ip: 192.168.37.1
-#storage_network:
-# enabled: false #If disabled, storage_network traffic will collapse to admin network
diff --git a/tests/config/network_settings_nic1_reserved.yaml b/tests/config/network_settings_nic1_reserved.yaml
deleted file mode 100644
index 8abcfc85..00000000
--- a/tests/config/network_settings_nic1_reserved.yaml
+++ /dev/null
@@ -1,113 +0,0 @@
-# This configuration file defines Network Environment for a
-# Baremetal Deployment of OPNFV. It contains default values
-# for 4 following networks:
-#
-# - admin
-# - private*
-# - public
-# - storage*
-#
-# *) optional networks
-#
-# Any values missing from this configuration file will be
-# auto-detected by deployment script from the existing network
-# configuration of the jumphost.
-#
-# Optional networks will be consolidated with the admin network
-# if not explicitely configured.
-#
-# See short description of the networks in the comments below.
-#
-
-# DNS Servers for all nodes, comma delimited list
-dns_servers: ["8.8.8.8", "8.8.4.4"]
-
-# "admin" is the short name for Control Plane Network.
-# During OPNFV deployment it is used for node provisioning so
-# PXE boot should be enabled for the related interfaces on all
-# the nodes in the OPNFV cluster. After the deployment this
-# network is used as the OpenStack management network which
-# carries e.g. communication between its internal components.
-#
-admin_network:
- enabled: true
- network_type: bridged
- bridged_interface: ''
- bond_interfaces: ''
- usable_ip_range: 192.0.2.11,192.0.2.99
- gateway: 192.0.2.1
- provisioner_ip: 192.0.2.1
- cidr: 192.0.2.0/24
- dhcp_range: 192.0.2.2,192.0.2.10
- introspection_range: 192.0.2.100,192.0.2.120
-
-# "private" is an optional network used as underlying physical
-# network for virtual provider and tenant networks created by
-# users. Traffic between virtual machines is carried by this
-# network.
-#
-private_network:
- enabled: true
- vlan: native
- cidr: 11.0.0.0/24
- compute_interface: enp0s4
- controller_interface: nic3
-
-# "public" network is used for external connectivity.
-# The external network provides Internet access for virtual
-# machines. If floating IP range is defined for this network,
-# floating IP addresses can be used for accessing virtual
-# machines from outside of OPNFV cluster. Also external REST
-# API calls use this network.
-#
-public_network:
- enabled: true
- network_type: ''
- bridged_interface: ''
- compute_interface: nic1
- controller_interface: enp0s3
- vlan: native
- cidr: 192.168.37.0/24
- gateway: 192.168.37.1
- floating_ip_range: 192.168.37.200,192.168.37.220
- usable_ip_range: 192.168.37.10,192.168.37.199
- provisioner_ip: 192.168.37.1
-
-# "storage" is an optional network used by storage backends.
-# You can configure this network in order to reduce load on
-# Control Plane Network.
-#
-storage_network:
- enabled: true
- vlan: native
- cidr: 12.0.0.0/24
- compute_interface: eth5
- controller_interface: eth6
-
-#admin_network:
-# enabled: true
-# network_type: bridged #Indicates if this network will be bridged to an interface, or to a bond
-# bridged_interface: '' #Interface to bridge to for installer VM
-# bond_interfaces: '' #Interfaces to create bond with for installer VM
-# overcloud_compute_interface: nic4 #Interface used for this network on the compute node. Can either be logical nic name like "nic1" or real name like "eth1"
-# overcloud_controller_interface: nic4 #Interface used for this network on the controller node. Can either be logical nic name like "nic1" or real name like "eth1"
-# vlan: native #VLAN tag to use, native means none
-# usable_ip_range: 192.0.2.11,192.0.2.99 #Usable ip range, if empty entire range is usable, ex. 192.168.1.10,192.168.1.20
-# gateway: 192.0.2.1 #Gateway (only needed when public_network is disabled), if empty it is auto-detected
-# provisioner_ip: 192.0.2.1 #installer VM IP, if empty it is the next available IP in the admin subnet
-# cidr: 192.0.2.0/24 #subnet in CIDR format 192.168.1.0/24, if empty it will be auto-detected
-# dhcp_range: 192.0.2.2,192.0.2.10 #dhcp range for the admin network, if empty it will be automatically provisioned
-# introspection_range: 192.0.2.100,192.0.2.120 #Range used for introspection phase (examining nodes)
-#private_network:
-# enabled: false #If disabled, internal api traffic will collapse to admin_network
-#public_network:
-# enabled: true #If disabled, public_network traffic will collapse to admin network
-# network_type: ''
-# bridged_interface: ''
-# cidr: 192.168.37.0/24
-# gateway: 192.168.37.1
-# floating_ip_range: 192.168.37.200,192.168.37.220 #Range to allocate to floating IPs for the public network with Neutron
-# usable_ip_range: 192.168.37.10,192.168.37.199 #Usable IP range on the public network, usually this is a shared subnet
-# provisioner_ip: 192.168.37.1
-#storage_network:
-# enabled: false #If disabled, storage_network traffic will collapse to admin network
diff --git a/tests/config/network_settings_nics_not_specified.yaml b/tests/config/network_settings_nics_not_specified.yaml
deleted file mode 100644
index e5089435..00000000
--- a/tests/config/network_settings_nics_not_specified.yaml
+++ /dev/null
@@ -1,107 +0,0 @@
-# This configuration file defines Network Environment for a
-# Baremetal Deployment of OPNFV. It contains default values
-# for 4 following networks:
-#
-# - admin
-# - private*
-# - public
-# - storage*
-#
-# *) optional networks
-#
-# Any values missing from this configuration file will be
-# auto-detected by deployment script from the existing network
-# configuration of the jumphost.
-#
-# Optional networks will be consolidated with the admin network
-# if not explicitely configured.
-#
-# See short description of the networks in the comments below.
-#
-
-# DNS Servers for all nodes, comma delimited list
-dns_servers: ["8.8.8.8", "8.8.4.4"]
-
-# "admin" is the short name for Control Plane Network.
-# During OPNFV deployment it is used for node provisioning so
-# PXE boot should be enabled for the related interfaces on all
-# the nodes in the OPNFV cluster. After the deployment this
-# network is used as the OpenStack management network which
-# carries e.g. communication between its internal components.
-#
-admin_network:
- enabled: true
- network_type: bridged
- bridged_interface: ''
- bond_interfaces: ''
- usable_ip_range: 192.0.2.11,192.0.2.99
- gateway: 192.0.2.1
- provisioner_ip: 192.0.2.1
- cidr: 192.0.2.0/24
- dhcp_range: 192.0.2.2,192.0.2.10
- introspection_range: 192.0.2.100,192.0.2.120
-
-# "private" is an optional network used as underlying physical
-# network for virtual provider and tenant networks created by
-# users. Traffic between virtual machines is carried by this
-# network.
-#
-private_network:
- enabled: true
- vlan: native
- cidr: 11.0.0.0/24
-
-# "public" network is used for external connectivity.
-# The external network provides Internet access for virtual
-# machines. If floating IP range is defined for this network,
-# floating IP addresses can be used for accessing virtual
-# machines from outside of OPNFV cluster. Also external REST
-# API calls use this network.
-#
-public_network:
- enabled: true
- network_type: ''
- bridged_interface: ''
- vlan: native
- cidr: 192.168.37.0/24
- gateway: 192.168.37.1
- floating_ip_range: 192.168.37.200,192.168.37.220
- usable_ip_range: 192.168.37.10,192.168.37.199
- provisioner_ip: 192.168.37.1
-
-# "storage" is an optional network used by storage backends.
-# You can configure this network in order to reduce load on
-# Control Plane Network.
-#
-storage_network:
- enabled: true
- vlan: native
- cidr: 12.0.0.0/24
-
-#admin_network:
-# enabled: true
-# network_type: bridged #Indicates if this network will be bridged to an interface, or to a bond
-# bridged_interface: '' #Interface to bridge to for installer VM
-# bond_interfaces: '' #Interfaces to create bond with for installer VM
-# compute_interface: nic4 #Interface used for this network on the compute node. Can either be logical nic name like "nic1" or real name like "eth1"
-# controller_interface: nic4 #Interface used for this network on the controller node. Can either be logical nic name like "nic1" or real name like "eth1"
-# vlan: native #VLAN tag to use, native means none
-# usable_ip_range: 192.0.2.11,192.0.2.99 #Usable ip range, if empty entire range is usable, ex. 192.168.1.10,192.168.1.20
-# gateway: 192.0.2.1 #Gateway (only needed when public_network is disabled), if empty it is auto-detected
-# provisioner_ip: 192.0.2.1 #installer VM IP, if empty it is the next available IP in the admin subnet
-# cidr: 192.0.2.0/24 #subnet in CIDR format 192.168.1.0/24, if empty it will be auto-detected
-# dhcp_range: 192.0.2.2,192.0.2.10 #dhcp range for the admin network, if empty it will be automatically provisioned
-# introspection_range: 192.0.2.100,192.0.2.120 #Range used for introspection phase (examining nodes)
-#private_network:
-# enabled: false #If disabled, internal api traffic will collapse to admin_network
-#public_network:
-# enabled: true #If disabled, public_network traffic will collapse to admin network
-# network_type: ''
-# bridged_interface: ''
-# cidr: 192.168.37.0/24
-# gateway: 192.168.37.1
-# floating_ip_range: 192.168.37.200,192.168.37.220 #Range to allocate to floating IPs for the public network with Neutron
-# usable_ip_range: 192.168.37.10,192.168.37.199 #Usable IP range on the public network, usually this is a shared subnet
-# provisioner_ip: 192.168.37.1
-#storage_network:
-# enabled: false #If disabled, storage_network traffic will collapse to admin network
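
The fixture removed above still used the flat, per-network top-level keys (admin_network, public_network, and so on). The rewritten tests later in this diff index a nested networks section instead, e.g. ns['networks'][ADMIN_NETWORK]['installer_vm']['ip']. A minimal sketch of the two shapes, with the nested key names inferred from the test code in this diff (the exact YAML layout of the new settings files is not shown here):

    # Hedged sketch: old flat schema vs. the nested schema the tests imply.
    old_style = {
        'admin_network': {
            'provisioner_ip': '192.0.2.1',
            'cidr': '192.0.2.0/24',
        },
    }
    new_style = {
        'networks': {
            'admin': {                       # assuming ADMIN_NETWORK == 'admin'
                'cidr': '192.0.2.0/24',
                'installer_vm': {'ip': '192.0.2.1'},  # replaces provisioner_ip
                'nic_mapping': {
                    'compute': {'members': ['nic1'], 'vlan': 'native'},
                },
            },
        },
    }
    assert (old_style['admin_network']['provisioner_ip'] ==
            new_style['networks']['admin']['installer_vm']['ip'])
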
diff --git a/tests/test_apex_deploy_settings.py b/tests/test_apex_deploy_settings.py
index 1e26b287..2af187b2 100644
--- a/tests/test_apex_deploy_settings.py
+++ b/tests/test_apex_deploy_settings.py
@@ -7,8 +7,9 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import io
# https://docs.python.org/3/library/io.html
+import io
+import tempfile
from apex.deploy_settings import DeploySettings
from apex.deploy_settings import DeploySettingsException
@@ -84,11 +85,14 @@ class TestIpUtils(object):
def test__validate_settings(self):
for c in test_deploy_content:
- f = open('/tmp/apex_deploy_test_file', 'w')
- f.write(c)
- f.close()
- assert_raises(DeploySettingsException,
- DeploySettings, '/tmp/apex_deploy_test_file')
+ try:
+ f = tempfile.NamedTemporaryFile(mode='w')
+ f.write(c)
+ f.flush()
+ assert_raises(DeploySettingsException,
+ DeploySettings, f.name)
+ finally:
+ f.close()
def test_dump_bash(self):
# the performance file has the most use of the function
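
The tempfile change above deserves a note: NamedTemporaryFile buffers writes, so flush() is required before DeploySettings re-opens the file by its name, and close() then unlinks it (delete=True is the default). A standalone sketch of the same pattern, assuming POSIX semantics (re-opening an open temp file by name does not work on Windows):

    import tempfile

    with tempfile.NamedTemporaryFile(mode='w') as f:
        f.write('deploy_options: {}\n')       # sample content, not Apex's
        f.flush()                             # push buffered bytes to disk
        with open(f.name) as reader:          # second open by name, as the test does
            print(reader.read())
    # leaving the with-block closes and deletes the file
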
diff --git a/tests/test_apex_network_environment.py b/tests/test_apex_network_environment.py
index 673368e8..df0f0334 100644
--- a/tests/test_apex_network_environment.py
+++ b/tests/test_apex_network_environment.py
@@ -9,11 +9,14 @@
import ipaddress
+from copy import copy
+
from apex.common.constants import (
- PUBLIC_NETWORK,
- PRIVATE_NETWORK,
+ EXTERNAL_NETWORK,
+ TENANT_NETWORK,
STORAGE_NETWORK,
- API_NETWORK)
+ API_NETWORK,
+ CONTROLLER)
from apex.network_settings import NetworkSettings
from apex.network_environment import (
NetworkEnvironment,
@@ -33,6 +36,12 @@ class TestNetworkEnvironment(object):
@classmethod
def setup_class(klass):
"""This method is run once for each class before any tests are run"""
+ klass.ns = NetworkSettings(
+ '../config/network/network_settings.yaml')
+ klass.ns_vlans = NetworkSettings(
+ '../config/network/network_settings_vlans.yaml')
+ klass.ns_ipv6 = NetworkSettings(
+ '../config/network/network_settings_v6.yaml')
@classmethod
def teardown_class(klass):
@@ -48,84 +57,108 @@ class TestNetworkEnvironment(object):
assert_raises(NetworkEnvException, NetworkEnvironment,
None, '../build/network-environment.yaml')
- def test_netenv_settings_public_network(self):
- ns = NetworkSettings('../config/network/network_settings.yaml', True)
+ def test_netenv_settings_external_network_vlans(self):
# test vlans
- ns[PUBLIC_NETWORK]['vlan'] = 100
- ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
+ ne = NetworkEnvironment(self.ns_vlans,
+ '../build/network-environment.yaml')
assert_equal(ne['parameter_defaults']['NeutronExternalNetworkBridge'],
'""')
- assert_equal(ne['parameter_defaults']['ExternalNetworkVlanID'], 100)
+ assert_equal(ne['parameter_defaults']['ExternalNetworkVlanID'], 501)
+ def test_netenv_settings_external_network_ipv6(self):
# Test IPv6
- ns[PUBLIC_NETWORK]['cidr'] = ipaddress.ip_network('::1/128')
- ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
+ ne = NetworkEnvironment(self.ns_ipv6,
+ '../build/network-environment.yaml')
regstr = ne['resource_registry'][next(iter(EXTERNAL_RESOURCES.keys()))]
assert_equal(regstr.split('/')[-1], 'external_v6.yaml')
- def test_netenv_settings_private_network(self):
- ns = NetworkSettings('../config/network/network_settings.yaml', True)
- # test vlans
- ns[PRIVATE_NETWORK]['vlan'] = 100
+ def test_netenv_settings_external_network_removed(self):
+ ns = copy(self.ns)
+ # Test removing EXTERNAL_NETWORK
+ ns.enabled_network_list.remove(EXTERNAL_NETWORK)
ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
- assert_equal(ne['parameter_defaults']['TenantNetworkVlanID'], 100)
-
- # Test IPv6
- ns[PRIVATE_NETWORK]['cidr'] = ipaddress.ip_network('::1/128')
- ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
- regstr = ne['resource_registry'][next(iter(TENANT_RESOURCES.keys()))]
- assert_equal(regstr.split('/')[-1], 'tenant_v6.yaml')
+ regstr = ne['resource_registry'][next(iter(EXTERNAL_RESOURCES.keys()))]
+ assert_equal(regstr.split('/')[-1], 'noop.yaml')
- # Test removing PRIVATE_NETWORK
- ns.enabled_network_list.remove(PRIVATE_NETWORK)
+ def test_netenv_settings_tenant_network_vlans(self):
+ # test vlans
+ ne = NetworkEnvironment(self.ns_vlans,
+ '../build/network-environment.yaml')
+ assert_equal(ne['parameter_defaults']['TenantNetworkVlanID'], 401)
+
+# Apex does not support v6 tenant networks,
+# though there is code that would fire if a
+# v6 cidr were passed in. Uncomment this test
+# to cover that code path.
+# def test_netenv_settings_tenant_network_v6(self):
+# # Test IPv6
+# ne = NetworkEnvironment(self.ns_ipv6,
+# '../build/network-environment.yaml')
+# regstr = ne['resource_registry'][next(iter(TENANT_RESOURCES.keys()))]
+# assert_equal(regstr.split('/')[-1], 'tenant_v6.yaml')
+
+ def test_netenv_settings_tenant_network_removed(self):
+ ns = copy(self.ns)
+ # Test removing TENANT_NETWORK
+ ns.enabled_network_list.remove(TENANT_NETWORK)
ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
regstr = ne['resource_registry'][next(iter(TENANT_RESOURCES.keys()))]
assert_equal(regstr.split('/')[-1], 'noop.yaml')
- def test_netenv_settings_storage_network(self):
- ns = NetworkSettings('../config/network/network_settings.yaml', True)
+ def test_netenv_settings_storage_network_vlans(self):
# test vlans
- ns[STORAGE_NETWORK]['vlan'] = 100
- ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
- assert_equal(ne['parameter_defaults']['StorageNetworkVlanID'], 100)
+ ne = NetworkEnvironment(self.ns_vlans,
+ '../build/network-environment.yaml')
+ assert_equal(ne['parameter_defaults']['StorageNetworkVlanID'], 201)
+ def test_netenv_settings_storage_network_v6(self):
# Test IPv6
- ns[STORAGE_NETWORK]['cidr'] = ipaddress.ip_network('::1/128')
- ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
+ ne = NetworkEnvironment(self.ns_ipv6,
+ '../build/network-environment.yaml')
regstr = ne['resource_registry'][next(iter(STORAGE_RESOURCES.keys()))]
assert_equal(regstr.split('/')[-1], 'storage_v6.yaml')
+ def test_netenv_settings_storage_network_removed(self):
+ ns = copy(self.ns)
# Test removing STORAGE_NETWORK
ns.enabled_network_list.remove(STORAGE_NETWORK)
ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
regstr = ne['resource_registry'][next(iter(STORAGE_RESOURCES.keys()))]
assert_equal(regstr.split('/')[-1], 'noop.yaml')
- def test_netenv_settings_api_network(self):
- ns = NetworkSettings('../config/network/network_settings.yaml', True)
+ def test_netenv_settings_api_network_v4(self):
+ ns = copy(self.ns_vlans)
+ ns['networks'][API_NETWORK]['enabled'] = True
+ ns['networks'][API_NETWORK]['cidr'] = '10.11.12.0/24'
+ ns = NetworkSettings(ns)
# test vlans
- ns.enabled_network_list.append(API_NETWORK)
- ns[API_NETWORK] = {'vlan': 100,
- 'cidr': ipaddress.ip_network('10.10.10.0/24'),
- 'usable_ip_range': '10.10.10.10,10.10.10.100'}
ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
- assert_equal(ne['parameter_defaults']['InternalApiNetworkVlanID'], 100)
+ assert_equal(ne['parameter_defaults']['InternalApiNetworkVlanID'], 101)
- # Test IPv6
- ns[API_NETWORK]['cidr'] = ipaddress.ip_network('::1/128')
+ def test_netenv_settings_api_network_vlans(self):
+ ns = copy(self.ns_vlans)
+ ns['networks'][API_NETWORK]['enabled'] = True
+ ns = NetworkSettings(ns)
+ # test vlans
ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
+ assert_equal(ne['parameter_defaults']['InternalApiNetworkVlanID'], 101)
+
+ def test_netenv_settings_api_network_v6(self):
+ # Test IPv6
+ ne = NetworkEnvironment(self.ns_ipv6,
+ '../build/network-environment.yaml')
regstr = ne['resource_registry'][next(iter(API_RESOURCES.keys()))]
assert_equal(regstr.split('/')[-1], 'internal_api_v6.yaml')
- # Test removing API_NETWORK
- ns.enabled_network_list.remove(API_NETWORK)
+ def test_netenv_settings_api_network_removed(self):
+ ns = copy(self.ns)
+ # API_NETWORK is not in the default network settings file
ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
regstr = ne['resource_registry'][next(iter(API_RESOURCES.keys()))]
assert_equal(regstr.split('/')[-1], 'noop.yaml')
def test_numa_configs(self):
- ns = NetworkSettings('../config/network/network_settings.yaml', True)
- ne = NetworkEnvironment(ns, '../build/network-environment.yaml',
+ ne = NetworkEnvironment(self.ns, '../build/network-environment.yaml',
compute_pre_config=True,
controller_pre_config=True)
assert_is_instance(ne, dict)
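
The refactor above builds three shared NetworkSettings fixtures once in setup_class and copies them before destructive edits. One caveat, hedged since NetworkSettings' __copy__ is not shown in this diff: copy.copy() is shallow, so removing entries from enabled_network_list leaves the shared fixture intact only if the class duplicates that list itself. Illustrated with a plain class:

    from copy import copy

    class Settings:
        def __init__(self):
            self.enabled_network_list = ['admin', 'tenant']

    a = Settings()
    b = copy(a)                        # shallow: both objects share the list
    b.enabled_network_list.remove('tenant')
    print(a.enabled_network_list)      # ['admin'] -- the original changed too
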
diff --git a/tests/test_apex_network_settings.py b/tests/test_apex_network_settings.py
index ff61cc4b..955c0cf7 100644
--- a/tests/test_apex_network_settings.py
+++ b/tests/test_apex_network_settings.py
@@ -7,6 +7,12 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+from apex.common.constants import (
+ EXTERNAL_NETWORK,
+ STORAGE_NETWORK,
+ ADMIN_NETWORK,
+)
+
from apex.network_settings import (
NetworkSettings,
NetworkSettingsException,
@@ -18,6 +24,8 @@ from nose.tools import (
assert_raises
)
+files_dir = '../config/network/'
+
class TestNetworkSettings(object):
@classmethod
@@ -35,48 +43,118 @@ class TestNetworkSettings(object):
"""This method is run once after _each_ test method is executed"""
def test_init(self):
- NetworkSettings('../config/network/network_settings.yaml', True)
+ assert_is_instance(
+ NetworkSettings(files_dir+'network_settings.yaml'),
+ NetworkSettings)
+
+ def test_init_vlans(self):
+ assert_is_instance(
+ NetworkSettings(files_dir+'network_settings_vlans.yaml'),
+ NetworkSettings)
+
+# TODO: v6 test is stuck
+ # def test_init_v6(self):
+ # assert_is_instance(
+ # NetworkSettings(files_dir+'network_settings_v6.yaml', True),
+ # NetworkSettings)
+
+ def test_init_admin_disabled_or_missing(self):
+ ns = NetworkSettings(files_dir+'network_settings.yaml')
+        # remove admin; the apex section will re-add it
+ ns['networks'].pop('admin', None)
+ assert_raises(NetworkSettingsException, NetworkSettings, ns)
+ # remove admin and apex
+ ns.pop('apex', None)
+ ns['networks'].pop('admin', None)
+ assert_raises(NetworkSettingsException, NetworkSettings, ns)
+
+ def test_init_collapse_storage(self):
+ ns = NetworkSettings(files_dir+'network_settings.yaml')
+ # remove storage
+ ns['networks'].pop('storage', None)
+ assert_is_instance(NetworkSettings(ns), NetworkSettings)
+
+ def test_init_missing_dns_domain(self):
+ ns = NetworkSettings(files_dir+'network_settings.yaml')
+        # remove dns-domain
+ ns.pop('dns-domain', None)
+ assert_is_instance(NetworkSettings(ns), NetworkSettings)
def test_dump_bash(self):
- ns = NetworkSettings('../config/network/network_settings.yaml', True)
+ ns = NetworkSettings('../config/network/network_settings.yaml')
assert_equal(ns.dump_bash(), None)
assert_equal(ns.dump_bash(path='/dev/null'), None)
def test_get_network_settings(self):
- ns = NetworkSettings('../config/network/network_settings.yaml', True)
- assert_is_instance(ns, dict)
+ ns = NetworkSettings('../config/network/network_settings.yaml')
+ assert_is_instance(ns, NetworkSettings)
for role in ['controller', 'compute']:
nic_index = 1
- for network in ['admin_network', 'private_network',
- 'public_network', 'storage_network']:
- nic = 'nic' + str(nic_index)
- assert_equal(ns.nics[role][network], nic)
- nic_index += 1
-
- def test_get_network_settings_unspecified_nics(self):
- ns = NetworkSettings(
- '../tests/config/network_settings_nics_not_specified.yaml',
- True)
- assert_is_instance(ns, dict)
- for role in ['controller', 'compute']:
- nic_index = 1
- for network in ['admin_network', 'private_network',
- 'public_network', 'storage_network']:
+ print(ns.nics)
+ for network in ns.enabled_network_list:
nic = 'nic' + str(nic_index)
assert_equal(ns.nics[role][network], nic)
nic_index += 1
def test_get_enabled_networks(self):
- ns = NetworkSettings('../config/network/network_settings.yaml', True)
- assert_is_instance(ns.get_enabled_networks(), list)
-
- def test_negative_network_settings(self):
- assert_raises(NetworkSettingsException, NetworkSettings,
- '../tests/config/network_settings_duplicate_nic.yaml',
- True)
- assert_raises(NetworkSettingsException, NetworkSettings,
- '../tests/config/network_settings_nic1_reserved.yaml',
- True)
- assert_raises(NetworkSettingsException, NetworkSettings,
- '../tests/config/network_settings_missing_required_nic'
- '.yaml', True)
+ ns = NetworkSettings('../config/network/network_settings.yaml')
+ assert_is_instance(ns.enabled_network_list, list)
+
+ def test_invalid_nic_members(self):
+ ns = NetworkSettings(files_dir+'network_settings.yaml')
+ storage_net_nicmap = ns['networks'][STORAGE_NETWORK]['nic_mapping']
+ # set duplicate nic
+ storage_net_nicmap['compute']['members'][0] = 'nic1'
+ assert_raises(NetworkSettingsException, NetworkSettings, ns)
+ # remove nic members
+ storage_net_nicmap['compute']['members'] = []
+ assert_raises(NetworkSettingsException, NetworkSettings, ns)
+
+ def test_missing_vlan(self):
+ ns = NetworkSettings(files_dir+'network_settings.yaml')
+ storage_net_nicmap = ns['networks'][STORAGE_NETWORK]['nic_mapping']
+ # remove vlan from storage net
+ storage_net_nicmap['compute'].pop('vlan', None)
+ assert_is_instance(NetworkSettings(ns), NetworkSettings)
+
+# TODO
+# need to manipulate interfaces somehow,
+# maybe by having ip_utils return something to make this pass
+# def test_admin_auto_detect(self):
+# ns = NetworkSettings(files_dir+'network_settings.yaml')
+# # remove cidr to force autodetection
+# ns['networks'][ADMIN_NETWORK].pop('cidr', None)
+# assert_is_instance(NetworkSettings(ns), NetworkSettings)
+
+ def test_admin_fail_auto_detect(self):
+ ns = NetworkSettings(files_dir+'network_settings.yaml')
+ # remove cidr and installer_vm to fail autodetect
+ ns['networks'][ADMIN_NETWORK].pop('cidr', None)
+ ns['networks'][ADMIN_NETWORK].pop('installer_vm', None)
+ assert_raises(NetworkSettingsException, NetworkSettings, ns)
+
+ def test_exception(self):
+ e = NetworkSettingsException("test")
+ print(e)
+ assert_is_instance(e, NetworkSettingsException)
+
+ def test_config_ip(self):
+ ns = NetworkSettings(files_dir+'network_settings.yaml')
+ # set the provisioner ip to None to force _gen_ip to generate one
+ ns['networks'][ADMIN_NETWORK]['installer_vm']['ip'] = None
+ ns['networks'][EXTERNAL_NETWORK][0]['installer_vm']['ip'] = None
+ # Now rebuild network settings object and check for repopulated values
+ ns = NetworkSettings(ns)
+ assert_equal(ns['networks'][ADMIN_NETWORK]['installer_vm']['ip'],
+ '192.0.2.1')
+ assert_equal(ns['networks'][EXTERNAL_NETWORK][0]['installer_vm']['ip'],
+ '192.168.37.1')
+
+ def test_config_gateway(self):
+ ns = NetworkSettings(files_dir+'network_settings.yaml')
+ # set the gateway ip to None to force _config_gateway to generate one
+ ns['networks'][EXTERNAL_NETWORK][0]['gateway'] = None
+ # Now rebuild network settings object and check for a repopulated value
+ ns = NetworkSettings(ns)
+ assert_equal(ns['networks'][EXTERNAL_NETWORK][0]['gateway'],
+ '192.168.37.1')
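
A pattern repeated throughout these tests: parse a settings file once, mutate the resulting object, then feed it back into the NetworkSettings constructor so validation and default-filling run again (the constructor evidently accepts either a path or an existing settings object). A hedged helper sketch; revalidated and the commented call below are illustrative, not Apex code:

    def revalidated(settings_cls, path, mutate):
        ns = settings_cls(path)    # first pass: parse and validate the file
        mutate(ns)                 # simulate a user edit
        return settings_cls(ns)    # second pass: re-run validation/defaults

    # e.g. drop the external gateway and let _config_gateway regenerate it
    # (assuming EXTERNAL_NETWORK == 'external'):
    # ns = revalidated(NetworkSettings, files_dir + 'network_settings.yaml',
    #                  lambda s: s['networks']['external'][0].update(gateway=None))
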
diff --git a/tests/test_apex_python_utils_py.py b/tests/test_apex_python_utils_py.py
index 237c5589..eb16f67d 100644
--- a/tests/test_apex_python_utils_py.py
+++ b/tests/test_apex_python_utils_py.py
@@ -7,7 +7,9 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+import shutil
import sys
+import tempfile
from test_apex_ip_utils import get_default_gateway_linux
from apex_python_utils import main
@@ -57,11 +59,13 @@ class TestCommonUtils(object):
assert_equal(main(), None)
def test_parse_net_settings(self):
+ tmp_dir = tempfile.mkdtemp()
args = self.parser.parse_args(['parse-net-settings',
'-s', net_sets,
- '--flat',
+ '-td', tmp_dir,
'-e', net_env])
assert_equal(parse_net_settings(args), None)
+ shutil.rmtree(tmp_dir, ignore_errors=True)
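
One note on the mkdtemp/rmtree pair added above: if parse_net_settings or the assertion raises, the rmtree line is never reached and the scratch directory leaks. tempfile.TemporaryDirectory (Python 3.2+) wraps the same mkdtemp/rmtree pair with guaranteed cleanup; a minimal sketch:

    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        pass  # invoke the parser with '-td', tmp_dir here
    # tmp_dir is removed on exit, whether or not the body raised
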
def test_parse_deploy_settings(self):
args = self.parser.parse_args(['parse-deploy-settings',