summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--build/Makefile9
-rw-r--r--build/csit-environment.yaml7
-rw-r--r--build/enable_rt_kvm.yaml1
-rw-r--r--build/first-boot.yaml27
-rw-r--r--build/kvm4nfv-1st-boot.yaml11
-rw-r--r--build/network-environment.yaml58
-rw-r--r--build/opnfv-environment.yaml32
-rw-r--r--build/ovs-dpdk-preconfig.yaml16
-rw-r--r--build/virtual-environment.yaml1
-rwxr-xr-xci/test.sh5
-rw-r--r--config/build/build_settings.yaml5
-rw-r--r--config/deploy/deploy_settings.yaml85
-rw-r--r--config/deploy/os-nosdn-fdio-ha.yaml1
-rw-r--r--config/deploy/os-nosdn-fdio-noha.yaml1
-rw-r--r--config/deploy/os-nosdn-kvm-ha.yaml1
-rw-r--r--config/deploy/os-nosdn-kvm-noha.yaml1
-rw-r--r--config/deploy/os-nosdn-nofeature-ha.yaml1
-rw-r--r--config/deploy/os-nosdn-nofeature-noha.yaml1
-rw-r--r--config/deploy/os-nosdn-ovs_dpdk-ha.yaml1
-rw-r--r--config/deploy/os-nosdn-ovs_dpdk-noha.yaml1
-rw-r--r--config/deploy/os-nosdn-performance-ha.yaml1
-rw-r--r--config/deploy/os-ocl-nofeature-ha.yaml1
-rw-r--r--config/deploy/os-odl-bgpvpn-ha.yaml1
-rw-r--r--config/deploy/os-odl-bgpvpn-noha.yaml1
-rw-r--r--config/deploy/os-odl-csit-noha.yaml1
-rw-r--r--config/deploy/os-odl-fdio-ha.yaml1
-rw-r--r--config/deploy/os-odl-fdio-noha.yaml1
-rw-r--r--config/deploy/os-odl-gluon-noha.yaml3
-rw-r--r--config/deploy/os-odl-nofeature-ha.yaml1
-rw-r--r--config/deploy/os-odl-nofeature-noha.yaml1
-rw-r--r--config/deploy/os-odl-ovs_dpdk-ha.yaml1
-rw-r--r--config/deploy/os-odl-ovs_dpdk-noha.yaml1
-rw-r--r--config/deploy/os-odl_l2-fdio-ha.yaml1
-rw-r--r--config/deploy/os-odl_l2-fdio-noha.yaml1
-rw-r--r--config/deploy/os-odl_l2-sfc-noha.yaml1
-rw-r--r--config/deploy/os-odl_netvirt-fdio-noha.yaml1
-rw-r--r--config/deploy/os-onos-nofeature-ha.yaml1
-rw-r--r--config/deploy/os-onos-sfc-ha.yaml1
-rw-r--r--config/deploy/os-ovn-nofeature-noha.yaml1
-rw-r--r--config/inventory/intel_pod2_settings.yaml1
-rw-r--r--config/inventory/nokia_pod1_settings.yaml1
-rw-r--r--config/inventory/pod_example_settings.yaml1
-rw-r--r--config/network/network_settings.yaml305
-rw-r--r--config/network/network_settings_v6.yaml239
-rw-r--r--config/network/network_settings_vlans.yaml275
-rw-r--r--config/network/network_settings_vpp.yaml301
-rw-r--r--tests/config/inventory.yaml1
47 files changed, 901 insertions, 509 deletions
diff --git a/build/Makefile b/build/Makefile
index 2df4142..1d329fb 100644
--- a/build/Makefile
+++ b/build/Makefile
@@ -117,6 +117,15 @@ python-pep8-check:
pep8 ../lib/python
pep8 ../tests
+#############
+# YAMLLINT #
+#############
+
+.PHONY: yamllint
+yamllint:
+ @echo "Running yamllint against all .yaml files"
+ cd ../ && yamllint $(shell cd ../ && git ls-tree -r HEAD --name-only | grep 'yaml$$')
+
##################
# NETWORKING-VPP #
##################
diff --git a/build/csit-environment.yaml b/build/csit-environment.yaml
index d983321..3dcd6d9 100644
--- a/build/csit-environment.yaml
+++ b/build/csit-environment.yaml
@@ -1,5 +1,6 @@
-#Environment file used to list common parameters required for all deployment
-#types
+---
+# Environment file used to list common parameters required for all deployment
+# types
parameters:
# CloudDomain:
@@ -14,7 +15,7 @@ parameter_defaults:
OvercloudComputeFlavor: compute
controllerImage: overcloud-full
ExtraConfig:
- tripleo::ringbuilder::build_ring: False
+ tripleo::ringbuilder::build_ring: false
nova::nova_public_key:
type: 'ssh-rsa'
replace_public_key:
diff --git a/build/enable_rt_kvm.yaml b/build/enable_rt_kvm.yaml
index 0059021..4601fd4 100644
--- a/build/enable_rt_kvm.yaml
+++ b/build/enable_rt_kvm.yaml
@@ -1,3 +1,4 @@
+---
parameter_defaults:
ComputeKernelArgs: 'kvmfornfv_kernel.rpm'
resource_registry:
diff --git a/build/first-boot.yaml b/build/first-boot.yaml
index d4ac5df..1e8921b 100644
--- a/build/first-boot.yaml
+++ b/build/first-boot.yaml
@@ -1,3 +1,4 @@
+---
heat_template_version: 2014-10-16
description: >
@@ -9,8 +10,10 @@ parameters:
ComputeKernelArgs:
description: >
Space seprated list of Kernel args to be update to grub.
- The given args will be appended to existing args of GRUB_CMDLINE_LINUX in file /etc/default/grub
- Example: "intel_iommu=on default_hugepagesz=2MB hugepagesz=2MB hugepages=2048"
+ The given args will be appended to existing args of
+ GRUB_CMDLINE_LINUX in file /etc/default/grub
+ Example:
+ "intel_iommu=on default_hugepagesz=2MB hugepagesz=2MB hugepages=2048"
type: string
default: ""
@@ -19,7 +22,7 @@ resources:
type: OS::Heat::MultipartMime
properties:
parts:
- - config: {get_resource: compute_kernel_args}
+ - config: {get_resource: compute_kernel_args}
# Verify the logs on /var/log/cloud-init.log on the overcloud node
compute_kernel_args:
@@ -30,24 +33,24 @@ resources:
template: |
#!/bin/bash
set -x
- sed 's/^\(GRUB_CMDLINE_LINUX=".*\)"/\1 $KERNEL_ARGS"/g' -i /etc/default/grub ;
+ sed 's/^\(GRUB_CMDLINE_LINUX=".*\)"/\1 $KERNEL_ARGS"/g' \
+ -i /etc/default/grub ;
grub2-mkconfig -o /etc/grub2.cfg
- hugepage_count=`echo $KERNEL_ARGS | grep -oP ' ?hugepages=\K[0-9]+'`
+ hugepage_count=$(echo $KERNEL_ARGS | \
+ grep -oP ' ?hugepages=\K[0-9]+')
if [ -z "$hugepage_count" ]; then
hugepage_count=1024
fi
echo vm.hugetlb_shm_group=0 >> /usr/lib/sysctl.d/00-system.conf
- echo vm.max_map_count=$(printf "%.0f" $(echo 2.2*$hugepage_count | bc)) >> /usr/lib/sysctl.d/00-system.conf
- echo kernel.shmmax=$(($hugepage_count * 2 * 1024 * 1024)) >> /usr/lib/sysctl.d/00-system.conf
- rm -f /etc/sysconfig/network-scripts/ifcfg-*
+ HPAGE_CT=$(printf "%.0f" $(echo 2.2*$hugepage_count | bc))
+ echo vm.max_map_count=$HPAGE_CT >> /usr/lib/sysctl.d/00-system.conf
+ HPAGE_CT=$(($hugepage_count * 2 * 1024 * 1024))
+ echo kernel.shmmax=$HPAGE_CT >> /usr/lib/sysctl.d/00-system.conf
+
reboot
params:
$KERNEL_ARGS: {get_param: ComputeKernelArgs}
outputs:
- # This means get_resource from the parent template will get the userdata, see:
- # http://docs.openstack.org/developer/heat/template_guide/composition.html#making-your-template-resource-more-transparent
- # Note this is new-for-kilo, an alternative is returning a value then using
- # get_attr in the parent template instead.
OS::stack_id:
value: {get_resource: userdata}
diff --git a/build/kvm4nfv-1st-boot.yaml b/build/kvm4nfv-1st-boot.yaml
index 0dad3e3..4c1aefc 100644
--- a/build/kvm4nfv-1st-boot.yaml
+++ b/build/kvm4nfv-1st-boot.yaml
@@ -1,3 +1,4 @@
+---
heat_template_version: 2014-10-16
description: >
@@ -21,7 +22,7 @@ resources:
type: OS::Heat::MultipartMime
properties:
parts:
- - config: {get_resource: compute_kernel_args}
+ - config: {get_resource: compute_kernel_args}
# Verify the logs on /var/log/cloud-init.log on the overcloud node
compute_kernel_args:
@@ -36,8 +37,10 @@ resources:
if [[ -z $FORMAT ]] ; then
FORMAT="compute" ;
else
- # Assumption: only %index% and %stackname% are the variables in Host name format
- FORMAT=$(echo $FORMAT | sed 's/\%index\%//g' | sed 's/\%stackname\%//g') ;
+ # Assumption: only %index% and %stackname% are
+ # the variables in Host name format
+ FORMAT=$(echo $FORMAT | sed 's/\%index\%//g');
+ FORMAT=$(echo $FORMAT | sed 's/\%stackname\%//g');
fi
if [[ $(hostname) == *$FORMAT* ]] ; then
yum install -y /root/$KVMFORNFV_KERNEL_RPM
@@ -56,5 +59,3 @@ outputs:
# get_attr in the parent template instead.
OS::stack_id:
value: {get_resource: userdata}
-
-
diff --git a/build/network-environment.yaml b/build/network-environment.yaml
index 40f8e3b..cb6e019 100644
--- a/build/network-environment.yaml
+++ b/build/network-environment.yaml
@@ -1,39 +1,59 @@
+---
# Enable the creation of Neutron networks for isolated OvercloudV
# traffic and configure each role to assign ports (related
# to that role) on these networks.
# Many networks are disabled by default because they are not used
# in a typical configuration. Override via parameter_defaults.
resource_registry:
- OS::TripleO::Network::External: /usr/share/openstack-tripleo-heat-templates/network/external.yaml
+ OS::TripleO::Network::External: |-
+ /usr/share/openstack-tripleo-heat-templates/network/external.yaml
OS::TripleO::Network::InternalApi: OS::Heat::None
OS::TripleO::Network::StorageMgmt: OS::Heat::None
OS::TripleO::Network::Storage: OS::Heat::None
OS::TripleO::Network::Tenant: OS::Heat::None
# Management network is optional and disabled by default
- #OS::TripleO::Network::Management: /usr/share/openstack-tripleo-heat-templates/network/noop.yaml
+ # OS::TripleO::Network::Management:
+ # /usr/share/openstack-tripleo-heat-templates/network/noop.yaml
# Port assignments for the VIPs
- OS::TripleO::Network::Ports::ExternalVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external.yaml
- OS::TripleO::Network::Ports::InternalApiVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
- OS::TripleO::Network::Ports::StorageVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
- OS::TripleO::Network::Ports::StorageMgmtVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
- OS::TripleO::Network::Ports::RedisVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/ctlplane_vip.yaml
+ OS::TripleO::Network::Ports::ExternalVipPort: |-
+ /usr/share/openstack-tripleo-heat-templates/network/ports/external.yaml
+ OS::TripleO::Network::Ports::InternalApiVipPort: |-
+ /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
+ OS::TripleO::Network::Ports::StorageVipPort: |-
+ /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
+ OS::TripleO::Network::Ports::StorageMgmtVipPort: |-
+ /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
+ OS::TripleO::Network::Ports::RedisVipPort: |-
+ /usr/share/openstack-tripleo-heat-templates/network/ports/ctlplane_vip.yaml
# Port assignments for the controller role
- OS::TripleO::Controller::Ports::ExternalPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external.yaml
- OS::TripleO::Controller::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
- OS::TripleO::Controller::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
- OS::TripleO::Controller::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
- OS::TripleO::Controller::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
- #OS::TripleO::Controller::Ports::ManagementPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
+ OS::TripleO::Controller::Ports::ExternalPort: |-
+ /usr/share/openstack-tripleo-heat-templates/network/ports/external.yaml
+ OS::TripleO::Controller::Ports::InternalApiPort: |-
+ /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
+ OS::TripleO::Controller::Ports::StoragePort: |-
+ /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
+ OS::TripleO::Controller::Ports::StorageMgmtPort: |-
+ /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
+ OS::TripleO::Controller::Ports::TenantPort: |-
+ /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
+ # OS::TripleO::Controller::Ports::ManagementPort:
+ # /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
# Port assignments for the compute role
- OS::TripleO::Compute::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
- OS::TripleO::Compute::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
- OS::TripleO::Compute::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
- OS::TripleO::Compute::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
- OS::TripleO::Compute::Ports::ExternalPort: /usr/share/openstack-tripleo-heat-templates/network/ports/external.yaml
- #OS::TripleO::Compute::Ports::ManagementPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
+ OS::TripleO::Compute::Ports::InternalApiPort: |-
+ /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
+ OS::TripleO::Compute::Ports::StorageMgmtPort: |-
+ /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
+ OS::TripleO::Compute::Ports::StoragePort: |-
+ /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
+ OS::TripleO::Compute::Ports::TenantPort: |-
+ /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
+ OS::TripleO::Compute::Ports::ExternalPort: |-
+ /usr/share/openstack-tripleo-heat-templates/network/ports/external.yaml
+ # OS::TripleO::Compute::Ports::ManagementPort:
+ # /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
# NIC assignments
OS::TripleO::Compute::Net::SoftwareConfig: nics/compute.yaml
diff --git a/build/opnfv-environment.yaml b/build/opnfv-environment.yaml
index 6289e2d..a861312 100644
--- a/build/opnfv-environment.yaml
+++ b/build/opnfv-environment.yaml
@@ -1,26 +1,28 @@
-#Environment file used to list common parameters required for all deployment
-#types
+---
+# Environment file used to list common parameters required for all deployment
+# types
parameters:
- # value updated via lib/overcloud-deploy-functions.sh
- #CloudDomain:
+ # # value updated via lib/overcloud-deploy-functions.sh
+ # CloudDomain:
parameter_defaults:
CeilometerStoreEvents: true
NeutronEnableForceMetadata: true
NeutronEnableDHCPMetadata: true
NeutronEnableIsolatedMetadata: true
- # the following parameters are given values via
- # lib/overcloud-deploy-functions.sh when they are used by a scenario
- #NeutronVPPAgentPhysnets: 'datacentre:GigabitEthernet2/2/0'
- #NovaSchedulerDefaultFilters:
- #ComputeKernelArgs:
- #PmdCoreList:
- #OvsDpdkCoreList:
- #OvsDpdkSocketMemory:
- #ControllerExtraConfig:
- #NovaComputeExtraConfig:
+ # NeutronVPPAgentPhysnets:
+ # NovaSchedulerDefaultFilters:
+ # # Kernel arguments, this value will be set to kernel arguments specified
+ # # for compute nodes in deploy setting file.
+ # ComputeKernelArgs:
+ # PmdCoreList:
+ # OvsDpdkCoreList:
+ # OvsDpdkSocketMemory:
+ # ControllerExtraConfig:
+ # NovaComputeExtraConfig:
ExtraConfig:
+ tripleo::ringbuilder::build_ring: false
nova::nova_public_key:
type: 'ssh-rsa'
replace_public_key:
@@ -34,7 +36,7 @@ parameter_defaults:
nova::api::default_floating_pool: 'external'
# VPP routing node, used for odl-fdio only.
# value updated via lib/overcloud-deploy-functions.sh
- #opendaylight::vpp_routing_node: overcloud-novacompute-0.opnfvlf.org
+ # opendaylight::vpp_routing_node: overcloud-novacompute-0.opnfvlf.org
ControllerServices:
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CephMon
diff --git a/build/ovs-dpdk-preconfig.yaml b/build/ovs-dpdk-preconfig.yaml
index a4663ff..91cd6e1 100644
--- a/build/ovs-dpdk-preconfig.yaml
+++ b/build/ovs-dpdk-preconfig.yaml
@@ -1,3 +1,4 @@
+---
heat_template_version: 2014-10-16
description: >
@@ -27,7 +28,7 @@ resources:
OvsDpdkSetup:
type: OS::Heat::StructuredDeployment
properties:
- server: {get_param: server}
+ server: {get_param: server}
config: {get_resource: OvsDpdkConfig}
OvsDpdkConfig:
@@ -81,13 +82,16 @@ resources:
sed -i "s/#group\s*=.*/group = \"root\"/" /etc/libvirt/qemu.conf
ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true
if [ -n "$SOCKET_MEMORY" ]; then
- ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-socket-mem=$SOCKET_MEMORY
+ other_config="dpdk-socket-mem=$SOCKET_MEMORY"
+ ovs-vsctl --no-wait set Open_vSwitch . other_config:$other_config
fi
- if [[ -n "$pmd_cpu_mask" && -n "$PMD_CORES" ]]; then
- ovs-vsctl --no-wait set Open_vSwitch . other_config:pmd-cpu-mask=$pmd_cpu_mask
+ if [ -n "$pmd_cpu_mask" ]; then
+ other_config="pmd-cpu-mask=$pmd_cpu_mask"
+ ovs-vsctl --no-wait set Open_vSwitch . other_config:$other_config
fi
- if [ -n "$dpdk_lcore_mask" && -n "$DPDK_CORES" ]]; then
- ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-lcore-mask=$dpdk_lcore_mask
+ if [ -n "$dpdk_lcore_mask" ]; then
+ other_config="dpdk-lcore-mask=$dpdk_lcore_mask"
+ ovs-vsctl --no-wait set Open_vSwitch . other_config:$other_config
fi
systemctl restart openvswitch
diff --git a/build/virtual-environment.yaml b/build/virtual-environment.yaml
index f87879c..399a312 100644
--- a/build/virtual-environment.yaml
+++ b/build/virtual-environment.yaml
@@ -1,3 +1,4 @@
+---
parameter_defaults:
controllerExtraConfig:
heat::api_cloudwatch::enabled: false
diff --git a/ci/test.sh b/ci/test.sh
index 3e538ff..bae2109 100755
--- a/ci/test.sh
+++ b/ci/test.sh
@@ -11,7 +11,7 @@
set -e
# Make sure python dependencies are installed
-for pkg in iproute epel-release python34-devel python34-nose python34-PyYAML python-pep8 python34-mock; do
+for pkg in yamllint iproute epel-release python34-devel python34-nose python34-PyYAML python-pep8 python34-mock; do
if ! rpm -q ${pkg} > /dev/null; then
if ! sudo yum install -y ${pkg}; then
echo "Failed to install ${pkg} package..."
@@ -24,6 +24,7 @@ done
if ! python3 -c "import coverage" &> /dev/null; then sudo easy_install-3.4 coverage; fi
pushd ../build/ > /dev/null
-make python-tests
make python-pep8-check
+make yamllint
+make python-tests
popd > /dev/null
diff --git a/config/build/build_settings.yaml b/config/build/build_settings.yaml
index 93ac944..5fd7a4e 100644
--- a/config/build/build_settings.yaml
+++ b/config/build/build_settings.yaml
@@ -1,3 +1,4 @@
+---
build_params:
opendaylight: true
onos: false
@@ -7,7 +8,9 @@ build_params:
opendaylight_config:
name: "OpenDaylight SFC"
method: zip
- location: https://www.dropbox.com/s/6w76eo7loltvvb5/openstack.net-virt-sfc-karaf-1.2.1-SNAPSHOT.zip
+ location: >-
+ https://www.dropbox.com/s/6w76eo7loltvvb5/
+ openstack.net-virt-sfc-karaf-1.2.1-SNAPSHOT.zip
onos_config:
name: "ONOS Base Controller"
diff --git a/config/deploy/deploy_settings.yaml b/config/deploy/deploy_settings.yaml
index 3186a5d..7406fe6 100644
--- a/config/deploy/deploy_settings.yaml
+++ b/config/deploy/deploy_settings.yaml
@@ -1,3 +1,4 @@
+---
# The only global parameter at this time is ha_enabled, which will use
# the tripleo ha architecture described here:
# https://github.com/beekhof/osp-ha-deploy/blob/master/HA-keepalived.md
@@ -16,10 +17,10 @@ deploy_options:
# Which version of ODL to use. This is only valid if 'opendaylight' was used
# above. Valid options are 'carbon'. If no value
# is specified, carbon will be used.
- #odl_version: carbon
+ # odl_version: carbon
- # Whether to configure ODL L3 support. This will disable the Neutron L3 Agent and
- # use ODL instead.
+ # Whether to configure ODL L3 support. This will disable the Neutron L3 Agent
+ # and use ODL instead.
sdn_l3: false
# Whether to install and configure Tacker (VNF Manager)
@@ -29,9 +30,9 @@ deploy_options:
# Note: Congress is already installed by default
congress: false
- # Whether to configure ODL or ONOS with Service Function Chaining support. This
- # requires the opnfv-apex-opendaylight-sfc package to be installed, since it
- # uses a different overcloud image.
+ # Whether to configure ODL or ONOS with Service Function Chaining support.
+ # This requires the opnfv-apex-opendaylight-sfc package to be installed,
+ # since it uses a different overcloud image.
sfc: false
# Whether to configure ODL with SDNVPN support.
@@ -39,7 +40,7 @@ deploy_options:
# Which dataplane to use for overcloud tenant networks. Valid options are
# 'ovs', 'ovs_dpdk' and 'fdio'.
- dataplane : ovs
+ dataplane: ovs
# Whether to run the kvm real time kernel (rt_kvm) in the compute node(s) to
# reduce the network latencies caused by network function virtualization
@@ -50,48 +51,50 @@ deploy_options:
vpp: false
# Whether to run vsperf after the install has completed
- #vsperf: false
+ # vsperf: false
# Specify a device for ceph to use for the OSDs. By default a virtual disk
# is created for the OSDs. This setting allows you to specify a different
# target for the OSDs. The setting must be valid on all overcloud nodes.
# The controllers and the compute nodes all have OSDs setup on them and
# therefore this device name must be valid on all overcloud nodes.
- #ceph_device: /dev/sdb
+ # ceph_device: /dev/sdb
- # Set performance options on specific roles. The valid roles are 'Compute', 'Controller'
- # and 'Storage', and the valid sections are 'kernel' and 'nova'
- #performance:
- # Controller:
- # kernel:
- # # In this example, these three settings will be passed to the kernel boot line.
- # # Any key/value pair can be entered here, so care should be taken to ensure that machines
- # # do not fail to boot.
- # #
- # # isolcpus is generally used to push host processes off a particular core,
- # # so that it can be dedicated to a specific process. On control nodes
- # # this could be an ovs_dpdk process.
- # isolcpus: 1
- # # Hugepages are required for ovs_dpdk support.
- # hugepage: 2M
- # # intel_iommu is also required for ovs_dpdk support.
- # intel_iommu: 'on'
- # Compute:
- # nova:
- # # This is currently the only available option in the nova section. It will
- # # add the provided string to vcpu_pin_set in nova.conf. This is used to pin
- # # guest VMs to a set of CPU cores, and is decsribed in more detail here:
- # # http://docs.openstack.org/ocata/config-reference/compute/config-options.html
- # libvirtpin: 1
- # kernel:
- # # On compute nodes, isolcpus is usually used to reserve cores for use either by VMs
- # # or ovs_dpdk
- # isolcpus: 0
- # hugepage: 2M
- # intel_iommu: 'on'
+ # Set performance options on specific roles. The valid roles are 'Compute',
+ # 'Controller' and 'Storage', and the valid sections are 'kernel' and 'nova'
+ # performance:
+ # Controller:
+ # kernel:
+ # # In this example, these three settings will be passed to the kernel
+ # # boot line. Any key/value pair can be entered here, so care should
+ # # be taken to ensure that machines do not fail to boot.
+ # #
+ # # isolcpus is generally used to push host processes off a particular
+ # # core, so that it can be dedicated to a specific process. On control
+ # # nodes this could be an ovs_dpdk process.
+ # isolcpus: 1
+ # # Hugepages are required for ovs_dpdk support.
+ # hugepage: 2M
+ # # intel_iommu is also required for ovs_dpdk support.
+ # intel_iommu: 'on'
+ # Compute:
+ # nova:
+ # # This is currently the only available option in the nova section. It
+ # # will add the provided string to vcpu_pin_set in nova.conf. This is
+ # # used to pin guest VMs to a set of CPU cores, and is decsribed in
+ # # more detail here:
+ # # http://docs.openstack.org
+ # # /ocata/config-reference/compute/config-options.html
+ # libvirtpin: 1
+ # kernel:
+ # # On compute nodes, isolcpus is usually used to reserve cores for use
+ # # either by VMs or ovs_dpdk
+ # isolcpus: 0
+ # hugepage: 2M
+ # intel_iommu: 'on'
# Set yardstick option to install yardstick
- #yardstick: false
+ # yardstick: false
# Set dovetail option to install dovetail
- #dovetail: false
+ # dovetail: false
diff --git a/config/deploy/os-nosdn-fdio-ha.yaml b/config/deploy/os-nosdn-fdio-ha.yaml
index 3f0f8a2..d8af12d 100644
--- a/config/deploy/os-nosdn-fdio-ha.yaml
+++ b/config/deploy/os-nosdn-fdio-ha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: true
diff --git a/config/deploy/os-nosdn-fdio-noha.yaml b/config/deploy/os-nosdn-fdio-noha.yaml
index 5f27bed..446ddfd 100644
--- a/config/deploy/os-nosdn-fdio-noha.yaml
+++ b/config/deploy/os-nosdn-fdio-noha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: false
diff --git a/config/deploy/os-nosdn-kvm-ha.yaml b/config/deploy/os-nosdn-kvm-ha.yaml
index cb68a31..b01a835 100644
--- a/config/deploy/os-nosdn-kvm-ha.yaml
+++ b/config/deploy/os-nosdn-kvm-ha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: true
diff --git a/config/deploy/os-nosdn-kvm-noha.yaml b/config/deploy/os-nosdn-kvm-noha.yaml
index bf737d7..a9c8d97 100644
--- a/config/deploy/os-nosdn-kvm-noha.yaml
+++ b/config/deploy/os-nosdn-kvm-noha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: false
diff --git a/config/deploy/os-nosdn-nofeature-ha.yaml b/config/deploy/os-nosdn-nofeature-ha.yaml
index 286b516..3c26b83 100644
--- a/config/deploy/os-nosdn-nofeature-ha.yaml
+++ b/config/deploy/os-nosdn-nofeature-ha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: true
diff --git a/config/deploy/os-nosdn-nofeature-noha.yaml b/config/deploy/os-nosdn-nofeature-noha.yaml
index 2d1b44a..2cd6633 100644
--- a/config/deploy/os-nosdn-nofeature-noha.yaml
+++ b/config/deploy/os-nosdn-nofeature-noha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: false
diff --git a/config/deploy/os-nosdn-ovs_dpdk-ha.yaml b/config/deploy/os-nosdn-ovs_dpdk-ha.yaml
index c9d5867..8e31221 100644
--- a/config/deploy/os-nosdn-ovs_dpdk-ha.yaml
+++ b/config/deploy/os-nosdn-ovs_dpdk-ha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: true
diff --git a/config/deploy/os-nosdn-ovs_dpdk-noha.yaml b/config/deploy/os-nosdn-ovs_dpdk-noha.yaml
index cf9b854..f28b0c4 100644
--- a/config/deploy/os-nosdn-ovs_dpdk-noha.yaml
+++ b/config/deploy/os-nosdn-ovs_dpdk-noha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: false
diff --git a/config/deploy/os-nosdn-performance-ha.yaml b/config/deploy/os-nosdn-performance-ha.yaml
index acbc1d3..f5d0656 100644
--- a/config/deploy/os-nosdn-performance-ha.yaml
+++ b/config/deploy/os-nosdn-performance-ha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: true
diff --git a/config/deploy/os-ocl-nofeature-ha.yaml b/config/deploy/os-ocl-nofeature-ha.yaml
index 4ad1ee2..bc05fa4 100644
--- a/config/deploy/os-ocl-nofeature-ha.yaml
+++ b/config/deploy/os-ocl-nofeature-ha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: true
diff --git a/config/deploy/os-odl-bgpvpn-ha.yaml b/config/deploy/os-odl-bgpvpn-ha.yaml
index 76d051e..b55c459 100644
--- a/config/deploy/os-odl-bgpvpn-ha.yaml
+++ b/config/deploy/os-odl-bgpvpn-ha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: true
diff --git a/config/deploy/os-odl-bgpvpn-noha.yaml b/config/deploy/os-odl-bgpvpn-noha.yaml
index d569ba6..babb931 100644
--- a/config/deploy/os-odl-bgpvpn-noha.yaml
+++ b/config/deploy/os-odl-bgpvpn-noha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: false
diff --git a/config/deploy/os-odl-csit-noha.yaml b/config/deploy/os-odl-csit-noha.yaml
index d5e2695..67bdec1 100644
--- a/config/deploy/os-odl-csit-noha.yaml
+++ b/config/deploy/os-odl-csit-noha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: false
diff --git a/config/deploy/os-odl-fdio-ha.yaml b/config/deploy/os-odl-fdio-ha.yaml
index af81e5a..cb7643d 100644
--- a/config/deploy/os-odl-fdio-ha.yaml
+++ b/config/deploy/os-odl-fdio-ha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: true
diff --git a/config/deploy/os-odl-fdio-noha.yaml b/config/deploy/os-odl-fdio-noha.yaml
index adb37f3..12c1fa6 100644
--- a/config/deploy/os-odl-fdio-noha.yaml
+++ b/config/deploy/os-odl-fdio-noha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: false
diff --git a/config/deploy/os-odl-gluon-noha.yaml b/config/deploy/os-odl-gluon-noha.yaml
index 12e5d25..03ca103 100644
--- a/config/deploy/os-odl-gluon-noha.yaml
+++ b/config/deploy/os-odl-gluon-noha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: false
@@ -9,4 +10,4 @@ deploy_options:
congress: true
sfc: false
vpn: true
- gluon: true \ No newline at end of file
+ gluon: true
diff --git a/config/deploy/os-odl-nofeature-ha.yaml b/config/deploy/os-odl-nofeature-ha.yaml
index 64af606..c4bce1c 100644
--- a/config/deploy/os-odl-nofeature-ha.yaml
+++ b/config/deploy/os-odl-nofeature-ha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: true
diff --git a/config/deploy/os-odl-nofeature-noha.yaml b/config/deploy/os-odl-nofeature-noha.yaml
index d95ed9c..0c9b4fb 100644
--- a/config/deploy/os-odl-nofeature-noha.yaml
+++ b/config/deploy/os-odl-nofeature-noha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: false
diff --git a/config/deploy/os-odl-ovs_dpdk-ha.yaml b/config/deploy/os-odl-ovs_dpdk-ha.yaml
index 8e3674a..8bf38e2 100644
--- a/config/deploy/os-odl-ovs_dpdk-ha.yaml
+++ b/config/deploy/os-odl-ovs_dpdk-ha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: true
diff --git a/config/deploy/os-odl-ovs_dpdk-noha.yaml b/config/deploy/os-odl-ovs_dpdk-noha.yaml
index 1711fc6..6d6be98 100644
--- a/config/deploy/os-odl-ovs_dpdk-noha.yaml
+++ b/config/deploy/os-odl-ovs_dpdk-noha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: false
diff --git a/config/deploy/os-odl_l2-fdio-ha.yaml b/config/deploy/os-odl_l2-fdio-ha.yaml
index d4a86b0..68777db 100644
--- a/config/deploy/os-odl_l2-fdio-ha.yaml
+++ b/config/deploy/os-odl_l2-fdio-ha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: true
diff --git a/config/deploy/os-odl_l2-fdio-noha.yaml b/config/deploy/os-odl_l2-fdio-noha.yaml
index b9e0cea..ab23e97 100644
--- a/config/deploy/os-odl_l2-fdio-noha.yaml
+++ b/config/deploy/os-odl_l2-fdio-noha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: false
diff --git a/config/deploy/os-odl_l2-sfc-noha.yaml b/config/deploy/os-odl_l2-sfc-noha.yaml
index 4423b17..8104e7e 100644
--- a/config/deploy/os-odl_l2-sfc-noha.yaml
+++ b/config/deploy/os-odl_l2-sfc-noha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: false
diff --git a/config/deploy/os-odl_netvirt-fdio-noha.yaml b/config/deploy/os-odl_netvirt-fdio-noha.yaml
index fa6ba80..6cf8c1a 100644
--- a/config/deploy/os-odl_netvirt-fdio-noha.yaml
+++ b/config/deploy/os-odl_netvirt-fdio-noha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: false
diff --git a/config/deploy/os-onos-nofeature-ha.yaml b/config/deploy/os-onos-nofeature-ha.yaml
index 0d083dd..542f6df 100644
--- a/config/deploy/os-onos-nofeature-ha.yaml
+++ b/config/deploy/os-onos-nofeature-ha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: true
diff --git a/config/deploy/os-onos-sfc-ha.yaml b/config/deploy/os-onos-sfc-ha.yaml
index f0b7851..2d68573 100644
--- a/config/deploy/os-onos-sfc-ha.yaml
+++ b/config/deploy/os-onos-sfc-ha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: true
diff --git a/config/deploy/os-ovn-nofeature-noha.yaml b/config/deploy/os-ovn-nofeature-noha.yaml
index e03ac0a..d2702c9 100644
--- a/config/deploy/os-ovn-nofeature-noha.yaml
+++ b/config/deploy/os-ovn-nofeature-noha.yaml
@@ -1,3 +1,4 @@
+---
global_params:
ha_enabled: false
diff --git a/config/inventory/intel_pod2_settings.yaml b/config/inventory/intel_pod2_settings.yaml
index 1552041..ff5735f 100644
--- a/config/inventory/intel_pod2_settings.yaml
+++ b/config/inventory/intel_pod2_settings.yaml
@@ -1,3 +1,4 @@
+---
nodes:
node1:
mac_address: "00:1e:67:b2:08:7b"
diff --git a/config/inventory/nokia_pod1_settings.yaml b/config/inventory/nokia_pod1_settings.yaml
index 7cf1c04..337b048 100644
--- a/config/inventory/nokia_pod1_settings.yaml
+++ b/config/inventory/nokia_pod1_settings.yaml
@@ -1,3 +1,4 @@
+---
nodes:
node1:
mac_address: "54:AB:3A:24:3B:E1"
diff --git a/config/inventory/pod_example_settings.yaml b/config/inventory/pod_example_settings.yaml
index c08b30c..d7e5c84 100644
--- a/config/inventory/pod_example_settings.yaml
+++ b/config/inventory/pod_example_settings.yaml
@@ -1,3 +1,4 @@
+---
nodes:
node1:
mac_address: "10:23:45:67:89:AB"
diff --git a/config/network/network_settings.yaml b/config/network/network_settings.yaml
index ba78439..8875334 100644
--- a/config/network/network_settings.yaml
+++ b/config/network/network_settings.yaml
@@ -1,3 +1,4 @@
+---
# This configuration file defines Network Environment for a
# Baremetal Deployment of OPNFV. It contains default values
# for 5 following networks:
@@ -34,7 +35,7 @@
# communication. This should be used for IPv6 deployments.
-#Meta data for the network configuration
+# Meta data for the network configuration
network-config-metadata:
title: LF-POD-1 Network config
version: 0.1
@@ -57,153 +58,248 @@ syslog:
transport: 'tcp'
# Common network settings
-networks: # Network configurations
- admin: # Admin configuration (pxe and jumpstart),
+networks:
+ # Admin configuration (pxe and jumpstart)
+ admin:
enabled: true
- installer_vm: # Network settings for the Installer VM on admin network
- nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ # Network settings for the Installer VM on admin network
+ installer_vm:
+ # Indicates if this VM will be bridged to an interface, or to a bond
+ nic_type: interface
+ # Interfaces to bridge for installer VM (use multiple values for bond)
members:
- - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
- vlan: native # VLAN tag to use for this network on Installer VM, native means none
- ip: 192.0.2.1 # IP to assign to Installer VM on this network
+ - em1
+ # VLAN tag to use for this network on Installer VM, native means none
+ vlan: native
+ # IP to assign to Installer VM on this network
+ ip: 192.0.2.1
+ # Usable ip range for the overcloud node IPs (including VIPs)
+ # Last IP is used for host bridge (i.e. br-admin).
+ # If empty entire range is usable.
+ # Cannot overlap with dhcp_range or introspection_range.
overcloud_ip_range:
- 192.0.2.11
- - 192.0.2.99 # Usable ip range for the overcloud node IPs (including VIPs) and last IP will be
- # used for host bridge (i.e. br-admin). If empty entire range is usable.
- # Cannot overlap with dhcp_range or introspection_range.
- gateway: 192.0.2.1 # Gateway (only needed when public_network is disabled)
- cidr: 192.0.2.0/24 # Subnet in CIDR format 192.168.1.0/24
+ - 192.0.2.99
+ # Gateway (only needed when public_network is disabled)
+ gateway: 192.0.2.1
+ # Subnet in CIDR format 192.168.1.0/24
+ cidr: 192.0.2.0/24
+ # DHCP range for the admin network, automatically provisioned if empty
dhcp_range:
- 192.0.2.2
- - 192.0.2.10 # DHCP range for the admin network, if empty it will be automatically provisioned
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - 192.0.2.10
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface or bond)
+ phys_type: interface
+ # Physical NIC members (Single value allowed for phys_type: interface)
+ members:
- eth0
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
phys_type: interface
members:
- eth0
- #
- tenant: # Tenant network configuration
+
+ # Tenant network configuration
+ tenant:
enabled: true
- cidr: 11.0.0.0/24 # Subnet in CIDR format 192.168.1.0/24
- mtu: 1500 # Tenant network MTU
- overlay_id_range: 2,65535 # Tenant network Overlay segmentation ID range:
- # VNI, VLAN-ID, etc.
- segmentation_type: vxlan # Tenant network segmentation type:
- # vlan, vxlan, gre
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- uio_driver: uio_pci_generic # UIO driver to use for DPDK scenarios. The value is ignored for non-DPDK scenarios.
- vlan: native # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - eth1 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ # Subnet in CIDR format 192.168.1.0/24
+ cidr: 11.0.0.0/24
+ # Tenant network MTU
+ mtu: 1500
+ # Tenant network Overlay segmentation ID range:
+ # VNI, VLAN-ID, etc.
+ overlay_id_range: 2,65535
+
+ # Tenant network segmentation type:
+ # vlan, vxlan, gre
+ segmentation_type: vxlan
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface/bond)
+ phys_type: interface
+ # UIO driver to use for DPDK scenarios.
+ # The value is ignored for non-DPDK scenarios.
+ uio_driver: uio_pci_generic
+ # VLAN tag to use with this NIC
+ vlan: native
+ # Physical NIC members of this mapping
+ # Single value allowed for phys_type: interface
+ members:
+ # Note logical name like nic1 not valid for fdio deployment yet.
+ - eth1
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
+ # Physical interface type (interface/bond)
phys_type: interface
vlan: native
+      # Note: logical names like nic1 are not valid for fdio deployment yet.
members:
- - eth1 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
- #
- external: # Can contain 1 or more external networks
- - public: # "public" network will be the network the installer VM attaches to
+ - eth1
+
+ # Can contain 1 or more external networks
+ external:
+ - public:
enabled: true
- mtu: 1500 # Public network MTU
- installer_vm: # Network settings for the Installer VM on admin network (note only valid on 'public' external network)
- nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ # Public network MTU
+ mtu: 1500
+ # Network settings for the Installer VM on external network
+ # (note only valid on 'public' external network)
+ installer_vm:
+ # Indicates if this VM will be bridged to an interface, or to a bond
+ nic_type: interface
vlan: native
+ # Interfaces to bridge for installer VM (use multiple values for bond)
members:
- - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
- ip: 192.168.37.1 # IP to assign to Installer VM on this network
+ - em1
+ # IP to assign to Installer VM on this network
+ ip: 192.168.37.1
cidr: 192.168.37.0/24
gateway: 192.168.37.1
+ # Range to allocate to floating IPs for the public network with Neutron
floating_ip_range:
- 192.168.37.200
- - 192.168.37.220 # Range to allocate to floating IPs for the public network with Neutron
+ - 192.168.37.220
+ # Usable ip range for the overcloud node IPs (including VIPs)
+ # Last IP will be used for host bridge (i.e. br-public).
+ # If empty entire range is usable.
+ # Cannot overlap with dhcp_range or introspection_range.
overcloud_ip_range:
- 192.168.37.10
- - 192.168.37.199 # Usable ip range for the overcloud node IPs (including VIPs) and last IP will be used for host
- # bridge (i.e. br-public). If empty entire range is usable. Cannot overlap with dhcp_range or introspection_range.
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- vlan: native # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - 192.168.37.199
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface or bond)
+ phys_type: interface
+ # VLAN tag to use with this NIC
+ vlan: native
+ # Physical NIC members of this mapping
+ # Single value allowed for phys_type: interface
+ members:
- eth2
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
phys_type: interface
vlan: native
members:
- eth2
- external_overlay: # External network to be created in OpenStack by Services tenant
- name: Public_internet
- type: flat
- gateway: 192.168.37.1
- - private_cloud: # another external network
+ # External network to be created in OpenStack by Services tenant
+ external_overlay:
+ name: Public_internet
+ type: flat
+ gateway: 192.168.37.1
+ # another external network
+ # This is an example and not yet supported
+ - private_cloud:
enabled: false
mtu: 1500
- installer_vm: # Network settings for the Installer VM on admin network (note only valid on 'public' external network)
- nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ # Network settings for the Installer VM on external network
+ # note only valid on 'public' external network
+ installer_vm:
+ # Indicates if this VM will be bridged to an interface, or to a bond
+ nic_type: interface
vlan: 101
+ # Interfaces to bridge for installer VM (use multiple values for bond)
members:
- - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
- ip: 192.168.38.1 # IP to assign to Installer VM on this network
+ - em1
+ # IP to assign to Installer VM on this network
+ ip: 192.168.38.1
cidr: 192.168.38.0/24
gateway: 192.168.38.1
+ # Range to allocate to floating IPs for the public network with Neutron
floating_ip_range:
- 192.168.38.200
- - 192.168.38.220 # Range to allocate to floating IPs for the public network with Neutron
+ - 192.168.38.220
+  # Usable IP range for overcloud nodes (including VIPs),
+ # usually this is a shared subnet.
+ # Cannot overlap with dhcp_range or introspection_range.
overcloud_ip_range:
- 192.168.38.10
- - 192.168.38.199 # Usable IP range for overcloud nodes (including VIPs), usually this is a shared subnet.
- # Cannot overlap with dhcp_range or introspection_range.
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- vlan: 101 # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - eth2 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ - 192.168.38.199
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface or bond)
phys_type: interface
+ # VLAN tag to use with this NIC
vlan: 101
+        # Physical NIC members of this mapping
+ # Single value allowed for phys_type: interface
+ # Note: logical names like nic1 are not valid for fdio deployment yet.
members:
- - eth2
- external_overlay: # External network to be created in OpenStack by Services tenant
- name: private_cloud
- type: vlan
- segmentation_id: 101
- gateway: 192.168.38.1
- #
- storage: # Storage network configuration
+ - eth3
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
+ phys_type: interface
+ vlan: 101
+ members:
+ - eth3
+ # External network to be created in OpenStack by Services tenant
+ external_overlay:
+ name: private_cloud
+ type: vlan
+ segmentation_id: 101
+ gateway: 192.168.38.1
+
+ # Storage network configuration
+ storage:
enabled: true
- cidr: 12.0.0.0/24 # Subnet in CIDR format
- mtu: 1500 # Storage network MTU
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- vlan: native # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - eth3 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ # Subnet in CIDR format
+ cidr: 12.0.0.0/24
+ # Storage network MTU
+ mtu: 1500
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface or bond)
phys_type: interface
+ # VLAN tag to use with this NIC
vlan: native
+ # Physical NIC members of this mapping
+ # Single value allowed for phys_type: interface
members:
+ # Note logical names like nic1 not valid for fdio deployment yet.
- eth3
- #
- api: # API network configuration
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
+ phys_type: interface
+ vlan: native
+ members:
+ - eth3
+
+  # API network configuration
+  api:
enabled: false
- cidr: fd00:fd00:fd00:4000::/64 # Subnet in CIDR format
- vlan: 13 # VLAN tag to use for Overcloud hosts on this network
- mtu: 1500 # Api network MTU
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- vlan: native # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - eth4 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ # Subnet in CIDR format
+ cidr: fd00:fd00:fd00:4000::/64
+ # VLAN tag to use for Overcloud hosts on this network
+ vlan: 13
+ # Api network MTU
+ mtu: 1500
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface or bond)
+ phys_type: interface
+ # VLAN tag to use with this NIC
+ vlan: native
+ # Physical NIC members of this mapping
+ # Single value allowed for phys_type: interface
+ # Note logical names like nic1 not valid for fdio deployment yet.
+ members:
+ - eth4
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
phys_type: interface
vlan: native
members:
@@ -213,8 +309,9 @@ networks: # Network configurations
apex:
networks:
admin:
+ # Range used for introspection phase (examining nodes).
+ # This cannot overlap with dhcp_range or overcloud_ip_range.
+      # If external net 'public' is disabled, re-used as floating IP range.
introspection_range:
- 192.0.2.100
- - 192.0.2.120 # Range used for introspection phase (examining nodes). This cannot overlap with dhcp_range or overcloud_ip_range.
- # If the external network 'public' is disabled, then this range will be re-used to configure the floating ip range
- # for the overcloud default external network
+ - 192.0.2.120
diff --git a/config/network/network_settings_v6.yaml b/config/network/network_settings_v6.yaml
index 54c4113..c7e808c 100644
--- a/config/network/network_settings_v6.yaml
+++ b/config/network/network_settings_v6.yaml
@@ -1,3 +1,4 @@
+---
# This configuration file defines Network Environment for a
# Baremetal Deployment of OPNFV. It contains default values
# for 5 following networks:
@@ -34,7 +35,7 @@
# communication. This should be used for IPv6 deployments.
-#Meta data for the network configuration
+# Meta data for the network configuration
network-config-metadata:
title: LF-POD-1 Network config
version: 0.1
@@ -57,115 +58,188 @@ syslog:
transport: 'tcp'
# Common network settings
-networks: # Network configurations
- admin: # Admin configuration (pxe and jumpstart),
+networks:
+ # Admin configuration (pxe and jumpstart)
+ admin:
enabled: true
- installer_vm: # Network settings for the Installer VM on admin network
- nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ # Network settings for the Installer VM on admin network
+ installer_vm:
+ # Indicates if this VM will be bridged to an interface, or to a bond
+ nic_type: interface
+ # Interfaces to bridge for installer VM (use multiple values for bond)
members:
- - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
- vlan: native # VLAN tag to use for this network on Installer VM, native means none
- ip: 192.0.2.1 # IP to assign to Installer VM on this network
+ - em1
+ # VLAN tag to use for this network on Installer VM, native means none
+ vlan: native
+ # IP to assign to Installer VM on this network
+ ip: 192.0.2.1
+ # Usable ip range for the overcloud node IPs (including VIPs)
+ # Last IP is used for host bridge (i.e. br-admin).
+ # If empty entire range is usable.
+ # Cannot overlap with dhcp_range or introspection_range.
overcloud_ip_range:
- 192.0.2.11
- - 192.0.2.99 # Usable ip range for the overcloud node IPs (including VIPs) and last IP will be
- # used for host bridge (i.e. br-admin).
- # If empty entire range is usable. Cannot overlap with dhcp_range or introspection_range.
- gateway: 192.0.2.1 # Gateway (only needed when public_network is disabled)
- cidr: 192.0.2.0/24 # Subnet in CIDR format 192.168.1.0/24
+ - 192.0.2.99
+ # Gateway (only needed when public_network is disabled)
+ gateway: 192.0.2.1
+ # Subnet in CIDR format 192.168.1.0/24
+ cidr: 192.0.2.0/24
+ # DHCP range for the admin network, automatically provisioned if empty
dhcp_range:
- 192.0.2.2
- - 192.0.2.10 # DHCP range for the admin network, if empty it will be automatically provisioned.
- # Cannot overlap with overcloud_ip_range or introspection_range.
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - 192.0.2.10
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface or bond)
+ phys_type: interface
+ # Physical NIC members (Single value allowed for phys_type: interface)
+ members:
- nic1
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
phys_type: interface
members:
- nic1
- #
- tenant: # Tenant network configuration
+
+ # Tenant network configuration
+ tenant:
enabled: true
- cidr: 11.0.0.0/24 # Subnet in CIDR format 192.168.1.0/24
- mtu: 1500 # Tenant network MTU
- overlay_id_range: 2,65535 # Tenant network Overlay segmentation ID range:
- # VNI, VLAN-ID, etc.
- segmentation_type: vxlan # Tenant network segmentation type:
- # vlan, vxlan, gre
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- vlan: native # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - nic2 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ # Subnet in CIDR format 192.168.1.0/24
+ cidr: 11.0.0.0/24
+ # Tenant network MTU
+ mtu: 1500
+ # Tenant network Overlay segmentation ID range:
+ # VNI, VLAN-ID, etc.
+ overlay_id_range: 2,65535
+
+ # Tenant network segmentation type:
+ # vlan, vxlan, gre
+ segmentation_type: vxlan
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface/bond)
phys_type: interface
+ # UIO driver to use for DPDK scenarios.
+ # The value is ignored for non-DPDK scenarios.
+ uio_driver: uio_pci_generic
+ # VLAN tag to use with this NIC
vlan: native
+ # Physical NIC members of this mapping
+ # Single value allowed for phys_type: interface
members:
- - nic2 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
- #
- external: # Can contain 1 or more external networks
- - public: # "public" network will be the network the installer VM attaches to
+ # Note logical name like nic1 not valid for fdio deployment yet.
+ - nic2
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
+ # Physical interface type (interface/bond)
+ phys_type: interface
+ vlan: native
+      # Note: logical names like nic1 are not valid for fdio deployment yet.
+ members:
+ - nic2
+
+ # Can contain 1 or more external networks
+ external:
+ - public:
enabled: true
- mtu: 1500 # Public network MTU
- installer_vm: # Network settings for the Installer VM on admin network (note only valid on 'public' external network)
- nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ # Public network MTU
+ mtu: 1500
+ # Network settings for the Installer VM on external network
+ # (note only valid on 'public' external network)
+ installer_vm:
+ # Indicates if this VM will be bridged to an interface, or to a bond
+ nic_type: interface
vlan: native
+ # Interfaces to bridge for installer VM (use multiple values for bond)
members:
- - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
- ip: 2001:db8::1 # IP to assign to Installer VM on this network
+ - em1
+ # IP to assign to Installer VM on this network
+ ip: 2001:db8::1
cidr: 2001:db8::0/64
gateway: 2001:db8::1
+ # Range to allocate to floating IPs for the public network with Neutron
floating_ip_range:
- - 2001:db8:0:0:0:0:0:2
- - 2001:db8:0:0:ffff:ffff:ffff:ffff # Floating IPs range to assign to the overcloud (External IPs to be NAT'ed to Tenant IP)
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- vlan: native # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - 2001:db8:0:0:0:0:0:2
+ - 2001:db8:0:0:ffff:ffff:ffff:ffff
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface or bond)
+ phys_type: interface
+ # VLAN tag to use with this NIC
+ vlan: native
+ # Physical NIC members of this mapping
+ # Single value allowed for phys_type: interface
+ members:
- nic3
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
phys_type: interface
vlan: native
members:
- nic3
- external_overlay: # External network to be created in OpenStack by Services tenant
- name: Public_internet
- type: flat
- gateway: 2001:db8::1
- #
- storage: # Storage network configuration
+ # External network to be created in OpenStack by Services tenant
+ external_overlay:
+ name: Public_internet
+ type: flat
+ gateway: 2001:db8::1
+
+ # Storage network configuration
+ storage:
enabled: true
- cidr: fd00:fd00:fd00:2000::/64 # Subnet in CIDR format
- mtu: 1500 # Storage network MTU
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- vlan: native # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - nic4 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ # Subnet in CIDR format
+ cidr: fd00:fd00:fd00:2000::/64
+ # Storage network MTU
+ mtu: 1500
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface or bond)
+ phys_type: interface
+ # VLAN tag to use with this NIC
+ vlan: native
+ # Physical NIC members of this mapping
+ # Single value allowed for phys_type: interface
+ members:
+ # Note logical names like nic1 not valid for fdio deployment yet.
+ - nic4
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
phys_type: interface
vlan: native
members:
- nic4
- #
- api: # API network configuration
+
+  # API network configuration
+  api:
enabled: true
- cidr: fd00:fd00:fd00:4000::/64 # Subnet in CIDR format
- vlan: 13 # VLAN tag to use for Overcloud hosts on this network
- mtu: 1500 # Api network MTU
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- vlan: native # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - nic5 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ # Subnet in CIDR format
+ cidr: fd00:fd00:fd00:4000::/64
+ # VLAN tag to use for Overcloud hosts on this network
+ vlan: 13
+ # Api network MTU
+ mtu: 1500
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface or bond)
+ phys_type: interface
+ # VLAN tag to use with this NIC
+ vlan: native
+ # Physical NIC members of this mapping
+ # Single value allowed for phys_type: interface
+ # Note logical names like nic1 not valid for fdio deployment yet.
+ members:
+ - nic5
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
phys_type: interface
vlan: native
members:
@@ -175,8 +249,9 @@ networks: # Network configurations
apex:
networks:
admin:
+ # Range used for introspection phase (examining nodes).
+ # This cannot overlap with dhcp_range or overcloud_ip_range.
+      # If external net 'public' is disabled, re-used as floating IP range.
introspection_range:
- 192.0.2.100
- - 192.0.2.120 # Range used for introspection phase (examining nodes). This cannot overlap with dhcp_range or overcloud_ip_range.
- # If the external network 'public' is disabled, then this range will be re-used to configure the floating ip range
- # for the overcloud default external network
+ - 192.0.2.120
diff --git a/config/network/network_settings_vlans.yaml b/config/network/network_settings_vlans.yaml
index d892289..be4dc66 100644
--- a/config/network/network_settings_vlans.yaml
+++ b/config/network/network_settings_vlans.yaml
@@ -1,3 +1,4 @@
+---
# This configuration file defines Network Environment for a
# Baremetal Deployment of OPNFV. It contains default values
# for 5 following networks:
@@ -34,7 +35,7 @@
# communication. This should be used for IPv6 deployments.
-#Meta data for the network configuration
+# Meta data for the network configuration
network-config-metadata:
title: LF-POD-1 Network config
version: 0.1
@@ -57,154 +58,197 @@ syslog:
transport: 'tcp'
# Common network settings
-networks: # Network configurations
- admin: # Admin configuration (pxe and jumpstart),
+networks:
+ # Admin configuration (pxe and jumpstart)
+ admin:
enabled: true
- installer_vm: # Network settings for the Installer VM on admin network
- nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ # Network settings for the Installer VM on admin network
+ installer_vm:
+ # Indicates if this VM will be bridged to an interface, or to a bond
+ nic_type: interface
+ # Interfaces to bridge for installer VM (use multiple values for bond)
members:
- - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
- vlan: native # VLAN tag to use for this network on Installer VM, native means none
- ip: 192.0.2.1 # IP to assign to Installer VM on this network
+ - em1
+ # VLAN tag to use for this network on Installer VM, native means none
+ vlan: native
+ # IP to assign to Installer VM on this network
+ ip: 192.0.2.1
+ # Usable ip range for the overcloud node IPs (including VIPs)
+ # Last IP is used for host bridge (i.e. br-admin).
+ # If empty entire range is usable.
+ # Cannot overlap with dhcp_range or introspection_range.
overcloud_ip_range:
- 192.0.2.11
- - 192.0.2.99 # Usable ip range for the overcloud node IPs (including VIPs) and last IP will be
- # used for host bridge (i.e. br-admin). If empty entire range is usable.
- # Cannot overlap with dhcp_range.
- gateway: 192.0.2.1 # Gateway (only needed when public_network is disabled)
- cidr: 192.0.2.0/24 # Subnet in CIDR format 192.168.1.0/24
+ - 192.0.2.99
+ # Gateway (only needed when public_network is disabled)
+ gateway: 192.0.2.1
+ # Subnet in CIDR format 192.168.1.0/24
+ cidr: 192.0.2.0/24
+ # DHCP range for the admin network, automatically provisioned if empty
dhcp_range:
- 192.0.2.2
- - 192.0.2.10 # DHCP range for the admin network, if empty it will be automatically provisioned.
- # Cannot overlap with overcloud_ip_range or introspection_range.
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - 192.0.2.10
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface or bond)
+ phys_type: interface
+ # Physical NIC members (Single value allowed for phys_type: interface)
+ members:
- nic1
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
phys_type: interface
members:
- nic1
- #
- tenant: # Tenant network configuration
+
+ # Tenant network configuration
+ tenant:
enabled: true
- cidr: 11.0.0.0/24 # Subnet in CIDR format 192.168.1.0/24
- mtu: 1500 # Tenant network MTU
- overlay_id_range: 2,65535 # Tenant network Overlay segmentation ID range:
- # VNI, VLAN-ID, etc.
- segmentation_type: vxlan # Tenant network segmentation type:
- # vlan, vxlan, gre
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- vlan: 401 # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - nic1 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ # Subnet in CIDR format 192.168.1.0/24
+ cidr: 11.0.0.0/24
+ # Tenant network MTU
+ mtu: 1500
+ # Tenant network Overlay segmentation ID range:
+ # VNI, VLAN-ID, etc.
+ overlay_id_range: 2,65535
+
+ # Tenant network segmentation type:
+ # vlan, vxlan, gre
+ segmentation_type: vxlan
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface/bond)
+ phys_type: interface
+ # UIO driver to use for DPDK scenarios.
+ # The value is ignored for non-DPDK scenarios.
+ uio_driver: uio_pci_generic
+ # VLAN tag to use with this NIC
+ vlan: 401
+ # Physical NIC members of this mapping
+ # Single value allowed for phys_type: interface
+ members:
+ # Note logical names like nic1 are not valid for fdio deployment yet.
+ - nic1
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
+ # Physical interface type (interface/bond)
phys_type: interface
vlan: 401
+ # Note: logical names like nic1 are not valid for fdio deployment yet.
members:
- - nic1 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
- #
- external: # Can contain 1 or more external networks
- - public: # "public" network will be the network the installer VM attaches to
+ - nic1
+
+ # Can contain 1 or more external networks
+ external:
+ - public:
enabled: true
- mtu: 1500 # Public network MTU
- installer_vm: # Network settings for the Installer VM on admin network (note only valid on 'public' external network)
- nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ # Public network MTU
+ mtu: 1500
+ # Network settings for the Installer VM on external network
+ # (note only valid on 'public' external network)
+ installer_vm:
+ # Indicates if this VM will be bridged to an interface, or to a bond
+ nic_type: interface
vlan: 501
+ # Interfaces to bridge for installer VM (use multiple values for bond)
members:
- - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
- ip: 192.168.37.12 # IP to assign to Installer VM on this network
+ - em1
+ # IP to assign to Installer VM on this network
+ ip: 192.168.37.12
cidr: 192.168.37.0/24
gateway: 192.168.37.1
+ # Range to allocate to floating IPs for the public network with Neutron
floating_ip_range:
- 192.168.37.200
- - 192.168.37.220 # Range to allocate to floating IPs for the public network with Neutron
+ - 192.168.37.220
+ # Usable ip range for the overcloud node IPs (including VIPs)
+ # Last IP will be used for host bridge (i.e. br-public).
+ # If empty entire range is usable.
+ # Cannot overlap with dhcp_range or introspection_range.
overcloud_ip_range:
- 192.168.37.10
- - 192.168.37.199 # Usable IP range on the public network, usually this is a shared subnet
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- vlan: 501 # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - nic1
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ - 192.168.37.199
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface or bond)
phys_type: interface
+ # VLAN tag to use with this NIC
vlan: 501
+ # Physical NIC members of this mapping
+ # Single value allowed for phys_type: interface
members:
- nic1
- external_overlay: # External network to be created in OpenStack by Services tenant
- name: Public_internet
- type: flat
- gateway: 192.168.37.1
- - private_cloud: # another external network
- enabled: false
- mtu: 1500
- installer_vm: # Network settings for the Installer VM on admin network (note only valid on 'public' external network)
- nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
- vlan: 501
- members:
- - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
- ip: 192.168.38.12 # IP to assign to Installer VM on this network
- cidr: 192.168.38.0/24
- gateway: 192.168.38.1
- floating_ip_range:
- - 192.168.38.200
- - 192.168.38.220 # Range to allocate to floating IPs for the public network with Neutron
- overcloud_ip_range:
- - 192.168.38.10
- - 192.168.38.199 # Usable ip range for the overcloud node IPs (including VIPs) and last IP will be used for host
- # bridge (i.e. br-public). If empty entire range is usable.
- # Cannot overlap with dhcp_range or introspection_range.
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- vlan: 502 # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - eth1 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
phys_type: interface
- vlan: 502
+ vlan: 501
members:
- - eth1
- external_overlay: # External network to be created in OpenStack by Services tenant
- name: private_cloud
- type: vlan
- segmentation_id: 101
- gateway: 192.168.38.1
- #
- storage: # Storage network configuration
+ - nic1
+ # External network to be created in OpenStack by Services tenant
+ external_overlay:
+ name: Public_internet
+ type: flat
+ gateway: 192.168.37.1
+
+ # Storage network configuration
+ storage:
enabled: true
- cidr: 12.0.0.0/24 # Subnet in CIDR format
- mtu: 1500 # Storage network MTU
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- vlan: 201 # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - nic4 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ # Subnet in CIDR format
+ cidr: 12.0.0.0/24
+ # Storage network MTU
+ mtu: 1500
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface or bond)
phys_type: interface
+ # VLAN tag to use with this NIC
vlan: 201
+ # Physical NIC members of this mapping
+ # Single value allowed for phys_type: interface
members:
+ # Note logical names like nic1 not valid for fdio deployment yet.
- nic4
- #
- api: # API network configuration
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
+ phys_type: interface
+ vlan: native
+ members:
+ - nic4
+
+ # API network configuration
+ api:
enabled: false
- cidr: fd00:fd00:fd00:4000::/64 # Subnet in CIDR format
- mtu: 1500 # Api network MTU
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- vlan: 101 # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - nic5 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ # Subnet in CIDR format
+ cidr: fd00:fd00:fd00:4000::/64
+ # VLAN tag to use for Overcloud hosts on this network
+ vlan: 13
+ # Api network MTU
+ mtu: 1500
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface or bond)
phys_type: interface
+ # VLAN tag to use with this NIC
vlan: 101
+ # Physical NIC members of this mapping
+ # Single value allowed for phys_type: interface
+ # Note logical names like nic1 not valid for fdio deployment yet.
+ members:
+ - nic5
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
+ phys_type: interface
+ vlan: native
members:
- nic5
@@ -212,8 +256,9 @@ networks: # Network configurations
apex:
networks:
admin:
+ # Range used for introspection phase (examining nodes).
+ # This cannot overlap with dhcp_range or overcloud_ip_range.
+ # If the external network 'public' is disabled, this range will be
+ # re-used as the floating ip range for the overcloud default
+ # external network.
introspection_range:
- 192.0.2.100
- - 192.0.2.120 # Range used for introspection phase (examining nodes). This cannot overlap with dhcp_range or overcloud_ip_range.
- # If the external network 'public' is disabled, then this range will be re-used to configure the floating ip range
- # for the overcloud default external network
+ - 192.0.2.120
diff --git a/config/network/network_settings_vpp.yaml b/config/network/network_settings_vpp.yaml
index c679215..f080af1 100644
--- a/config/network/network_settings_vpp.yaml
+++ b/config/network/network_settings_vpp.yaml
@@ -1,3 +1,4 @@
+---
# This configuration file defines Network Environment for a
# Baremetal Deployment of OPNFV. It contains default values
# for 5 following networks:
@@ -34,7 +35,7 @@
# communication. This should be used for IPv6 deployments.
-#Meta data for the network configuration
+# Meta data for the network configuration
network-config-metadata:
title: LF-POD-1 Network config
version: 0.1
@@ -57,152 +58,245 @@ syslog:
transport: 'tcp'
# Common network settings
-networks: # Network configurations
- admin: # Admin configuration (pxe and jumpstart),
+networks:
+ # Admin configuration (pxe and jumpstart)
+ admin:
enabled: true
- installer_vm: # Network settings for the Installer VM on admin network
- nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ # Network settings for the Installer VM on admin network
+ installer_vm:
+ # Indicates if this VM will be bridged to an interface, or to a bond
+ nic_type: interface
+ # Interfaces to bridge for installer VM (use multiple values for bond)
members:
- - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
- vlan: native # VLAN tag to use for this network on Installer VM, native means none
- ip: 192.0.2.1 # IP to assign to Installer VM on this network
+ - em1
+ # VLAN tag to use for this network on Installer VM, native means none
+ vlan: native
+ # IP to assign to Installer VM on this network
+ ip: 192.0.2.1
+ # Usable ip range for the overcloud node IPs (including VIPs)
+ # Last IP is used for host bridge (i.e. br-admin).
+ # If empty entire range is usable.
+ # Cannot overlap with dhcp_range or introspection_range.
overcloud_ip_range:
- 192.0.2.11
- - 192.0.2.99 # Usable ip range for the overcloud node IPs (including VIPs) and last IP will be
- # used for host bridge (i.e. br-admin). If empty entire range is usable.
- # Cannot overlap with dhcp_range or introspection_range.
- gateway: 192.0.2.1 # Gateway (only needed when public_network is disabled)
- cidr: 192.0.2.0/24 # Subnet in CIDR format 192.168.1.0/24
+ - 192.0.2.99
+ # Gateway (only needed when public_network is disabled)
+ gateway: 192.0.2.1
+ # Subnet in CIDR format 192.168.1.0/24
+ cidr: 192.0.2.0/24
+ # DHCP range for the admin network, automatically provisioned if empty
dhcp_range:
- 192.0.2.2
- - 192.0.2.10 # DHCP range for the admin network, if empty it will be automatically provisioned
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - 192.0.2.10
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface or bond)
+ phys_type: interface
+ # Physical NIC members (Single value allowed for phys_type: interface)
+ members:
- eth0
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
phys_type: interface
members:
- eth0
- #
- tenant: # Tenant network configuration
+
+ # Tenant network configuration
+ tenant:
enabled: true
- cidr: 11.0.0.0/24 # Subnet in CIDR format 192.168.1.0/24
- mtu: 1500 # Tenant network MTU
- overlay_id_range: 2,65535 # Tenant network Overlay segmentation ID range:
- # VNI, VLAN-ID, etc.
- segmentation_type: vxlan # Tenant network segmentation type:
- # vlan, vxlan, gre
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface/bond)
- vlan: native # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - eth1 # Note that logic nic name like nic1 cannot be used for fdio deployment yet.
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
- phys_type: interface # Physical interface type (interface/bond)
+ # Subnet in CIDR format 192.168.1.0/24
+ cidr: 11.0.0.0/24
+ # Tenant network MTU
+ mtu: 1500
+ # Tenant network Overlay segmentation ID range:
+ # VNI, VLAN-ID, etc.
+ overlay_id_range: 2,65535
+
+ # Tenant network segmentation type:
+ # vlan, vxlan, gre
+ segmentation_type: vxlan
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface/bond)
+ phys_type: interface
+ # VLAN tag to use with this NIC
+ vlan: native
+ # Physical NIC members of this mapping
+ # Single value allowed for phys_type: interface
+ members:
+ # Note logical names like nic1 are not valid for fdio deployment yet.
+ - eth1
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
+ # Physical interface type (interface/bond)
+ phys_type: interface
vlan: native
+ # Note: logical names like nic1 are not valid for fdio deployment yet.
members:
- - eth1 # Note that logic nic name like nic1 cannot be used for fdio deployment yet.
- #
- external: # Can contain 1 or more external networks
- - public: # "public" network will be the network the installer VM attaches to
+ - eth1
+
+ # Can contain 1 or more external networks
+ external:
+ - public:
enabled: true
- mtu: 1500 # Public network MTU
- installer_vm: # Network settings for the Installer VM on admin network (note only valid on 'public' external network)
- nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ # Public network MTU
+ mtu: 1500
+ # Network settings for the Installer VM on external network
+ # (note only valid on 'public' external network)
+ installer_vm:
+ # Indicates if this VM will be bridged to an interface, or to a bond
+ nic_type: interface
vlan: native
+ # Interfaces to bridge for installer VM (use multiple values for bond)
members:
- - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
- ip: 192.168.37.1 # IP to assign to Installer VM on this network
+ - em1
+ # IP to assign to Installer VM on this network
+ ip: 192.168.37.1
cidr: 192.168.37.0/24
gateway: 192.168.37.1
+ # Range to allocate to floating IPs for the public network with Neutron
floating_ip_range:
- 192.168.37.200
- - 192.168.37.220 # Range to allocate to floating IPs for the public network with Neutron
+ - 192.168.37.220
+ # Usable ip range for the overcloud node IPs (including VIPs)
+ # Last IP will be used for host bridge (i.e. br-public).
+ # If empty entire range is usable.
+ # Cannot overlap with dhcp_range or introspection_range.
overcloud_ip_range:
- 192.168.37.10
- - 192.168.37.199 # Usable ip range for the overcloud node IPs (including VIPs) and last IP will be used for host
- # bridge (i.e. br-public). If empty entire range is usable. Cannot overlap with dhcp_range or introspection_range.
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- vlan: native # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - 192.168.37.199
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface or bond)
+ phys_type: interface
+ # VLAN tag to use with this NIC
+ vlan: native
+ # Physical NIC members of this mapping
+ # Single value allowed for phys_type: interface
+ members:
- eth2
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
phys_type: interface
vlan: native
members:
- eth2
- external_overlay: # External network to be created in OpenStack by Services tenant
- name: Public_internet
- type: flat
- gateway: 192.168.37.1
- - private_cloud: # another external network
+ # External network to be created in OpenStack by Services tenant
+ external_overlay:
+ name: Public_internet
+ type: flat
+ gateway: 192.168.37.1
+ # another external network
+ # This is an example and not yet supported
+ - private_cloud:
enabled: false
mtu: 1500
- installer_vm: # Network settings for the Installer VM on admin network (note only valid on 'public' external network)
- nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ # Network settings for the Installer VM on external network
+ # note only valid on 'public' external network
+ installer_vm:
+ # Indicates if this VM will be bridged to an interface, or to a bond
+ nic_type: interface
vlan: 101
+ # Interfaces to bridge for installer VM (use multiple values for bond)
members:
- - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
- ip: 192.168.38.1 # IP to assign to Installer VM on this network
+ - em1
+ # IP to assign to Installer VM on this network
+ ip: 192.168.38.1
cidr: 192.168.38.0/24
gateway: 192.168.38.1
+ # Range to allocate to floating IPs for the public network with Neutron
floating_ip_range:
- 192.168.38.200
- - 192.168.38.220 # Range to allocate to floating IPs for the public network with Neutron
+ - 192.168.38.220
+ # Usable IP range for overcloud nodes (including VIPs)
+ # usually this is a shared subnet.
+ # Cannot overlap with dhcp_range or introspection_range.
overcloud_ip_range:
- 192.168.38.10
- - 192.168.38.199 # Usable IP range for overcloud nodes (including VIPs), usually this is a shared subnet.
- # Cannot overlap with dhcp_range or introspection_range.
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- vlan: 101 # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - eth3 # Note that logic nic name like nic1 cannot be used for fdio deployment yet.
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ - 192.168.38.199
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface or bond)
phys_type: interface
+ # VLAN tag to use with this NIC
vlan: 101
+ # Physical NIC members of this mapping
+ # Single value allowed for phys_type: interface
+ # Note: logical names like nic1 are not valid for fdio deployment yet.
members:
- eth3
- external_overlay: # External network to be created in OpenStack by Services tenant
- name: private_cloud
- type: vlan
- segmentation_id: 101
- gateway: 192.168.38.1
- #
- storage: # Storage network configuration
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
+ phys_type: interface
+ vlan: 101
+ members:
+ - eth3
+ # External network to be created in OpenStack by Services tenant
+ external_overlay:
+ name: private_cloud
+ type: vlan
+ segmentation_id: 101
+ gateway: 192.168.38.1
+
+ # Storage network configuration
+ storage:
enabled: true
- cidr: 12.0.0.0/24 # Subnet in CIDR format
- mtu: 1500 # Storage network MTU
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- vlan: native # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - eth3 # Note that logic nic name like nic1 cannot be used for fdio deployment yet.
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ # Subnet in CIDR format
+ cidr: 12.0.0.0/24
+ # Storage network MTU
+ mtu: 1500
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface or bond)
phys_type: interface
+ # VLAN tag to use with this NIC
vlan: native
+ # Physical NIC members of this mapping
+ # Single value allowed for phys_type: interface
members:
+ # Note logical names like nic1 not valid for fdio deployment yet.
- eth3
- #
- api: # API network configuration
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
+ phys_type: interface
+ vlan: native
+ members:
+ - eth3
+
+ # API network configuration
+ api:
enabled: false
- cidr: fd00:fd00:fd00:4000::/64 # Subnet in CIDR format
- vlan: 13 # VLAN tag to use for Overcloud hosts on this network
- mtu: 1500 # Api network MTU
- nic_mapping: # Mapping of network configuration for Overcloud Nodes
- compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
- phys_type: interface # Physical interface type (interface or bond)
- vlan: native # VLAN tag to use with this NIC
- members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
- - eth4 # Note that logic nic name like nic1 cannot be used for fdio deployment yet.
- controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ # Subnet in CIDR format
+ cidr: fd00:fd00:fd00:4000::/64
+ # VLAN tag to use for Overcloud hosts on this network
+ vlan: 13
+ # Api network MTU
+ mtu: 1500
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface or bond)
+ phys_type: interface
+ # VLAN tag to use with this NIC
+ vlan: native
+ # Physical NIC members of this mapping
+ # Single value allowed for phys_type: interface
+ # Note logical names like nic1 not valid for fdio deployment yet.
+ members:
+ - eth4
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
phys_type: interface
vlan: native
members:
@@ -212,8 +306,9 @@ networks: # Network configurations
apex:
networks:
admin:
+ # Range used for introspection phase (examining nodes).
+ # This cannot overlap with dhcp_range or overcloud_ip_range.
+ # If the external network 'public' is disabled, this range will be
+ # re-used as the floating ip range for the overcloud default
+ # external network.
introspection_range:
- 192.0.2.100
- - 192.0.2.120 # Range used for introspection phase (examining nodes). This cannot overlap with dhcp_range or overcloud_ip_range.
- # If the external network 'public' is disabled, then this range will be re-used to configure the floating ip range
- # for the overcloud default external network
+ - 192.0.2.120
diff --git a/tests/config/inventory.yaml b/tests/config/inventory.yaml
index 607df29..2abe0fc 100644
--- a/tests/config/inventory.yaml
+++ b/tests/config/inventory.yaml
@@ -1,3 +1,4 @@
+---
nodes:
node1:
mac_address: "00:25:B5:cc:00:1e"