-rw-r--r--  build/Makefile                                            |    4
-rw-r--r--  build/nics-template.yaml.jinja2                           |   36
-rwxr-xr-x  build/undercloud.sh                                       |    2
-rwxr-xr-x  ci/deploy.sh                                              |   31
-rw-r--r--  config/network/network_settings.yaml                      |  302
-rw-r--r--  config/network/network_settings_v6.yaml                   |  285
-rw-r--r--  config/network/network_settings_vlans.yaml                |  287
-rw-r--r--  docs/installationprocedure/baremetal.rst                  |    5
-rw-r--r--  docs/installationprocedure/index.rst                      |    1
-rw-r--r--  docs/installationprocedure/troubleshooting.rst            |  144
-rw-r--r--  docs/installationprocedure/virtualinstall.rst             |    2
-rw-r--r--  lib/common-functions.sh                                   |   10
-rwxr-xr-x  lib/configure-deps-functions.sh                           |   18
-rwxr-xr-x  lib/overcloud-deploy-functions.sh                         |   17
-rwxr-xr-x  lib/parse-functions.sh                                    |    8
-rwxr-xr-x  lib/post-install-functions.sh                             |   30
-rw-r--r--  lib/python/apex/common/constants.py                       |   15
-rw-r--r--  lib/python/apex/network_environment.py                    |  126
-rw-r--r--  lib/python/apex/network_settings.py                       |  319
-rwxr-xr-x  lib/python/apex_python_utils.py                           |   33
-rwxr-xr-x  lib/undercloud-functions.sh                               |  113
-rwxr-xr-x  lib/virtual-setup-functions.sh                            |    6
-rw-r--r--  tests/config/network_settings_duplicate_nic.yaml          |  115
-rw-r--r--  tests/config/network_settings_missing_required_nic.yaml   |  113
-rw-r--r--  tests/config/network_settings_nic1_reserved.yaml          |  113
-rw-r--r--  tests/config/network_settings_nics_not_specified.yaml     |  107
-rw-r--r--  tests/test_apex_network_environment.py                    |  119
-rw-r--r--  tests/test_apex_network_settings.py                       |  142
-rw-r--r--  tests/test_apex_python_utils_py.py                        |    1
29 files changed, 1269 insertions, 1235 deletions
diff --git a/build/Makefile b/build/Makefile
index 52ba8986..d17b8b52 100644
--- a/build/Makefile
+++ b/build/Makefile
@@ -19,9 +19,6 @@ export RPMODL = $(shell pwd)/noarch/opnfv-apex-$(RPMVERS)-$(shell echo ${RELEASE
export RPMONO = $(shell pwd)/noarch/opnfv-apex-onos-$(RPMVERS)-$(shell echo ${RELEASE} | tr -d '_-').noarch.rpm
export RPMSFC = $(shell pwd)/noarch/opnfv-apex-opendaylight-sfc-$(RPMVERS)-$(shell echo ${RELEASE} | tr -d '_-').noarch.rpm
-all_networks="admin_network private_network storage_network external_network api_network"
-
-
.PHONY: all
all: iso
@@ -400,6 +397,7 @@ iso: iso-clean images rpms $(CENTISO)
cd centos/Packages && yumdownloader ipxe-roms-qemu
cd centos/Packages && curl -O https://radez.fedorapeople.org/python34-markupsafe-0.23-9.el7.centos.x86_64.rpm
cd centos/Packages && curl -O https://radez.fedorapeople.org/python3-jinja2-2.8-5.el7.centos.noarch.rpm
+ cd centos/Packages && curl -O http://artifacts.opnfv.org/apex/dependencies/python3-ipmi-0.3.0-1.noarch.rpm
# regenerate yum repo data
@echo "Generating new yum metadata"
createrepo --update -g ../c7-opnfv-x86_64-comps.xml centos
diff --git a/build/nics-template.yaml.jinja2 b/build/nics-template.yaml.jinja2
index 0680a26f..ee830114 100644
--- a/build/nics-template.yaml.jinja2
+++ b/build/nics-template.yaml.jinja2
@@ -85,16 +85,16 @@ resources:
os_net_config:
network_config:
-
- {%- if vlans['private_network'] is number or vlans['storage_network'] is number or vlans['api_network'] is number or vlans['public_network'] is number %}
+ {%- if nets['tenant']['nic_mapping'][role]['vlan'] is number or nets['storage']['nic_mapping'][role]['vlan'] is number or nets['api']['nic_mapping'][role]['vlan'] is number or nets['external'][0]['nic_mapping'][role]['vlan'] is number %}
type: ovs_bridge
name: {get_input: bridge_name}
members:
-
type: interface
- name: {{ nics[role]['admin_network'] }}
+ name: {{ nets[role]['admin']['members'][0] }}
# force the MAC address of the bridge to this interface
primary: true
- {%- if 'public_network' in enabled_networks and vlans['public_network'] is number %}
+ {%- if nets['external'][0]['enabled'] and nets['external'][0]['nic_mapping'][role]['vlan'] is number %}
-
type: vlan
vlan_id: {get_param: ExternalNetworkVlanID}
@@ -106,7 +106,7 @@ resources:
default: true
next_hop: {get_param: ExternalInterfaceDefaultRoute}
{%- endif %}
- {%- if 'private_network' in enabled_networks and vlans['private_network'] is number %}
+ {%- if nets['tenant']['enabled'] and nets['tenant']['nic_mapping'][role]['vlan'] is number %}
-
type: vlan
vlan_id: {get_param: TenantNetworkVlanID}
@@ -114,7 +114,7 @@ resources:
-
ip_netmask: {get_param: TenantIpSubnet}
{%- endif %}
- {%- if 'storage_network' in enabled_networks and vlans['storage_network'] is number %}
+ {%- if nets['storage']['enabled'] and nets['storage']['nic_mapping'][role]['vlan'] is number %}
-
type: vlan
vlan_id: {get_param: StorageNetworkVlanID}
@@ -122,7 +122,7 @@ resources:
-
ip_netmask: {get_param: StorageIpSubnet}
{%- endif %}
- {%- if 'api_network' in enabled_networks and vlans['api_network'] is number %}
+ {%- if nets['api']['enabled'] and nets['api']['nic_mapping'][role]['vlan'] is number %}
-
type: vlan
vlan_id: {get_param: InternalApiNetworkVlanID}
@@ -132,7 +132,7 @@ resources:
{%- endif %}
{%- else %}
type: interface
- name: {{ nics[role]['admin_network'] }}
+ name: {{ nets['admin']['nic_mapping'][role]['members'][0] }}
{%- endif %}
use_dhcp: false
dns_servers: {get_param: DnsServers}
@@ -153,7 +153,7 @@ resources:
next_hop: {get_param: ControlPlaneDefaultRoute}
{%- endif %}
- {%- if 'private_network' in enabled_networks and vlans['private_network'] == 'native' %}
+ {%- if nets['tenant']['enabled'] and nets['tenant']['nic_mapping'][role]['vlan'] == 'native' %}
{%- if ovs_dpdk_bridge == 'br-phy' and role == 'compute' %}
-
type: ovs_bridge
@@ -165,7 +165,7 @@ resources:
members:
-
type: interface
- name: {{ nics[role]['private_network'] }}
+ name: {{ nets['tenant']['nic_mapping'][role]['members'][0] }}
# force the MAC address of the bridge to this interface
primary: true
-
@@ -175,17 +175,17 @@ resources:
{%- else %}
-
type: interface
- name: {{ nics[role]['private_network'] }}
+ name: {{ nets['tenant']['nic_mapping'][role]['members'][0] }}
use_dhcp: false
addresses:
-
ip_netmask: {get_param: TenantIpSubnet}
{%- endif %}
{%- endif %}
- {%- if 'public_network' in enabled_networks and external_net_type == 'interface' and vlans['public_network'] == 'native' %}
+ {%- if nets['external'][0]['enabled'] and external_net_type == 'interface' and nets['external'][0]['nic_mapping'][role]['vlan'] == 'native' %}
-
type: interface
- name: {{ nics[role]['public_network'] }}
+ name: {{ nets['external'][0]['nic_mapping'][role]['members'][0] }}
{%- if role == 'controller' %}
dns_servers: {get_param: DnsServers}
{%- endif %}
@@ -200,7 +200,7 @@ resources:
{%- endif %}
ip_netmask: 0.0.0.0/0
next_hop: {get_param: ExternalInterfaceDefaultRoute}
- {%- elif 'public_network' in enabled_networks and external_net_type == 'br-ex' and vlans['public_network'] == 'native' %}
+ {%- elif nets['external'][0]['enabled'] and external_net_type == 'br-ex' and nets['external'][0]['nic_mapping'][role]['vlan'] == 'native' %}
-
type: ovs_bridge
name: {get_input: bridge_name}
@@ -208,7 +208,7 @@ resources:
members:
-
type: interface
- name: {{ nics[role]['public_network'] }}
+ name: {{ nets['external'][0]['nic_mapping'][role]['members'][0] }}
# force the MAC address of the bridge to this interface
primary: true
{%- if role == 'controller' %}
@@ -223,19 +223,19 @@ resources:
next_hop: {get_param: ExternalInterfaceDefaultRoute}
{%- endif %}
{%- endif %}
- {%- if 'storage_network' in enabled_networks and vlans['storage_network'] == 'native' %}
+ {%- if nets['storage']['enabled'] and nets['storage']['nic_mapping'][role]['vlan'] == 'native' %}
-
type: interface
- name: {{ nics[role]['storage_network'] }}
+ name: {{ nets['storage']['nic_mapping'][role]['members'][0] }}
use_dhcp: false
addresses:
-
ip_netmask: {get_param: StorageIpSubnet}
{%- endif %}
- {%- if 'api_network' in enabled_networks and vlans['api_network'] == 'native' %}
+ {%- if nets['api']['enabled'] and nets['api']['nic_mapping'][role]['vlan'] == 'native' %}
-
type: interface
- name: {{ nics[role]['api_network'] }}
+ name: {{ nets['api']['nic_mapping'][role]['members'][0] }}
use_dhcp: false
addresses:
-
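The template lookups above assume the parsed network settings expose one nested dictionary per network, keyed by network name, role and NIC attributes. A minimal shell sketch of the structure being dereferenced, with illustrative values only:

    python3 -c 'nets = {"tenant": {"nic_mapping": {"compute": {"vlan": "native", "members": ["nic2"]}}}}; print(nets["tenant"]["nic_mapping"]["compute"]["members"][0])'  # prints: nic2
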
diff --git a/build/undercloud.sh b/build/undercloud.sh
index a4d008ee..3cc56009 100755
--- a/build/undercloud.sh
+++ b/build/undercloud.sh
@@ -62,7 +62,7 @@ LIBGUESTFS_BACKEND=direct virt-customize \
# Add custom IPA to allow kernel params
wget https://raw.githubusercontent.com/trozet/ironic-python-agent/opnfv_kernel/ironic_python_agent/extensions/image.py
-python3.4 -c 'import py_compile; py_compile.compile("image.py", cfile="image.pyc")'
+python3 -c 'import py_compile; py_compile.compile("image.py", cfile="image.pyc")'
# Add performance image scripts
LIBGUESTFS_BACKEND=direct virt-customize --upload ../build_perf_image.sh:/home/stack \
diff --git a/ci/deploy.sh b/ci/deploy.sh
index cd90511c..edc6062a 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -25,8 +25,6 @@ green=$(tput setaf 2 || echo "")
interactive="FALSE"
ping_site="8.8.8.8"
ntp_server="pool.ntp.org"
-net_isolation_enabled="TRUE"
-net_isolation_arg=""
post_config="TRUE"
debug="FALSE"
@@ -42,18 +40,18 @@ DEPLOY_OPTIONS=""
CONFIG=${CONFIG:-'/var/opt/opnfv'}
RESOURCES=${RESOURCES:-"$CONFIG/images"}
LIB=${LIB:-"$CONFIG/lib"}
-OPNFV_NETWORK_TYPES="admin_network private_network public_network storage_network api_network"
+OPNFV_NETWORK_TYPES="admin tenant external storage api"
VM_CPUS=4
VM_RAM=8
VM_COMPUTES=1
# Netmap used to map networks to OVS bridge names
-NET_MAP['admin_network']="br-admin"
-NET_MAP['private_network']="br-private"
-NET_MAP['public_network']="br-public"
-NET_MAP['storage_network']="br-storage"
-NET_MAP['api_network']="br-api"
+NET_MAP['admin']="br-admin"
+NET_MAP['tenant']="br-tenant"
+NET_MAP['external']="br-external"
+NET_MAP['storage']="br-storage"
+NET_MAP['api']="br-api"
ext_net_type="interface"
ip_address_family=4
@@ -129,12 +127,6 @@ parse_cmdline() {
echo "Executing a Virtual Deployment"
shift 1
;;
- --flat )
- net_isolation_enabled="FALSE"
- net_isolation_arg="--flat"
- echo "Underlay Network Isolation Disabled: using flat configuration"
- shift 1
- ;;
--no-post-config )
post_config="FALSE"
echo "Post install configuration disabled"
@@ -173,9 +165,7 @@ parse_cmdline() {
done
sleep 2
- if [[ ! -z "$NETSETS" && "$net_isolation_enabled" == "FALSE" ]]; then
- echo -e "${red}INFO: Single flat network requested. Only admin_network settings will be used!${reset}"
- elif [[ -z "$NETSETS" ]]; then
+ if [[ -z "$NETSETS" ]]; then
echo -e "${red}ERROR: You must provide a network_settings file with -n.${reset}"
exit 1
fi
@@ -206,11 +196,6 @@ parse_cmdline() {
exit 1
fi
- if [[ "$net_isolation_enabled" == "FALSE" && "$post_config" == "TRUE" ]]; then
- echo -e "${blue}INFO: Post Install Configuration will be skipped. It is not supported with --flat${reset}"
- post_config="FALSE"
- fi
-
}
main() {
@@ -247,7 +232,7 @@ main() {
fi
fi
if [[ "${deploy_options_array['sdn_controller']}" == 'onos' ]]; then
- if ! onos_update_gw_mac ${public_network_cidr} ${public_network_gateway}; then
+ if ! onos_update_gw_mac ${external_cidr} ${external_gateway}; then
echo -e "${red}ERROR:ONOS Post Install Configuration Failed, Exiting.${reset}"
exit 1
else
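The renamed NET_MAP keys are consumed wherever a network name has to be translated into its OVS bridge name. A minimal sketch of that pattern, with the loop body purely illustrative:

    declare -A NET_MAP
    NET_MAP['admin']="br-admin"
    NET_MAP['tenant']="br-tenant"
    for network in admin tenant; do
        echo "network: ${network} maps to bridge: ${NET_MAP[$network]}"
    done
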
diff --git a/config/network/network_settings.yaml b/config/network/network_settings.yaml
index f7680643..ab9ed962 100644
--- a/config/network/network_settings.yaml
+++ b/config/network/network_settings.yaml
@@ -1,118 +1,220 @@
# This configuration file defines Network Environment for a
# Baremetal Deployment of OPNFV. It contains default values
-# for 4 following networks:
+# for 5 following networks:
#
# - admin
-# - private*
-# - public
+# - tenant*
+# - external*
# - storage*
-#
+# - api*
# *) optional networks
#
-# Any values missing from this configuration file will be
-# auto-detected by deployment script from the existing network
-# configuration of the jumphost.
-#
# Optional networks will be consolidated with the admin network
-# if not explicitely configured.
+# if not explicitly configured.
#
# See short description of the networks in the comments below.
#
+# "admin" is the short name for Control Plane Network.
+# This network should be IPv4 even if it is an IPv6 deployment, since
+# IPv6 does not have PXE boot support.
+# During OPNFV deployment it is used for node provisioning which will require
+# PXE booting as well as running a DHCP server on this network. Be sure to
+# disable any other DHCP/TFTP server on this network.
+#
+# "tenant" is the network used for tenant traffic.
+#
+# "external" is the network which should have internet or external
+# connectivity. External OpenStack networks will be configured to egress this
+# network. There can be multiple external networks, but only one can be
+# assigned as "public", which the OpenStack public APIs will register on.
+#
+# "storage" is the network for storage I/O.
+#
+# "api" is an optional network for splitting out OpenStack service API
+# communication. This should be used for IPv6 deployments.
-# DNS Servers for all nodes, comma delimited list
-dns_servers: ["8.8.8.8", "8.8.4.4"]
-# Domain name to use for undercloud/overcloud nodes
-domain_name: 'opnfvapex.com'
+#Metadata for the network configuration
+network-config-metadata:
+ title: LF-POD-1 Network config
+ version: 0.1
+ created: Mon Dec 28 2015
+ comment: None
-# "admin" is the short name for Control Plane Network.
-# During OPNFV deployment it is used for node provisioning so
-# PXE boot should be enabled for the related interfaces on all
-# the nodes in the OPNFV cluster. After the deployment this
-# network is used as the OpenStack management network which
-# carries e.g. communication between its internal components.
-#
-admin_network:
- enabled: true
- network_type: bridged
- bridged_interface: ''
- bond_interfaces: ''
- compute_interface: nic1
- controller_interface: nic1
- usable_ip_range: 192.0.2.11,192.0.2.99
- gateway: 192.0.2.1
- provisioner_ip: 192.0.2.1
- cidr: 192.0.2.0/24
- dhcp_range: 192.0.2.2,192.0.2.10
- introspection_range: 192.0.2.100,192.0.2.120
+# DNS Settings
+dns-domain: opnfvlf.org
+dns-search: opnfvlf.org
+dns_nameservers:
+ - 8.8.8.8
+ - 8.8.4.4
+# NTP servers
+ntp:
+ - 0.se.pool.ntp.org
+ - 1.se.pool.ntp.org
+# Syslog server
+syslog:
+ server: 10.128.1.24
+ transport: 'tcp'
-# "private" is an optional network used as underlying physical
-# network for virtual provider and tenant networks created by
-# users. Traffic between virtual machines is carried by this
-# network.
-#
-private_network:
- enabled: true
- vlan: native
- cidr: 11.0.0.0/24
- compute_interface: nic2
- controller_interface: nic2
+networks: # Network configurations
+ admin: # Admin configuration (pxe and jumpstart),
+ enabled: true
+ installer_vm: # Network settings for the Installer VM on admin network
+ nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ members:
+ - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
+ vlan: native # VLAN tag to use for this network on Installer VM, native means none
+ ip: 192.0.2.1 # IP to assign to Installer VM on this network
+ usable_ip_range:
+ - 192.0.2.11
+ - 192.0.2.99 # Usable ip range, if empty entire range is usable
+ gateway: 192.0.2.1 # Gateway (only needed when the external network is disabled)
+ cidr: 192.0.2.0/24 # Subnet in CIDR format 192.168.1.0/24
+ dhcp_range:
+ - 192.0.2.2
+ - 192.0.2.10 # DHCP range for the admin network, if empty it will be automatically provisioned
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic1
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: bond
+ members:
+ - nic1
+ #
+ tenant: # Tenant network configuration
+ enabled: true
+ cidr: 11.0.0.0/24 # Subnet in CIDR format 192.168.1.0/24
+ mtu: 64000 # Tenant network MTU
+ overlay_id_range: 2,65535 # Tenant network Overlay segmentation ID range:
+ # VNI, VLAN-ID, etc.
+ segmentation_type: vxlan # Tenant network segmentation type:
+ # vlan, vxlan, gre
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ vlan: native # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic2 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: interface
+ vlan: native
+ members:
+ - nic2 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ #
+ external: # Can contain 1 or more external networks
+ - public: # "public" network will be the network the installer VM attaches to
+ enabled: true
+ mtu: 64000 # Public network MTU
+ installer_vm: # Network settings for the Installer VM on admin network (note only valid on 'public' external network)
+ nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ vlan: native
+ members:
+ - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
+ ip: 192.168.37.1 # IP to assign to Installer VM on this network
+ cidr: 192.168.37.0/24
+ gateway: 192.168.37.1
+ floating_ip_range:
+ - 192.168.37.200
+ - 192.168.37.220 #Range to allocate to floating IPs for the public network with Neutron
+ usable_ip_range:
+ - 192.168.37.10
+ - 192.168.37.199 # Usable IP range on the public network, usually this is a shared subnet
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ vlan: native # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic3
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: interface
+ vlan: native
+ members:
+ - nic3
+ external_overlay: # External network to be created in OpenStack by Services tenant
+ name: Public_internet
+ type: flat
+ gateway: 192.168.37.1
+ - private_cloud: # another external network
+ enabled: false
+ mtu: 64000
+ installer_vm: # Network settings for the Installer VM on admin network (note only valid on 'public' external network)
+ nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ vlan: 101
+ members:
+ - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
+ ip: 192.168.38.1 # IP to assign to Installer VM on this network
+ cidr: 192.168.38.0/24
+ gateway: 192.168.38.1
+ floating_ip_range:
+ - 192.168.38.200
+ - 192.168.38.220 #Range to allocate to floating IPs for the public network with Neutron
+ usable_ip_range:
+ - 192.168.38.10
+ - 192.168.38.199 # Usable IP range on the public network, usually this is a shared subnet
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ vlan: 101 # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic3 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: interface
+ vlan: 101
+ members:
+ - nic3
+ external_overlay: # External network to be created in OpenStack by Services tenant
+ name: private_cloud
+ type: vlan
+ segmentation_id: 101
+ gateway: 192.168.38.1
+ #
+ storage: # Storage network configuration
+ enabled: true
+ cidr: 12.0.0.0/24 # Subnet in CIDR format
+ mtu: 64000 # Storage network MTU
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: bond # Physical interface type (interface or bond)
+ vlan: native # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic4 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: bond
+ vlan: native
+ members:
+ - nic4
+ #
+ api: # API network configuration
+ enabled: false
+ cidr: fd00:fd00:fd00:4000::/64 # Subnet in CIDR format
+ vlan: 13 # VLAN tag to use for Overcloud hosts on this network
+ mtu: 64000 # API network MTU
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: bond # Physical interface type (interface or bond)
+ vlan: native # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic5 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: bond
+ vlan: native
+ members:
+ - nic5
-# "public" network is used for external connectivity.
-# The external network provides Internet access for virtual
-# machines. If floating IP range is defined for this network,
-# floating IP addresses can be used for accessing virtual
-# machines from outside of OPNFV cluster. Also external REST
-# API calls use this network.
-#
-public_network:
- enabled: true
- network_type: ''
- bridged_interface: ''
- compute_interface: nic3
- controller_interface: nic3
- vlan: native
- cidr: 192.168.37.0/24
- gateway: 192.168.37.1
- floating_ip_range: 192.168.37.200,192.168.37.220
- usable_ip_range: 192.168.37.10,192.168.37.199
- provisioner_ip: 192.168.37.1
+# JOID specific settings
+joid:
-# "storage" is an optional network used by storage backends.
-# You can configure this network in order to reduce load on
-# Control Plane Network.
-#
-storage_network:
- enabled: true
- vlan: native
- cidr: 12.0.0.0/24
- compute_interface: nic4
- controller_interface: nic4
+# Compass specific settings
+compass:
-#admin_network:
-# enabled: true
-# network_type: bridged #Indicates if this network will be bridged to an interface, or to a bond
-# bridged_interface: '' #Interface to bridge to for installer VM
-# bond_interfaces: '' #Interfaces to create bond with for installer VM
-# compute_interface: nic4 #Interface used for this network on the compute node. Can either be logical nic name like "nic1" or real name like "eth1"
-# controller_interface: nic4 #Interface used for this network on the controller node. Can either be logical nic name like "nic1" or real name like "eth1"
-# vlan: native #VLAN tag to use, native means none
-# usable_ip_range: 192.0.2.11,192.0.2.99 #Usable ip range, if empty entire range is usable, ex. 192.168.1.10,192.168.1.20
-# gateway: 192.0.2.1 #Gateway (only needed when public_network is disabled), if empty it is auto-detected
-# provisioner_ip: 192.0.2.1 #installer VM IP, if empty it is the next available IP in the admin subnet
-# cidr: 192.0.2.0/24 #subnet in CIDR format 192.168.1.0/24, if empty it will be auto-detected
-# dhcp_range: 192.0.2.2,192.0.2.10 #dhcp range for the admin network, if empty it will be automatically provisioned
-# introspection_range: 192.0.2.100,192.0.2.120 #Range used for introspection phase (examining nodes)
-#private_network:
-# enabled: false #If disabled, internal api traffic will collapse to admin_network
-#public_network:
-# enabled: true #If disabled, public_network traffic will collapse to admin network
-# network_type: ''
-# bridged_interface: ''
-# cidr: 192.168.37.0/24
-# gateway: 192.168.37.1
-# floating_ip_range: 192.168.37.200,192.168.37.220 #Range to allocate to floating IPs for the public network with Neutron
-# usable_ip_range: 192.168.37.10,192.168.37.199 #Usable IP range on the public network, usually this is a shared subnet
-# provisioner_ip: 192.168.37.1
-#storage_network:
-# enabled: false #If disabled, storage_network traffic will collapse to admin network
+# Apex specific settings
+apex:
+ networks:
+ admin:
+ introspection_range:
+ - 192.0.2.100
+ - 192.0.2.120 # Range used for introspection phase (examining nodes)
+# Fuel specific settings
+fuel:
diff --git a/config/network/network_settings_v6.yaml b/config/network/network_settings_v6.yaml
index dd2d066e..bfce3ab4 100644
--- a/config/network/network_settings_v6.yaml
+++ b/config/network/network_settings_v6.yaml
@@ -1,141 +1,184 @@
# This configuration file defines Network Environment for a
# Baremetal Deployment of OPNFV. It contains default values
-# for 4 following networks:
+# for 5 following networks:
#
# - admin
-# - private*
-# - public
+# - tenant*
+# - external*
# - storage*
# - api*
-#
# *) optional networks
#
-# Any values missing from this configuration file will be
-# auto-detected by deployment script from the existing network
-# configuration of the jumphost.
-#
# Optional networks will be consolidated with the admin network
-# if not explicitely configured.
+# if not explicitly configured.
#
# See short description of the networks in the comments below.
#
-
-# DNS Servers for all nodes, comma delimited list
-dns_servers: ["8.8.8.8", "8.8.4.4"]
-
# "admin" is the short name for Control Plane Network.
-# During OPNFV deployment it is used for node provisioning so
-# PXE boot should be enabled for the related interfaces on all
-# the nodes in the OPNFV cluster. After the deployment this
-# network is used as the OpenStack management network which
-# carries e.g. communication between its internal components.
-# This network must be IPv4 currently.
-domain_name: 'opnfvapex.com'
-
-admin_network:
- enabled: true
- network_type: bridged
- bridged_interface: ''
- bond_interfaces: ''
- compute_interface: nic1
- controller_interface: nic1
- vlan: native
- usable_ip_range: 192.0.2.11,192.0.2.99
- gateway: 192.0.2.1
- provisioner_ip: 192.0.2.1
- cidr: 192.0.2.0/24
- dhcp_range: 192.0.2.2,192.0.2.10
- introspection_range: 192.0.2.100,192.0.2.120
-
-# "private" is an optional network used as underlying physical
-# network for virtual provider and tenant networks created by
-# users. Traffic between virtual machines is carried by this
-# network.
-# This network must be IPv4 currently.
+# This network should be IPv4 even if it is an IPv6 deployment, since
+# IPv6 does not have PXE boot support.
+# During OPNFV deployment it is used for node provisioning which will require
+# PXE booting as well as running a DHCP server on this network. Be sure to
+# disable any other DHCP/TFTP server on this network.
+#
+# "tenant" is the network used for tenant traffic.
+#
+# "external" is the network which should have internet or external
+# connectivity. External OpenStack networks will be configured to egress this
+# network. There can be multiple external networks, but only one can be
+# assigned as "public", which the OpenStack public APIs will register on.
+#
+# "storage" is the network for storage I/O.
#
-private_network:
- enabled: true
- cidr: 11.0.0.0/24
- vlan: native
- cidr: 11.0.0.0/24
- compute_interface: nic2
- controller_interface: nic2
+# "api" is an optional network for splitting out OpenStack service API
+# communication. This should be used for IPv6 deployments.
-# "public" network is used for external connectivity.
-# The external network provides Internet access for virtual
-# machines. If floating IP range is defined for this network,
-# floating IP addresses can be used for accessing virtual
-# machines from outside of OPNFV cluster. Also external REST
-# API calls use this network.
-#
-public_network:
- enabled: true
- network_type: ''
- bridged_interface: ''
- compute_interface: nic3
- controller_interface: nic3
- vlan: native
- cidr: 2001:db8::/64
- gateway: 2001:db8::1
- provisioner_ip: 2001:db8::1
+#Metadata for the network configuration
+network-config-metadata:
+ title: LF-POD-1 Network config
+ version: 0.1
+ created: Mon Dec 28 2015
+ comment: None
-# "storage" is an optional network used by storage backends.
-# You can configure this network in order to reduce load on
-# Control Plane Network.
-#
-storage_network:
- enabled: true
- cidr: fd00:fd00:fd00:2000::/64
- vlan: native
- compute_interface: nic4
- controller_interface: nic4
+# DNS Settings
+dns-domain: opnfvlf.org
+dns-search: opnfvlf.org
+dns_nameservers:
+ - 8.8.8.8
+ - 8.8.4.4
+# NTP servers
+ntp:
+ - 0.se.pool.ntp.org
+ - 1.se.pool.ntp.org
+# Syslog server
+syslog:
+ server: 10.128.1.24
+ transport: 'tcp'
+networks: # Network configurations
+ admin: # Admin configuration (pxe and jumpstart),
+ enabled: true
+ installer_vm: # Network settings for the Installer VM on admin network
+ nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ members:
+ - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
+ vlan: native # VLAN tag to use for this network on Installer VM, native means none
+ ip: 192.0.2.1 # IP to assign to Installer VM on this network
+ usable_ip_range:
+ - 192.0.2.11
+ - 192.0.2.99 # Usable ip range, if empty entire range is usable
+ gateway: 192.0.2.1 # Gateway (only needed when the external network is disabled)
+ cidr: 192.0.2.0/24 # Subnet in CIDR format 192.168.1.0/24
+ dhcp_range:
+ - 192.0.2.2
+ - 192.0.2.10 # DHCP range for the admin network, if empty it will be automatically provisioned
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic1
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: bond
+ members:
+ - nic1
+ #
+ tenant: # Tenant network configuration
+ enabled: true
+ cidr: 11.0.0.0/24 # Subnet in CIDR format 192.168.1.0/24
+ mtu: 64000 # Tenant network MTU
+ overlay_id_range: 2,65535 # Tenant network Overlay segmentation ID range:
+ # VNI, VLAN-ID, etc.
+ segmentation_type: vxlan # Tenant network segmentation type:
+ # vlan, vxlan, gre
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ vlan: native # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic2 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: interface
+ vlan: native
+ members:
+ - nic2 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ #
+ external: # Can contain 1 or more external networks
+ - public: # "public" network will be the network the installer VM attaches to
+ enabled: true
+ mtu: 64000 # Public network MTU
+ installer_vm: # Network settings for the Installer VM on admin network (note only valid on 'public' external network)
+ nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ vlan: native
+ members:
+ - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
+ ip: 2001:db8::1 # IP to assign to Installer VM on this network
+ cidr: 2001:db8::0/64
+ gateway: 2001:db8::1
+ floating_ip_range:
+ - 2001:db8:0:0:0:0:0:2
+ - 2001:db8:0:0:ffff:ffff:ffff:ffff
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ vlan: native # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic3
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: interface
+ vlan: native
+ members:
+ - nic3
+ external_overlay: # External network to be created in OpenStack by Services tenant
+ name: Public_internet
+ type: flat
+ gateway: 2001:db8::1
+ #
+ storage: # Storage network configuration
+ enabled: true
+ cidr: fd00:fd00:fd00:2000::/64 # Subnet in CIDR format
+ mtu: 64000 # Storage network MTU
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: bond # Physical interface type (interface or bond)
+ vlan: native # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic4 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: bond
+ vlan: native
+ members:
+ - nic4
+ #
+ api: # API network configuration
+ enabled: true
+ cidr: fd00:fd00:fd00:4000::/64 # Subnet in CIDR format
+ vlan: 13 # VLAN tag to use for Overcloud hosts on this network
+ mtu: 64000 # API network MTU
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: bond # Physical interface type (interface or bond)
+ vlan: native # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic5 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: bond
+ vlan: native
+ members:
+ - nic5
-# "api" is an optional network used by internal openstack api services.
-api_network:
- enabled: true
- cidr: fd00:fd00:fd00:4000::/64
- vlan: native
- compute_interface: nic5
- controller_interface: nic5
+# JOID specific settings
+joid:
-#public_network:
-# enabled: true #If disabled, public_network traffic will collapse to admin network
-# network_type: ''
-# bridged_interface: ''
-# cidr: 192.168.37.0/24
-# gateway: 192.168.37.1
-# floating_ip_range: 192.168.37.200,192.168.37.220 #Range to allocate to floating IPs for the public network with Neutron
-# usable_ip_range: 192.168.37.10,192.168.37.199 #Usable IP range on the public network, usually this is a shared subnet
-# provisioner_ip: 192.168.37.1
-#
-#private_network:
-# enabled: false #If disabled, underlay traffic will collapse to admin_network
-# ipv6: true #This flag is only needed if cidr is not provided, and bridged_interface
-# is used for address auto detection.
-#
-#storage_network:
-# enabled: false #If disabled, storage_network traffic will collapse to admin network
-# ipv6: true #This flag is only needed if cidr is not provided, and bridged_interface
-# is used for address auto detection.
-#
-#api_network:
-# enabled: false #If disabled, api_network traffic will collapse to admin network
-# ipv6: true #This flag is only needed if cidr is not provided, and bridged_interface
-# is used for address auto detection.
-#
-#General behavior description (today's behavior, not necessarily optimal):
-# - If a network has cidr field defined, no auto detection will be done. All missing fields will be generated according to CIDR.
-# - If cidr is not specified, bridged_interface field must be specified. IP detection will be done on the system.
-# In this case, an optional ipv6 field can be specified to indicate what kind of IP discovery takes place.
-# - It is assumed the supplied cidr has enough address to generate all field today.
-# - If a field is specified, no auto generation (from cidr) or auto detection (from bridged_interface) will be performed.
-# It is assumed the value specified is correct.
-# - Any networks can be specified to be IPv6, but only private, storage and public SHOULD. No check is performed to validate this.
-#
-#Other changes
-# - All IP addresses can now be IPv4 or IPv6, we will detect the correct family and configure accordingly.
-# Note that if any network is specified to be IPv6, we consider it a IPv6 deployment. IPv6 deployment does require additional
-# configurations in resulting network environment yaml, such as enabling ipv6 support for NOVA, RABBITMQ, etc.
-# \ No newline at end of file
+# Compass specific settings
+compass:
+
+# Apex specific settings
+apex:
+ networks:
+ admin:
+ introspection_range:
+ - 192.0.2.100
+ - 192.0.2.120 # Range used for introspection phase (examining nodes)
+# Fuel specific settings
+fuel:
diff --git a/config/network/network_settings_vlans.yaml b/config/network/network_settings_vlans.yaml
index 40d6183e..beeae477 100644
--- a/config/network/network_settings_vlans.yaml
+++ b/config/network/network_settings_vlans.yaml
@@ -1,102 +1,219 @@
# This configuration file defines Network Environment for a
# Baremetal Deployment of OPNFV. It contains default values
-# for 4 following networks:
+# for 5 following networks:
#
# - admin
-# - private*
-# - public
+# - tenant*
+# - external*
# - storage*
-#
+# - api*
# *) optional networks
#
-# Any values missing from this configuration file will be
-# auto-detected by deployment script from the existing network
-# configuration of the jumphost.
-#
# Optional networks will be consolidated with the admin network
-# if not explicitely configured.
+# if not explicitly configured.
#
# See short description of the networks in the comments below.
#
-
# "admin" is the short name for Control Plane Network.
-# During OPNFV deployment it is used for node provisioning so
-# PXE boot should be enabled for the related interfaces on all
-# the nodes in the OPNFV cluster. After the deployment this
-# network is used as the OpenStack management network which
-# carries e.g. communication between its internal components.
+# This network should be IPv4 even if it is an IPv6 deployment, since
+# IPv6 does not have PXE boot support.
+# During OPNFV deployment it is used for node provisioning which will require
+# PXE booting as well as running a DHCP server on this network. Be sure to
+# disable any other DHCP/TFTP server on this network.
#
-admin_network:
- enabled: true
- network_type: bridged
- bridged_interface: ''
- bond_interfaces: ''
- usable_ip_range: 192.0.2.11,192.0.2.99
- gateway: 192.0.2.1
- provisioner_ip: 192.0.2.1
- cidr: 192.0.2.0/24
- dhcp_range: 192.0.2.2,192.0.2.10
- introspection_range: 192.0.2.100,192.0.2.120
-
-# "private" is an optional network used as underlying physical
-# network for virtual provider and tenant networks created by
-# users. Traffic between virtual machines is carried by this
-# network.
+# "tenant" is the network used for tenant traffic.
#
-private_network:
- enabled: true
- vlan: 400
- cidr: 11.0.0.0/24
-
-# "public" network is used for external connectivity.
-# The external network provides Internet access for virtual
-# machines. If floating IP range is defined for this network,
-# floating IP addresses can be used for accessing virtual
-# machines from outside of OPNFV cluster. Also external REST
-# API calls use this network.
+# "external" is the network which should have internet or external
+# connectivity. External OpenStack networks will be configured to egress this
+# network. There can be multiple external networks, but only one can be
+# assigned as "public", which the OpenStack public APIs will register on.
#
-public_network:
- enabled: true
- network_type: ''
- bridged_interface: ''
- vlan: 500
- cidr: 192.168.37.0/24
- gateway: 192.168.37.1
- floating_ip_range: 192.168.37.200,192.168.37.220
- usable_ip_range: 192.168.37.10,192.168.37.199
- provisioner_ip: 192.168.37.1
-
-# "storage" is an optional network used by storage backends.
-# You can configure this network in order to reduce load on
-# Control Plane Network.
+# "storage" is the network for storage I/O.
#
-storage_network:
- enabled: true
- vlan: 200
- cidr: 12.0.0.0/24
+# "api" is an optional network for splitting out OpenStack service API
+# communication. This should be used for IPv6 deployments.
+
+
+#Metadata for the network configuration
+network-config-metadata:
+ title: LF-POD-1 Network config
+ version: 0.1
+ created: Mon Dec 28 2015
+ comment: None
+
+# DNS Settings
+dns-domain: opnfvlf.org
+dns-search: opnfvlf.org
+dns_nameservers:
+ - 8.8.8.8
+ - 8.8.4.4
+# NTP servers
+ntp:
+ - 0.se.pool.ntp.org
+ - 1.se.pool.ntp.org
+# Syslog server
+syslog:
+ server: 10.128.1.24
+ transport: 'tcp'
+
+networks: # Network configurations
+ admin: # Admin configuration (pxe and jumpstart),
+ enabled: true
+ installer_vm: # Network settings for the Installer VM on admin network
+ nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ members:
+ - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
+ vlan: native # VLAN tag to use for this network on Installer VM, native means none
+ ip: 192.0.2.1 # IP to assign to Installer VM on this network
+ usable_ip_range:
+ - 192.0.2.11
+ - 192.0.2.99 # Usable ip range, if empty entire range is usable
+ gateway: 192.0.2.1 # Gateway (only needed when the external network is disabled)
+ cidr: 192.0.2.0/24 # Subnet in CIDR format 192.168.1.0/24
+ dhcp_range:
+ - 192.0.2.2
+ - 192.0.2.10 # DHCP range for the admin network, if empty it will be automatically provisioned
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic1
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: bond
+ members:
+ - nic1
+ #
+ tenant: # Tenant network configuration
+ enabled: true
+ cidr: 11.0.0.0/24 # Subnet in CIDR format 192.168.1.0/24
+ mtu: 64000 # Tenant network MTU
+ overlay_id_range: 2,65535 # Tenant network Overlay segmentation ID range:
+ # VNI, VLAN-ID, etc.
+ segmentation_type: vxlan # Tenant network segmentation type:
+ # vlan, vxlan, gre
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ vlan: 401 # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic1 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: interface
+ vlan: 401
+ members:
+ - nic1 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ #
+ external: # Can contain 1 or more external networks
+ - public: # "public" network will be the network the installer VM attaches to
+ enabled: true
+ mtu: 64000 # Public network MTU
+ installer_vm: # Network settings for the Installer VM on admin network (note only valid on 'public' external network)
+ nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ vlan: 501
+ members:
+ - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
+ ip: 192.168.37.12 # IP to assign to Installer VM on this network
+ cidr: 192.168.37.0/24
+ gateway: 192.168.37.1
+ floating_ip_range:
+ - 192.168.37.200
+ - 192.168.37.220 # Range to allocate to floating IPs for the public network with Neutron
+ usable_ip_range:
+ - 192.168.37.10
+ - 192.168.37.199 # Usable IP range on the public network, usually this is a shared subnet
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ vlan: 501 # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic1
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: interface
+ vlan: 501
+ members:
+ - nic1
+ external_overlay: # External network to be created in OpenStack by Services tenant
+ name: Public_internet
+ type: flat
+ gateway: 192.168.37.1
+ - private_cloud: # another external network
+ enabled: false
+ mtu: 64000
+ installer_vm: # Network settings for the Installer VM on admin network (note only valid on 'public' external network)
+ nic_type: interface # Indicates if this VM will be bridged to an interface, or to a bond
+ vlan: 501
+ members:
+ - em1 # Member Interface to bridge to for installer VM (use multiple values for bond)
+ ip: 192.168.38.12 # IP to assign to Installer VM on this network
+ cidr: 192.168.38.0/24
+ gateway: 192.168.38.1
+ floating_ip_range:
+ - 192.168.38.200
+ - 192.168.38.220 # Range to allocate to floating IPs for the public network with Neutron
+ usable_ip_range:
+ - 192.168.38.10
+ - 192.168.38.199 # Usable IP range on the public network, usually this is a shared subnet
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: interface # Physical interface type (interface or bond)
+ vlan: 502 # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - eth1 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: interface
+ vlan: 502
+ members:
+ - eth1
+ external_overlay: # External network to be created in OpenStack by Services tenant
+ name: private_cloud
+ type: vlan
+ segmentation_id: 101
+ gateway: 192.168.38.1
+ #
+ storage: # Storage network configuration
+ enabled: true
+ cidr: 12.0.0.0/24 # Subnet in CIDR format
+ mtu: 64000 # Storage network MTU
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: bond # Physical interface type (interface or bond)
+ vlan: 201 # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic4 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: bond
+ vlan: 201
+ members:
+ - nic4
+ #
+ api: # API network configuration
+ enabled: false
+ cidr: fd00:fd00:fd00:4000::/64 # Subnet in CIDR format
+ mtu: 64000 # API network MTU
+ nic_mapping: # Mapping of network configuration for Overcloud Nodes
+ compute: # Mapping for compute profile (nodes that will be used as Compute nodes)
+ phys_type: bond # Physical interface type (interface or bond)
+ vlan: 101 # VLAN tag to use with this NIC
+ members: # Physical NIC members of this mapping (Single value allowed for interface phys_type)
+ - nic5 # Note, for Apex you may also use the logical nic name (found by nic order), such as "nic1"
+ controller: # Mapping for controller profile (nodes that will be used as Controller nodes)
+ phys_type: bond
+ vlan: 101
+ members:
+ - nic5
+
+# JOID specific settings
+joid:
+
+# Compass specific settings
+compass:
-#admin_network:
-# enabled: true
-# network_type: bridged #Indicates if this network will be bridged to an interface, or to a bond
-# bridged_interface: '' #Interface to bridge to for installer VM
-# bond_interfaces: '' #Interfaces to create bond with for installer VM
-# vlan: native #VLAN tag to use, native means none
-# usable_ip_range: 192.0.2.11,192.0.2.99 #Usable ip range, if empty entire range is usable, ex. 192.168.1.10,192.168.1.20
-# gateway: 192.0.2.1 #Gateway (only needed when public_network is disabled), if empty it is auto-detected
-# provisioner_ip: 192.0.2.1 #installer VM IP, if empty it is the next available IP in the admin subnet
-# cidr: 192.0.2.0/24 #subnet in CIDR format 192.168.1.0/24, if empty it will be auto-detected
-# dhcp_range: 192.0.2.2,192.0.2.10 #dhcp range for the admin network, if empty it will be automatically provisioned
-# introspection_range: 192.0.2.100,192.0.2.120 #Range used for introspection phase (examining nodes)
-#private_network:
-# enabled: false #If disabled, internal api traffic will collapse to admin_network
-#public_network:
-# enabled: true #If disabled, public_network traffic will collapse to admin network
-# network_type: ''
-# bridged_interface: ''
-# cidr: 192.168.37.0/24
-# gateway: 192.168.37.1
-# floating_ip_range: 192.168.37.200,192.168.37.220 #Range to allocate to floating IPs for the public network with Neutron
-# usable_ip_range: 192.168.37.10,192.168.37.199 #Usable IP range on the public network, usually this is a shared subnet
-# provisioner_ip: 192.168.37.1
-#storage_network:
-# enabled: false #If disabled, storage_network traffic will collapse to admin network
+# Apex specific settings
+apex:
+ networks:
+ admin:
+ introspection_range:
+ - 192.0.2.100
+ - 192.0.2.120 # Range used for introspection phase (examining nodes)
+# Fuel specific settings
+fuel:
diff --git a/docs/installationprocedure/baremetal.rst b/docs/installationprocedure/baremetal.rst
index d41c77e0..2de6e8a8 100644
--- a/docs/installationprocedure/baremetal.rst
+++ b/docs/installationprocedure/baremetal.rst
@@ -230,11 +230,10 @@ You are now ready to deploy OPNFV using Apex!
Follow the steps below to execute:
1. Execute opnfv-deploy
- ``sudo opnfv-deploy [ --flat ] -n network_settings.yaml
+ ``sudo opnfv-deploy -n network_settings.yaml
-i inventory.yaml -d deploy_settings.yaml``
If you need more information about the options that can be passed to
- opnfv-deploy use ``opnfv-deploy --help`` --flat collapses all networks to a
- single nic, only uses the admin network from the network settings file. -n
+ opnfv-deploy, use ``opnfv-deploy --help``. -n
network_settings.yaml allows you to customize your networking topology.
2. Wait while deployment is executed.
diff --git a/docs/installationprocedure/index.rst b/docs/installationprocedure/index.rst
index 0dd3d87b..83e9292e 100644
--- a/docs/installationprocedure/index.rst
+++ b/docs/installationprocedure/index.rst
@@ -15,6 +15,7 @@ Contents:
baremetal.rst
virtualinstall.rst
verification.rst
+ troubleshooting.rst
references.rst
:Authors: Tim Rozet (trozet@redhat.com)
diff --git a/docs/installationprocedure/troubleshooting.rst b/docs/installationprocedure/troubleshooting.rst
new file mode 100644
index 00000000..56dfa5be
--- /dev/null
+++ b/docs/installationprocedure/troubleshooting.rst
@@ -0,0 +1,144 @@
+Developer Guide and Troubleshooting
+===================================
+
+This section aims to explain in more detail the steps that Apex follows
+to make a deployment. It also tries to explain possible issues you might find
+in the process of building or deploying an environment.
+
+After installing the Apex RPMs on the jumphost, the following files and
+directories will be placed around the system.
+
+1. /etc/opnfv-apex: this directory contains a set of scenarios to be
+ deployed with different characteristics such as HA (High Availability),
+ SDN controller integration (OpenDaylight/ONOS), BGPVPN, FDIO, etc.
+ Having a look at any of these files will give you an idea of how to make
+ a customized scenario by setting up the different flags.
+
+2. /usr/bin/: it contains the binaries for the commands opnfv-deploy,
+ opnfv-clean and opnfv-util.
+
+3. /var/opt/opnfv/: it contains several files and directories.
+
+ 3.1. images/: this folder contains the images that will be deployed
+ according to the chosen scenario.
+
+ 3.2. lib/: a set of scripts that are executed during the different
+ phases of the deployment.
+
+
+Utilization of Images
+---------------------
+
+As mentioned earlier in this guide, the Undercloud VM will be in charge of
+deploying OPNFV (Overcloud VMs). Since the Undercloud is an all-in-one
+OpenStack deployment, it will use Glance to manage the images that will be
+deployed as the Overcloud.
+
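+The images registered in the undercloud's Glance can be listed with the
+standard client, run on the undercloud after sourcing its credentials (a
+quick sanity check, not a required step):
+
+ ``openstack image list``
+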
+So whatever customization is done to the images located on the jumphost
+(/var/opt/opnfv/images) will be uploaded to the undercloud and,
+consequently, to the overcloud.
+
+Make sure the customization is performed on the right image. For example,
+if you virt-customize the image overcloud-full-opendaylight.qcow2 but then
+deploy OPNFV with the following command:
+
+ ``sudo opnfv-deploy -n network_settings.yaml -d
+ /etc/opnfv-apex/os-onos-nofeature-ha.yaml``
+
+it will not have any effect on the deployment, since the customized image
+is the OpenDaylight one, while the scenario indicates that the image to be
+deployed is overcloud-full-onos.qcow2.
+
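+For instance, to customize the image that this scenario does deploy, a
+virt-customize call along these lines could be used (the exact command is
+only an illustration):
+
+ ``sudo LIBGUESTFS_BACKEND=direct virt-customize --run-command "touch /root/customized" \
+ -a /var/opt/opnfv/images/overcloud-full-onos.qcow2``
+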
+
+Post-deployment Configuration
+-----------------------------
+
+Post-deployment scripts will perform some configuration tasks such as
+ssh-key injection, network configuration, NAT and Open vSwitch creation.
+They will also take care of some OpenStack tasks such as the creation of
+endpoints, external networks, users, projects, etc.
+
+If any of these steps fails, the execution will be interrupted. In some
+cases, the interruption occurs at a very early stage, so a new deployment
+must be executed. In other cases, however, it can be worth trying to debug
+it.
+
+ 1. There is no external connectivity from the overcloud nodes:
+
+ Post-deployment scripts will configure the routing, nameservers
+ and a number of other things between the overcloud and the
+ undercloud. If local connectivity, such as pinging between the
+ different nodes, is working fine, the script most likely failed
+ while configuring the NAT via iptables. The main rules to enable
+ external connectivity would look like these:
+
+ ``iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE``
+ ``iptables -t nat -A POSTROUTING -s ${external_cidr} -o eth0 -j
+ MASQUERADE``
+ ``iptables -A FORWARD -i eth2 -j ACCEPT``
+ ``iptables -A FORWARD -s ${external_cidr} -m state --state
+ ESTABLISHED,RELATED -j ACCEPT``
+ ``service iptables save``
+
+ These rules must be executed as root (or sudo) on the
+ undercloud machine.
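+
+ You can verify that the NAT rules are in place with the following
+ command (the exact output will vary per deployment):
+
+ ``iptables -t nat -L POSTROUTING -n -v``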
+
+OpenDaylight Integration
+------------------------
+
+When a user deploys any of the following scenarios:
+
+ - os-odl_l2-bgpvpn-ha.yaml
+ - os-odl_l2-fdio-ha.yaml
+ - os-odl_l2-fdio-noha.yaml
+ - os-odl_l2-nofeature-ha.yaml
+ - os-odl_l2-sfc-noha.yaml
+ - os-odl_l3-nofeature-ha.yaml
+
+the OpenDaylight (ODL) SDN controller will also be deployed and completely
+integrated with OpenStack. ODL runs as a systemd service, so you can
+manage it as a regular service:
+
+ ``systemctl start/restart/stop opendaylight.service``
+
+This command must be executed as root on the controller node of the
+overcloud, where OpenDaylight is running. ODL files are located in
+/opt/opendaylight. ODL uses Karaf as a Java container management system
+that allows users to install new features, check logs and change the
+configuration. In order to connect to Karaf's console, use the following
+command:
+
+ ``opnfv-util opendaylight``
+
+This command is very easy to use, but in case it fails to connect to Karaf,
+this is the command that is executed underneath:
+
+ ``ssh -p 8101 -o UserKnownHostsFile=/dev/null -o
+ StrictHostKeyChecking=no karaf@localhost``
+
+Use localhost when the command is executed on the overcloud controller,
+and its public IP to connect from elsewhere.
+
+Debugging Failures
+------------------
+
+This section gathers different types of failures, their root causes and
+some possible solutions or workarounds to get the process moving again.
+
+1. Post-deployment error messages appear in the output log:
+
+ Heat resources will apply puppet manifests during this phase. If one of
+ these processes fails, you can inspect the error and, after that,
+ re-run puppet to apply that manifest. Log into the controller (see the
+ verification section for that) and check /var/log/messages as root.
+ Search for the error you have encountered and see if you can fix it. In
+ order to re-run the puppet manifest, search for "puppet apply" in that
+ same log. You will have to run the last "puppet apply" before the
+ error. It should look like this:
+
+ ``FACTER_heat_outputs_path="/var/run/heat-config/heat-config-puppet/5b4c7a01-0d63-4a71-81e9-d5ee6f0a1f2f" FACTER_fqdn="overcloud-controller-0.localdomain.com" \
+ FACTER_deploy_config_name="ControllerOvercloudServicesDeployment_Step4" puppet apply --detailed-exitcodes -l syslog -l console \
+ /var/lib/heat-config/heat-config-puppet/5b4c7a01-0d63-4a71-81e9-d5ee6f0a1f2f.pp``
+
+    Note that Heat will trigger the puppet run via os-apply-config, passing a
+    different value for step each time. There is a total of five steps. Some
+    of these steps will not be executed, depending on the type of scenario
+    that is being deployed.
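+
+    As a sketch (assuming the default log location used above), the last
+    "puppet apply" recorded before the error can be located with:
+
+       ``grep "puppet apply" /var/log/messages | tail -n 1``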
diff --git a/docs/installationprocedure/virtualinstall.rst b/docs/installationprocedure/virtualinstall.rst
index 01971893..d2c81abe 100644
--- a/docs/installationprocedure/virtualinstall.rst
+++ b/docs/installationprocedure/virtualinstall.rst
@@ -49,7 +49,7 @@ environment will deploy with the following architecture:
Follow the steps below to execute:
1. ``sudo opnfv-deploy -v [ --virtual-computes n ]
- [ --virtual-cpus n ] [ --virtual-ram n ] [ --flat ]
+ [ --virtual-cpus n ] [ --virtual-ram n ]
-n network_settings.yaml -i inventory.yaml -d deploy_settings.yaml``
2. It will take approximately 45 minutes to an hour to stand up undercloud,
diff --git a/lib/common-functions.sh b/lib/common-functions.sh
index 6941093c..2d113450 100644
--- a/lib/common-functions.sh
+++ b/lib/common-functions.sh
@@ -33,12 +33,12 @@ function find_ip {
af=$2
fi
- python3.4 -B $LIB/python/apex_python_utils.py find-ip -i $1 -af $af
+ python3 -B $LIB/python/apex_python_utils.py find-ip -i $1 -af $af
}
##attach interface to OVS and set the network config correctly
##params: bridge to attach to, interface to attach, network type (optional)
-##public indicates attaching to a public interface
+##external indicates attaching to an external interface
function attach_interface_to_ovs {
local bridge interface
local if_ip if_mask if_gw if_file ovs_file if_prefix
@@ -72,15 +72,15 @@ function attach_interface_to_ovs {
if [ -z "$if_mask" ]; then
# we can look for PREFIX here, then convert it to NETMASK
- if_prefix=$(sed -n 's/^PREFIX=\(.*\)$/\1/p' ${if_file})
+ if_prefix=$(sed -n 's/^PREFIX=[^0-9]*\([0-9][0-9]*\)[^0-9]*$/\1/p' ${if_file})
if_mask=$(prefix2mask ${if_prefix})
fi
if [[ -z "$if_ip" || -z "$if_mask" ]]; then
echo "ERROR: IPADDR or NETMASK/PREFIX missing for ${interface}"
return 1
- elif [[ -z "$if_gw" && "$3" == "public_network" ]]; then
- echo "ERROR: GATEWAY missing for ${interface}, which is public"
+ elif [[ -z "$if_gw" && "$3" == "external" ]]; then
+ echo "ERROR: GATEWAY missing for ${interface}, which is external"
return 1
fi
diff --git a/lib/configure-deps-functions.sh b/lib/configure-deps-functions.sh
index ffc764b4..1d238f87 100755
--- a/lib/configure-deps-functions.sh
+++ b/lib/configure-deps-functions.sh
@@ -33,13 +33,9 @@ function configure_deps {
systemctl status libvirtd || systemctl start libvirtd
systemctl status openvswitch || systemctl start openvswitch
- # If flat we only use admin network
- if [[ "$net_isolation_enabled" == "FALSE" ]]; then
- virsh_enabled_networks="admin_network"
- enabled_network_list="admin_network"
- # For baremetal we only need to create/attach Undercloud to admin and public
- elif [ "$virtual" == "FALSE" ]; then
- virsh_enabled_networks="admin_network public_network"
+ # For baremetal we only need to create/attach Undercloud to admin and external
+ if [ "$virtual" == "FALSE" ]; then
+ virsh_enabled_networks="admin external"
else
virsh_enabled_networks=$enabled_network_list
fi
@@ -54,7 +50,7 @@ function configure_deps {
for network in ${enabled_network_list}; do
echo "${blue}INFO: Creating Virsh Network: $network & OVS Bridge: ${NET_MAP[$network]}${reset}"
ovs-vsctl list-br | grep "^${NET_MAP[$network]}$" > /dev/null || ovs-vsctl add-br ${NET_MAP[$network]}
- virsh net-list --all | grep $network > /dev/null || (cat > ${libvirt_dir}/apex-virsh-net.xml && virsh net-define ${libvirt_dir}/apex-virsh-net.xml) << EOF
+ virsh net-list --all | grep " $network " > /dev/null || (cat > ${libvirt_dir}/apex-virsh-net.xml && virsh net-define ${libvirt_dir}/apex-virsh-net.xml) << EOF
<network>
<name>$network</name>
<forward mode='bridge'/>
@@ -62,7 +58,7 @@ function configure_deps {
<virtualport type='openvswitch'/>
</network>
EOF
- if ! (virsh net-list --all | grep $network > /dev/null); then
+ if ! (virsh net-list --all | grep " $network " > /dev/null); then
echo "${red}ERROR: unable to create network: ${network}${reset}"
exit 1;
fi
@@ -76,7 +72,7 @@ EOF
# bridge interfaces to correct OVS instances for baremetal deployment
for network in ${enabled_network_list}; do
- if [[ "$network" != "admin_network" && "$network" != "public_network" ]]; then
+ if [[ "$network" != "admin" && "$network" != "external" ]]; then
continue
fi
this_interface=$(eval echo \${${network}_bridged_interface})
@@ -100,7 +96,7 @@ EOF
exit 1
fi
echo "${blue}INFO: Creating Virsh Network: $network${reset}"
- virsh net-list --all | grep $network > /dev/null || (cat > ${libvirt_dir}/apex-virsh-net.xml && virsh net-define ${libvirt_dir}/apex-virsh-net.xml) << EOF
+ virsh net-list --all | grep " $network " > /dev/null || (cat > ${libvirt_dir}/apex-virsh-net.xml && virsh net-define ${libvirt_dir}/apex-virsh-net.xml) << EOF
<network ipv6='yes'>
<name>$network</name>
<forward mode='bridge'/>
diff --git a/lib/overcloud-deploy-functions.sh b/lib/overcloud-deploy-functions.sh
index c7301fdd..2066f15a 100755
--- a/lib/overcloud-deploy-functions.sh
+++ b/lib/overcloud-deploy-functions.sh
@@ -110,7 +110,11 @@ EOF
-a overcloud-full.qcow2
fi
else
+ sudo sed -i '/NeutronOVSDataPathType:/c\ NeutronOVSDataPathType: netdev' /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum install -y /root/dpdk_rpms/*" \
+ --run-command "sed -i '/RuntimeDirectoryMode=.*/d' /usr/lib/systemd/system/openvswitch-nonetwork.service" \
+ --run-command "printf \"%s\\n\" RuntimeDirectoryMode=0775 Group=qemu UMask=0002 >> /usr/lib/systemd/system/openvswitch-nonetwork.service" \
+ --run-command "sed -i 's/\\(^\\s\\+\\)\\(start_daemon "$OVS_VSWITCHD_PRIORITY"\\)/\\1umask 0002 \\&\\& \\2/' /usr/share/openvswitch/scripts/ovs-ctl" \
-a overcloud-full.qcow2
fi
EOI
@@ -200,6 +204,10 @@ EOI
# make sure ceph is installed
DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml"
+ #DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml"
+ DEPLOY_OPTIONS+=" -e network-environment.yaml"
+
+
# get number of nodes available in inventory
num_control_nodes=$(ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "grep -c profile:control /home/stack/instackenv.json")
num_compute_nodes=$(ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "grep -c profile:compute /home/stack/instackenv.json")
@@ -229,14 +237,7 @@ EOI
DEPLOY_OPTIONS+=" --compute-scale ${num_compute_nodes}"
fi
- if [[ "$net_isolation_enabled" == "TRUE" ]]; then
- #DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml"
- DEPLOY_OPTIONS+=" -e network-environment.yaml"
- fi
-
- if [[ "$ha_enabled" == "True" ]] || [[ "$net_isolation_enabled" == "TRUE" ]]; then
- DEPLOY_OPTIONS+=" --ntp-server $ntp_server"
- fi
+ DEPLOY_OPTIONS+=" --ntp-server $ntp_server"
DEPLOY_OPTIONS+=" --control-flavor control --compute-flavor compute"
if [[ "$virtual" == "TRUE" ]]; then
diff --git a/lib/parse-functions.sh b/lib/parse-functions.sh
index 87453ea3..84da75c5 100755
--- a/lib/parse-functions.sh
+++ b/lib/parse-functions.sh
@@ -25,7 +25,7 @@ parse_network_settings() {
done
fi
- if output=$(python3 -B $LIB/python/apex_python_utils.py parse-net-settings -s $NETSETS $net_isolation_arg -td $APEX_TMP_DIR -e $CONFIG/network-environment.yaml $parse_ext); then
+ if output=$(python3 -B $LIB/python/apex_python_utils.py parse-net-settings -s $NETSETS -td $APEX_TMP_DIR -e $CONFIG/network-environment.yaml $parse_ext); then
echo -e "${blue}${output}${reset}"
eval "$output"
else
@@ -34,11 +34,7 @@ parse_network_settings() {
fi
if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
- if [ "$net_isolation_enabled" == "FALSE" ]; then
- echo -e "${red}ERROR: flat network is not supported with ovs-dpdk ${reset}"
- exit 1
- fi
- if [[ ! $enabled_network_list =~ "private_network" ]]; then
+ if [[ ! $enabled_network_list =~ "tenant" ]]; then
echo -e "${red}ERROR: tenant network is not enabled for ovs-dpdk ${reset}"
exit 1
fi
diff --git a/lib/post-install-functions.sh b/lib/post-install-functions.sh
index 7e7db5ca..d21b8366 100755
--- a/lib/post-install-functions.sh
+++ b/lib/post-install-functions.sh
@@ -11,9 +11,9 @@
##Post configuration after install
##params: none
function configure_post_install {
- local opnfv_attach_networks ovs_ip ip_range net_cidr tmp_ip af public_network_ipv6
- public_network_ipv6=False
- opnfv_attach_networks="admin_network public_network"
+ local opnfv_attach_networks ovs_ip ip_range net_cidr tmp_ip af external_network_ipv6
+ external_network_ipv6=False
+ opnfv_attach_networks="admin external"
echo -e "${blue}INFO: Post Install Configuration Running...${reset}"
@@ -46,8 +46,8 @@ EOI
af=4
else
af=6
- if [ "$network" == "public_network" ]; then
- public_network_ipv6=True
+ if [ "$network" == "external" ]; then
+      external_network_ipv6=True
fi
#enable ipv6 on bridge interface
echo 0 > /proc/sys/net/ipv6/conf/${NET_MAP[$network]}/disable_ipv6
@@ -87,15 +87,15 @@ EOI
source overcloudrc
set -o errexit
echo "Configuring Neutron external network"
-if [[ -n "$public_network_vlan" && "$public_network_vlan" != 'native' ]]; then
- neutron net-create external --router:external=True --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') --provider:network_type vlan --provider:segmentation_id ${public_network_vlan} --provider:physical_network datacentre
+if [[ -n "$external_nic_mapping_compute_vlan" && "$external_nic_mapping_compute_vlan" != 'native' ]]; then
+ neutron net-create external --router:external=True --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') --provider:network_type vlan --provider:segmentation_id ${external_nic_mapping_compute_vlan} --provider:physical_network datacentre
else
neutron net-create external --router:external=True --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }')
fi
-if [ "$public_network_ipv6" == "True" ]; then
- neutron subnet-create --name external-net --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') external --ip_version 6 --ipv6_ra_mode slaac --ipv6_address_mode slaac --gateway ${public_network_gateway} --allocation-pool start=${public_network_floating_ip_range%%,*},end=${public_network_floating_ip_range##*,} ${public_network_cidr}
+if [ "$external_network_ipv6" == "True" ]; then
+ neutron subnet-create --name external-net --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') external --ip_version 6 --ipv6_ra_mode slaac --ipv6_address_mode slaac --gateway ${external_gateway} --allocation-pool start=${external_floating_ip_range%%,*},end=${external_floating_ip_range##*,} ${external_cidr}
else
- neutron subnet-create --name external-net --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') --disable-dhcp external --gateway ${public_network_gateway} --allocation-pool start=${public_network_floating_ip_range%%,*},end=${public_network_floating_ip_range##*,} ${public_network_cidr}
+ neutron subnet-create --name external-net --tenant-id \$(openstack project show service | grep id | awk '{ print \$4 }') --disable-dhcp external --gateway ${external_gateway} --allocation-pool start=${external_floating_ip_range%%,*},end=${external_floating_ip_range##*,} ${external_cidr}
fi
echo "Removing sahara endpoint and service"
@@ -142,14 +142,14 @@ if [ "${deploy_options_array['congress']}" == 'True' ]; then
fi
EOI
- # for virtual, we NAT public network through Undercloud
+ # for virtual, we NAT external network through Undercloud
# same goes for baremetal if only jumphost has external connectivity
- if [ "$virtual" == "TRUE" ] || ! test_overcloud_connectivity && [ "$public_network_ipv6" != "True" ]; then
- if ! configure_undercloud_nat ${public_network_cidr}; then
- echo -e "${red}ERROR: Unable to NAT undercloud with external net: ${public_network_cidr}${reset}"
+ if [ "$virtual" == "TRUE" ] || ! test_overcloud_connectivity && [ "$external_network_ipv6" != "True" ]; then
+ if ! configure_undercloud_nat ${external_cidr}; then
+ echo -e "${red}ERROR: Unable to NAT undercloud with external net: ${external_cidr}${reset}"
exit 1
else
- echo -e "${blue}INFO: Undercloud VM has been setup to NAT Overcloud public network${reset}"
+ echo -e "${blue}INFO: Undercloud VM has been setup to NAT Overcloud external network${reset}"
fi
fi
diff --git a/lib/python/apex/common/constants.py b/lib/python/apex/common/constants.py
index dfb6267b..db0a9fd1 100644
--- a/lib/python/apex/common/constants.py
+++ b/lib/python/apex/common/constants.py
@@ -7,12 +7,15 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-ADMIN_NETWORK = 'admin_network'
-PRIVATE_NETWORK = 'private_network'
-PUBLIC_NETWORK = 'public_network'
-STORAGE_NETWORK = 'storage_network'
-API_NETWORK = 'api_network'
-OPNFV_NETWORK_TYPES = [ADMIN_NETWORK, PRIVATE_NETWORK, PUBLIC_NETWORK,
+ADMIN_NETWORK = 'admin'
+TENANT_NETWORK = 'tenant'
+EXTERNAL_NETWORK = 'external'
+STORAGE_NETWORK = 'storage'
+API_NETWORK = 'api'
+CONTROLLER = 'controller'
+COMPUTE = 'compute'
+
+OPNFV_NETWORK_TYPES = [ADMIN_NETWORK, TENANT_NETWORK, EXTERNAL_NETWORK,
STORAGE_NETWORK, API_NETWORK]
DNS_SERVERS = ["8.8.8.8", "8.8.4.4"]
COMPUTE = 'compute'
diff --git a/lib/python/apex/network_environment.py b/lib/python/apex/network_environment.py
index 15fe873f..5cb2d0cd 100644
--- a/lib/python/apex/network_environment.py
+++ b/lib/python/apex/network_environment.py
@@ -10,10 +10,12 @@
import yaml
import re
from .common.constants import (
+ CONTROLLER,
+ COMPUTE,
ADMIN_NETWORK,
- PRIVATE_NETWORK,
+ TENANT_NETWORK,
STORAGE_NETWORK,
- PUBLIC_NETWORK,
+ EXTERNAL_NETWORK,
API_NETWORK,
CONTROLLER_PRE,
COMPUTE_PRE,
@@ -56,6 +58,9 @@ class NetworkEnvironment(dict):
"""
def __init__(self, net_settings, filename, compute_pre_config=False,
controller_pre_config=False):
+ """
+ Create Network Environment according to Network Settings
+ """
init_dict = {}
if type(filename) is str:
with open(filename, 'r') as net_env_fh:
@@ -63,109 +68,102 @@ class NetworkEnvironment(dict):
super().__init__(init_dict)
try:
- enabled_networks = net_settings.enabled_network_list
+ enabled_nets = net_settings.enabled_network_list
except:
raise NetworkEnvException('Invalid Network Setting object')
self._set_tht_dir()
- enabled_networks = net_settings.get_enabled_networks()
+ nets = net_settings['networks']
- admin_cidr = net_settings[ADMIN_NETWORK]['cidr']
+ admin_cidr = nets[ADMIN_NETWORK]['cidr']
admin_prefix = str(admin_cidr.prefixlen)
self[param_def]['ControlPlaneSubnetCidr'] = admin_prefix
self[param_def]['ControlPlaneDefaultRoute'] = \
- net_settings[ADMIN_NETWORK]['provisioner_ip']
- public_cidr = net_settings[PUBLIC_NETWORK]['cidr']
- self[param_def]['ExternalNetCidr'] = str(public_cidr)
- if net_settings[PUBLIC_NETWORK]['vlan'] != 'native':
- self[param_def]['NeutronExternalNetworkBridge'] = '""'
- self[param_def]['ExternalNetworkVlanID'] = \
- net_settings[PUBLIC_NETWORK]['vlan']
- public_range = \
- net_settings[PUBLIC_NETWORK]['usable_ip_range'].split(',')
- self[param_def]['ExternalAllocationPools'] = \
- [{'start':
- public_range[0],
- 'end': public_range[1]
- }]
- self[param_def]['ExternalInterfaceDefaultRoute'] = \
- net_settings[PUBLIC_NETWORK]['gateway']
+ nets[ADMIN_NETWORK]['installer_vm']['ip']
self[param_def]['EC2MetadataIp'] = \
- net_settings[ADMIN_NETWORK]['provisioner_ip']
+ nets[ADMIN_NETWORK]['installer_vm']['ip']
self[param_def]['DnsServers'] = net_settings['dns_servers']
- if public_cidr.version == 6:
- postfix = '/external_v6.yaml'
+ if EXTERNAL_NETWORK in enabled_nets:
+ external_cidr = nets[EXTERNAL_NETWORK][0]['cidr']
+ self[param_def]['ExternalNetCidr'] = str(external_cidr)
+ if type(nets[EXTERNAL_NETWORK][0]['installer_vm']['vlan']) is int:
+ self[param_def]['NeutronExternalNetworkBridge'] = '""'
+ self[param_def]['ExternalNetworkVlanID'] = \
+ nets[EXTERNAL_NETWORK][0]['installer_vm']['vlan']
+ external_range = nets[EXTERNAL_NETWORK][0]['usable_ip_range']
+ self[param_def]['ExternalAllocationPools'] = \
+ [{'start': str(external_range[0]),
+ 'end': str(external_range[1])}]
+ self[param_def]['ExternalInterfaceDefaultRoute'] = \
+ nets[EXTERNAL_NETWORK][0]['gateway']
+
+ if external_cidr.version == 6:
+ postfix = '/external_v6.yaml'
+ else:
+ postfix = '/external.yaml'
else:
- postfix = '/external.yaml'
+ postfix = '/noop.yaml'
# apply resource registry update for EXTERNAL_RESOURCES
self._config_resource_reg(EXTERNAL_RESOURCES, postfix)
- if PRIVATE_NETWORK in enabled_networks:
- priv_range = net_settings[PRIVATE_NETWORK][
- 'usable_ip_range'].split(',')
+ if TENANT_NETWORK in enabled_nets:
+ tenant_range = nets[TENANT_NETWORK]['usable_ip_range']
self[param_def]['TenantAllocationPools'] = \
- [{'start':
- priv_range[0],
- 'end': priv_range[1]
- }]
- priv_cidr = net_settings[PRIVATE_NETWORK]['cidr']
- self[param_def]['TenantNetCidr'] = str(priv_cidr)
- if priv_cidr.version == 6:
+ [{'start': str(tenant_range[0]),
+ 'end': str(tenant_range[1])}]
+ tenant_cidr = nets[TENANT_NETWORK]['cidr']
+ self[param_def]['TenantNetCidr'] = str(tenant_cidr)
+ if tenant_cidr.version == 6:
postfix = '/tenant_v6.yaml'
else:
postfix = '/tenant.yaml'
- if net_settings[PRIVATE_NETWORK]['vlan'] != 'native':
- self[param_def]['TenantNetworkVlanID'] = \
- net_settings[PRIVATE_NETWORK]['vlan']
+
+ tenant_vlan = self._get_vlan(nets[TENANT_NETWORK])
+ if type(tenant_vlan) is int:
+ self[param_def]['TenantNetworkVlanID'] = tenant_vlan
else:
postfix = '/noop.yaml'
# apply resource registry update for TENANT_RESOURCES
self._config_resource_reg(TENANT_RESOURCES, postfix)
- if STORAGE_NETWORK in enabled_networks:
- storage_range = net_settings[STORAGE_NETWORK][
- 'usable_ip_range'].split(',')
+ if STORAGE_NETWORK in enabled_nets:
+ storage_range = nets[STORAGE_NETWORK]['usable_ip_range']
self[param_def]['StorageAllocationPools'] = \
- [{'start':
- storage_range[0],
- 'end':
- storage_range[1]
- }]
- storage_cidr = net_settings[STORAGE_NETWORK]['cidr']
+ [{'start': str(storage_range[0]),
+ 'end': str(storage_range[1])}]
+ storage_cidr = nets[STORAGE_NETWORK]['cidr']
self[param_def]['StorageNetCidr'] = str(storage_cidr)
if storage_cidr.version == 6:
postfix = '/storage_v6.yaml'
else:
postfix = '/storage.yaml'
- if net_settings[STORAGE_NETWORK]['vlan'] != 'native':
- self[param_def]['StorageNetworkVlanID'] = \
- net_settings[STORAGE_NETWORK]['vlan']
+ storage_vlan = self._get_vlan(nets[STORAGE_NETWORK])
+ if type(storage_vlan) is int:
+ self[param_def]['StorageNetworkVlanID'] = storage_vlan
else:
postfix = '/noop.yaml'
# apply resource registry update for STORAGE_RESOURCES
self._config_resource_reg(STORAGE_RESOURCES, postfix)
- if API_NETWORK in enabled_networks:
- api_range = net_settings[API_NETWORK][
- 'usable_ip_range'].split(',')
+ if API_NETWORK in enabled_nets:
+ api_range = nets[API_NETWORK]['usable_ip_range']
self[param_def]['InternalApiAllocationPools'] = \
- [{'start': api_range[0],
- 'end': api_range[1]
- }]
- api_cidr = net_settings[API_NETWORK]['cidr']
+ [{'start': str(api_range[0]),
+ 'end': str(api_range[1])}]
+ api_cidr = nets[API_NETWORK]['cidr']
self[param_def]['InternalApiNetCidr'] = str(api_cidr)
if api_cidr.version == 6:
postfix = '/internal_api_v6.yaml'
else:
postfix = '/internal_api.yaml'
- if net_settings[API_NETWORK]['vlan'] != 'native':
- self[param_def]['InternalApiNetworkVlanID'] = \
- net_settings[API_NETWORK]['vlan']
+ api_vlan = self._get_vlan(nets[API_NETWORK])
+ if type(api_vlan) is int:
+ self[param_def]['InternalApiNetworkVlanID'] = api_vlan
else:
postfix = '/noop.yaml'
@@ -184,6 +182,14 @@ class NetworkEnvironment(dict):
for flag in IPV6_FLAGS:
self[param_def][flag] = True
+ def _get_vlan(self, network):
+ if type(network['nic_mapping'][CONTROLLER]['vlan']) is int:
+ return network['nic_mapping'][CONTROLLER]['vlan']
+ elif type(network['nic_mapping'][COMPUTE]['vlan']) is int:
+ return network['nic_mapping'][COMPUTE]['vlan']
+ else:
+ return 'native'
+
def _set_tht_dir(self):
self.tht_dir = None
for key, prefix in TENANT_RESOURCES.items():
diff --git a/lib/python/apex/network_settings.py b/lib/python/apex/network_settings.py
index 8e39afd6..006d18c3 100644
--- a/lib/python/apex/network_settings.py
+++ b/lib/python/apex/network_settings.py
@@ -10,20 +10,21 @@
import yaml
import logging
import ipaddress
+
+from copy import copy
+
from . import ip_utils
-from .common.utils import str2bool
+from .common import utils
from .common.constants import (
+ CONTROLLER,
+ COMPUTE,
+ ROLES,
+ DOMAIN_NAME,
+ DNS_SERVERS,
ADMIN_NETWORK,
- PRIVATE_NETWORK,
- PUBLIC_NETWORK,
- STORAGE_NETWORK,
- API_NETWORK,
+ EXTERNAL_NETWORK,
OPNFV_NETWORK_TYPES,
- DNS_SERVERS,
- DOMAIN_NAME,
- ROLES,
- COMPUTE,
- CONTROLLER)
+)
class NetworkSettings(dict):
@@ -38,7 +39,7 @@ class NetworkSettings(dict):
for deploy.sh consumption. This object will later be used directly as
deployment script move to python.
"""
- def __init__(self, filename, network_isolation):
+ def __init__(self, filename):
init_dict = {}
if type(filename) is str:
with open(filename, 'r') as network_settings_file:
@@ -46,7 +47,6 @@ class NetworkSettings(dict):
else:
# assume input is a dict to build from
init_dict = filename
-
super().__init__(init_dict)
if 'apex' in self:
@@ -63,52 +63,56 @@ class NetworkSettings(dict):
# merge the apex specific config into the first class settings
merge(self, copy(self['apex']))
- self.network_isolation = network_isolation
self.enabled_network_list = []
self.nics = {COMPUTE: {}, CONTROLLER: {}}
self.nics_specified = {COMPUTE: False, CONTROLLER: False}
self._validate_input()
+ def get_network(self, network):
+ if network == EXTERNAL_NETWORK and self['networks'][network]:
+ return self['networks'][network][0]
+ else:
+ return self['networks'][network]
+
def _validate_input(self):
"""
Validates the network settings file and populates all fields.
NetworkSettingsException will be raised if validation fails.
"""
- if ADMIN_NETWORK not in self or \
- not str2bool(self[ADMIN_NETWORK].get(
- 'enabled')):
- raise NetworkSettingsException("You must enable admin_network "
- "and configure it explicitly or "
- "use auto-detection")
- if self.network_isolation and \
- (PUBLIC_NETWORK not in self or not
- str2bool(self[PUBLIC_NETWORK].get(
- 'enabled'))):
- raise NetworkSettingsException("You must enable public_network "
+ if not self['networks'].get(ADMIN_NETWORK, {}).get('enabled', False):
+ raise NetworkSettingsException("You must enable admin network "
"and configure it explicitly or "
"use auto-detection")
for network in OPNFV_NETWORK_TYPES:
- if network in self:
- if str2bool(self[network].get('enabled')):
+ if network in self['networks']:
+ _network = self.get_network(network)
+ if _network.get('enabled', True):
logging.info("{} enabled".format(network))
self._config_required_settings(network)
+ if network == EXTERNAL_NETWORK:
+ nicmap = _network['nic_mapping']
+ else:
+ nicmap = _network['nic_mapping']
+ iface = nicmap[CONTROLLER]['members'][0]
self._config_ip_range(network=network,
- setting='usable_ip_range',
+ interface=iface,
+ ip_range='usable_ip_range',
start_offset=21, end_offset=21)
- self._config_optional_settings(network)
self.enabled_network_list.append(network)
self._validate_overcloud_nic_order(network)
+ # TODO self._config_optional_settings(network)
else:
logging.info("{} disabled, will collapse with "
- "admin_network".format(network))
+ "admin network".format(network))
else:
logging.info("{} is not in specified, will collapse with "
- "admin_network".format(network))
+ "admin network".format(network))
+ if 'dns-domain' not in self:
+            self['dns-domain'] = DOMAIN_NAME
self['dns_servers'] = self.get('dns_servers', DNS_SERVERS)
- self['domain_name'] = self.get('domain_name', DOMAIN_NAME)
def _validate_overcloud_nic_order(self, network):
"""
@@ -116,42 +120,35 @@ class NetworkSettings(dict):
for network
If nic order is specified in a network for a profile, it should be
- specified for every network with that profile other than admin_network
+ specified for every network with that profile other than admin network
Duplicate nic names are also not allowed across different networks
:param network: network to detect if nic order present
:return: None
"""
-
for role in ROLES:
- interface = role+'_interface'
- nic_index = self.get_enabled_networks().index(network) + 1
- if interface in self[network]:
- if any(y == self[network][interface] for x, y in
- self.nics[role].items()):
- raise NetworkSettingsException("Duplicate {} already "
- "specified for "
- "another network"
- .format(self[network]
- [interface]))
- self.nics[role][network] = self[network][interface]
+ _network = self.get_network(network)
+ _nicmap = _network.get('nic_mapping', {})
+ _role = _nicmap.get(role, {})
+ interfaces = _role.get('members', [])
+
+ if interfaces:
+ interface = interfaces[0]
+ if type(_role.get('vlan', 'native')) is not int and \
+ any(y == interface for x, y in self.nics[role].items()):
+ raise NetworkSettingsException(
+ "Duplicate {} already specified for "
+ "another network".format(interface))
+ self.nics[role][network] = interface
self.nics_specified[role] = True
logging.info("{} nic order specified for network {"
"}".format(role, network))
- elif self.nics_specified[role]:
- logging.error("{} nic order not specified for network {"
- "}".format(role, network))
- raise NetworkSettingsException("Must specify {} for all "
- "enabled networks (other than "
- " admin) or not specify it for "
- "any".format(interface))
else:
- logging.info("{} nic order not specified for network {"
- "}. Will use logical default "
- "nic{}".format(interface, network, nic_index))
- self.nics[role][network] = 'nic' + str(nic_index)
- nic_index += 1
+ raise NetworkSettingsException(
+ "Interface members are not supplied for {} network "
+ "for the {} role. Please add nic assignments"
+ "".format(network, role))
def _config_required_settings(self, network):
"""
@@ -164,85 +161,93 @@ class NetworkSettings(dict):
given NIC in the system. The resulting config in settings object will
be an ipaddress.network object, replacing the NIC name.
"""
+ _network = self.get_network(network)
# if vlan not defined then default it to native
if network is not ADMIN_NETWORK:
- if 'vlan' not in self[network]:
- self[network]['vlan'] = 'native'
+ for role in ROLES:
+ if 'vlan' not in _network['nic_mapping'][role]:
+ _network['nic_mapping'][role]['vlan'] = 'native'
- cidr = self[network].get('cidr')
- nic_name = self[network].get('bridged_interface')
+ cidr = _network.get('cidr')
if cidr:
- cidr = ipaddress.ip_network(self[network]['cidr'])
- self[network]['cidr'] = cidr
+ cidr = ipaddress.ip_network(_network['cidr'])
+ _network['cidr'] = cidr
logging.info("{}_cidr: {}".format(network, cidr))
- return 0
- elif nic_name:
+ elif 'installer_vm' in _network:
+ ucloud_if_list = _network['installer_vm']['members']
# If cidr is not specified, we need to know if we should find
# IPv6 or IPv4 address on the interface
- if str2bool(self[network].get('ipv6')):
- address_family = 6
- else:
- address_family = 4
- nic_interface = ip_utils.get_interface(nic_name, address_family)
- if nic_interface:
- self[network]['bridged_interface'] = nic_interface
+ ip = ipaddress.ip_address(_network['installer_vm']['ip'])
+ nic_if = ip_utils.get_interface(ucloud_if_list[0], ip.version)
+ if nic_if:
+ ucloud_if_list = [nic_if]
logging.info("{}_bridged_interface: {}".
- format(network, nic_interface))
- return 0
+ format(network, nic_if))
else:
- raise NetworkSettingsException("Auto detection failed for {}: "
- "Unable to find valid ip for "
- "interface {}"
- .format(network, nic_name))
+ raise NetworkSettingsException(
+ "Auto detection failed for {}: Unable to find valid "
+ "ip for interface {}".format(network, ucloud_if_list[0]))
else:
- raise NetworkSettingsException("Auto detection failed for {}: "
- "either bridge_interface or cidr "
- "must be specified"
- .format(network))
+ raise NetworkSettingsException(
+ "Auto detection failed for {}: either installer_vm "
+ "members or cidr must be specified".format(network))
- def _config_ip_range(self, network, setting, start_offset=None,
- end_offset=None, count=None):
+ # undercloud settings
+ if network == ADMIN_NETWORK:
+ provisioner_ip = _network['installer_vm']['ip']
+ iface = _network['installer_vm']['members'][0]
+ if not provisioner_ip:
+ _network['installer_vm']['ip'] = self._gen_ip(network, 1)
+ self._config_ip_range(network=network, interface=iface,
+ ip_range='dhcp_range',
+ start_offset=2, count=9)
+ self._config_ip_range(network=network, interface=iface,
+ ip_range='introspection_range',
+ start_offset=11, count=9)
+ elif network == EXTERNAL_NETWORK:
+ provisioner_ip = _network['installer_vm']['ip']
+ iface = _network['installer_vm']['members'][0]
+ if not provisioner_ip:
+ _network['installer_vm']['ip'] = self._gen_ip(network, 1)
+ self._config_ip_range(network=network, interface=iface,
+ ip_range='floating_ip_range',
+ end_offset=2, count=20)
+
+ gateway = _network['gateway']
+ interface = _network['installer_vm']['ip']
+ self._config_gateway(network, gateway, interface)
+
+ def _config_ip_range(self, network, ip_range, interface=None,
+ start_offset=None, end_offset=None, count=None):
"""
Configures IP range for a given setting.
-
If the setting is already specified, no change will be made.
-
The spec for start_offset, end_offset and count are identical to
ip_utils.get_ip_range.
"""
- ip_range = self[network].get(setting)
- interface = self[network].get('bridged_interface')
-
- if not ip_range:
- cidr = self[network].get('cidr')
- ip_range = ip_utils.get_ip_range(start_offset=start_offset,
- end_offset=end_offset,
- count=count,
- cidr=cidr,
- interface=interface)
- self[network][setting] = ip_range
-
- logging.info("{}_{}: {}".format(network, setting, ip_range))
-
- def _config_ip(self, network, setting, offset):
+ _network = self.get_network(network)
+ if ip_range not in _network:
+ cidr = _network.get('cidr')
+ _ip_range = ip_utils.get_ip_range(start_offset=start_offset,
+ end_offset=end_offset,
+ count=count,
+ cidr=cidr,
+ interface=interface)
+ _network[ip_range] = _ip_range.split(',')
+
+ logging.info("Config IP Range: {} {}".format(network, ip_range))
+
+ def _gen_ip(self, network, offset):
"""
- Configures IP for a given setting.
-
- If the setting is already specified, no change will be made.
-
- The spec for offset is identical to ip_utils.get_ip
+        Generate an IP at the given offset within the given network
"""
- ip = self[network].get(setting)
- interface = self[network].get('bridged_interface')
-
- if not ip:
- cidr = self[network].get('cidr')
- ip = ip_utils.get_ip(offset, cidr, interface)
- self[network][setting] = ip
-
- logging.info("{}_{}: {}".format(network, setting, ip))
+ _network = self.get_network(network)
+ cidr = _network.get('cidr')
+ ip = ip_utils.get_ip(offset, cidr)
+ logging.info("Config IP: {} {}".format(network, ip))
+ return ip
def _config_optional_settings(self, network):
"""
@@ -257,42 +262,41 @@ class NetworkSettings(dict):
- gateway
"""
if network == ADMIN_NETWORK:
- self._config_ip(network, 'provisioner_ip', 1)
- self._config_ip_range(network=network, setting='dhcp_range',
+ self._config_ip(network, None, 'provisioner_ip', 1)
+ self._config_ip_range(network=network,
+ ip_range='dhcp_range',
start_offset=2, count=9)
self._config_ip_range(network=network,
- setting='introspection_range',
+ ip_range='introspection_range',
start_offset=11, count=9)
- elif network == PUBLIC_NETWORK:
- self._config_ip(network, 'provisioner_ip', 1)
+ elif network == EXTERNAL_NETWORK:
+ self._config_ip(network, None, 'provisioner_ip', 1)
self._config_ip_range(network=network,
- setting='floating_ip_range',
+ ip_range='floating_ip_range',
end_offset=2, count=20)
self._config_gateway(network)
- def _config_gateway(self, network):
+ def _config_gateway(self, network, gateway, interface):
"""
Configures gateway setting for a given network.
If cidr is specified, we always use the first address in the address
space for gateway. Otherwise, we detect the system gateway.
"""
- gateway = self[network].get('gateway')
- interface = self[network].get('bridged_interface')
-
+ _network = self.get_network(network)
if not gateway:
- cidr = self[network].get('cidr')
+ cidr = _network.get('cidr')
if cidr:
- gateway = ip_utils.get_ip(1, cidr)
+ _gateway = ip_utils.get_ip(1, cidr)
else:
- gateway = ip_utils.find_gateway(interface)
+ _gateway = ip_utils.find_gateway(interface)
- if gateway:
- self[network]['gateway'] = gateway
+ if _gateway:
+ _network['gateway'] = _gateway
else:
raise NetworkSettingsException("Failed to set gateway")
- logging.info("{}_gateway: {}".format(network, gateway))
+ logging.info("Config Gateway: {} {}".format(network, gateway))
def dump_bash(self, path=None):
"""
@@ -301,45 +305,50 @@ class NetworkSettings(dict):
If optional path is provided, bash string will be written to the file
instead of stdout.
"""
+ def flatten(name, obj, delim=','):
+ """
+            flatten lists to delim-separated strings
+            flatten dicts to underscored key names and string values
+ """
+ if type(obj) is list:
+ return "{}=\'{}\'\n".format(name,
+ delim.join(map(lambda x: str(x),
+ obj)))
+ elif type(obj) is dict:
+ flat_str = ''
+ for k in obj:
+ flat_str += flatten("{}_{}".format(name, k), obj[k])
+ return flat_str
+ elif type(obj) is str:
+ return "{}='{}'\n".format(name, obj)
+ else:
+ return "{}={}\n".format(name, str(obj))
+
bash_str = ''
for network in self.enabled_network_list:
- for key, value in self[network].items():
- bash_str += "{}_{}={}\n".format(network, key, value)
- bash_str += "enabled_network_list='{}'\n" \
- .format(' '.join(self.enabled_network_list))
- bash_str += "ip_addr_family={}\n".format(self.get_ip_addr_family())
- dns_list = ""
- for dns_server in self['dns_servers']:
- dns_list = dns_list + "{} ".format(dns_server)
- dns_list = dns_list.strip()
- bash_str += "dns_servers=\'{}\'\n".format(dns_list)
- bash_str += "domain_name=\'{}\'\n".format(self['domain_name'])
+ _network = self.get_network(network)
+ bash_str += flatten(network, _network)
+ bash_str += flatten('enabled_network_list',
+ self.enabled_network_list, ' ')
+ bash_str += flatten('ip_addr_family', self.get_ip_addr_family())
+ bash_str += flatten('dns_servers', self['dns_servers'], ' ')
+ bash_str += flatten('domain_name', self['dns-domain'], ' ')
if path:
with open(path, 'w') as file:
file.write(bash_str)
else:
print(bash_str)
- def get_ip_addr_family(self):
+ def get_ip_addr_family(self,):
"""
Returns IP address family for current deployment.
If any enabled network has IPv6 CIDR, the deployment is classified as
IPv6.
"""
- for network in self.enabled_network_list:
- cidr = ipaddress.ip_network(self[network]['cidr'])
- if cidr.version == 6:
- return 6
-
- return 4
-
- def get_enabled_networks(self):
- """
- Getter for enabled network list
- :return: list of enabled networks
- """
- return self.enabled_network_list
+ return max([
+ ipaddress.ip_network(self.get_network(n)['cidr']).version
+ for n in self.enabled_network_list])
class NetworkSettingsException(Exception):
diff --git a/lib/python/apex_python_utils.py b/lib/python/apex_python_utils.py
index 9d6110bb..b0ebb270 100755
--- a/lib/python/apex_python_utils.py
+++ b/lib/python/apex_python_utils.py
@@ -14,8 +14,6 @@ import logging
import os
import yaml
-from copy import copy
-
from jinja2 import Environment
from jinja2 import FileSystemLoader
@@ -35,11 +33,8 @@ def parse_net_settings(args):
Args:
- file: string
file to network_settings.yaml file
- - network_isolation: bool
- enable or disable network_isolation
"""
- settings = NetworkSettings(args.net_settings_file,
- args.network_isolation)
+ settings = NetworkSettings(args.net_settings_file)
net_env = NetworkEnvironment(settings, args.net_env_file,
args.compute_pre_config,
args.controller_pre_config)
@@ -108,25 +103,15 @@ def build_nic_template(args):
"""
template_dir, template = args.template.rsplit('/', 1)
- netsets = NetworkSettings(args.net_settings_file,
- args.network_isolation)
+ netsets = NetworkSettings(args.net_settings_file)
env = Environment(loader=FileSystemLoader(template_dir), autoescape=True)
template = env.get_template(template)
- # gather vlan values into a dict
- net_list = copy(netsets.enabled_network_list)
- net_list.remove(ADMIN_NETWORK)
- vlans_vals = map(lambda x: netsets[x]['vlan'], net_list)
- vlans = dict(zip(net_list, vlans_vals))
- nics = netsets.nics
-
- print(template.render(enabled_networks=netsets.enabled_network_list,
+ print(template.render(nets=netsets['networks'],
role=args.role,
- vlans=vlans,
+ external_net_af=netsets.get_ip_addr_family(),
external_net_type=args.ext_net_type,
- external_net_af=args.address_family,
- ovs_dpdk_bridge=args.ovs_dpdk_bridge,
- nics=nics))
+ ovs_dpdk_bridge=args.ovs_dpdk_bridge))
def get_parser():
@@ -143,9 +128,6 @@ def get_parser():
default='network-settings.yaml',
dest='net_settings_file',
help='path to network settings file')
- net_settings.add_argument('--flat', action='store_false',
- default=True, dest='network_isolation',
- help='disable network isolation')
net_settings.add_argument('-e', '--net-env-file',
default="network-environment.yaml",
dest='net_env_file',
@@ -189,15 +171,10 @@ def get_parser():
default='network-settings.yaml',
dest='net_settings_file',
help='path to network settings file')
- nic_template.add_argument('--flat', action='store_false',
- default=True, dest='network_isolation',
- help='disable network isolation')
nic_template.add_argument('-e', '--ext-net-type', default='interface',
dest='ext_net_type',
choices=['interface', 'br-ex'],
help='External network type')
- nic_template.add_argument('-af', '--address-family', type=int, default=4,
- dest='address_family', help='IP address family')
nic_template.add_argument('-d', '--ovs-dpdk-bridge',
default=None, dest='ovs_dpdk_bridge',
help='OVS DPDK Bridge Name')
diff --git a/lib/undercloud-functions.sh b/lib/undercloud-functions.sh
index ccf39c02..98552f29 100755
--- a/lib/undercloud-functions.sh
+++ b/lib/undercloud-functions.sh
@@ -12,9 +12,9 @@
##params: none
function setup_undercloud_vm {
if ! virsh list --all | grep undercloud > /dev/null; then
- undercloud_nets="default admin_network"
- if [[ $enabled_network_list =~ "public_network" ]]; then
- undercloud_nets+=" public_network"
+ undercloud_nets="default admin"
+ if [[ $enabled_network_list =~ "external" ]]; then
+ undercloud_nets+=" external"
fi
define_vm undercloud hd 30 "$undercloud_nets" 4 12288
@@ -120,32 +120,31 @@ function configure_undercloud {
local controller_nic_template compute_nic_template
echo
echo "Copying configuration files to Undercloud"
- if [[ "$net_isolation_enabled" == "TRUE" ]]; then
- echo -e "${blue}Network Environment set for Deployment: ${reset}"
- cat $APEX_TMP_DIR/network-environment.yaml
- scp ${SSH_OPTIONS[@]} $APEX_TMP_DIR/network-environment.yaml "stack@$UNDERCLOUD":
-
- # check for ODL L3/ONOS
- if [ "${deploy_options_array['sdn_l3']}" == 'True' ]; then
- ext_net_type=br-ex
- fi
-
- if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
- ovs_dpdk_bridge='br-phy'
- else
- ovs_dpdk_bridge=''
- fi
-
- if ! controller_nic_template=$(python3.4 -B $LIB/python/apex_python_utils.py nic-template -r controller -s $NETSETS $net_isolation_arg -t $CONFIG/nics-template.yaml.jinja2 -e "br-ex" -af $ip_addr_family); then
- echo -e "${red}ERROR: Failed to generate controller NIC heat template ${reset}"
- exit 1
- fi
+ echo -e "${blue}Network Environment set for Deployment: ${reset}"
+ cat $APEX_TMP_DIR/network-environment.yaml
+ scp ${SSH_OPTIONS[@]} $APEX_TMP_DIR/network-environment.yaml "stack@$UNDERCLOUD":
- if ! compute_nic_template=$(python3.4 -B $LIB/python/apex_python_utils.py nic-template -r compute -s $NETSETS $net_isolation_arg -t $CONFIG/nics-template.yaml.jinja2 -e $ext_net_type -af $ip_addr_family -d "$ovs_dpdk_bridge"); then
- echo -e "${red}ERROR: Failed to generate compute NIC heat template ${reset}"
- exit 1
- fi
- ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
+ # check for ODL L3/ONOS
+ if [ "${deploy_options_array['sdn_l3']}" == 'True' ]; then
+ ext_net_type=br-ex
+ fi
+
+ if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
+ ovs_dpdk_bridge='br-phy'
+ else
+ ovs_dpdk_bridge=''
+ fi
+
+ if ! controller_nic_template=$(python3 -B $LIB/python/apex_python_utils.py nic-template -r controller -s $NETSETS -t $CONFIG/nics-template.yaml.jinja2 -e "br-ex"); then
+ echo -e "${red}ERROR: Failed to generate controller NIC heat template ${reset}"
+ exit 1
+ fi
+
+ if ! compute_nic_template=$(python3 -B $LIB/python/apex_python_utils.py nic-template -r compute -s $NETSETS -t $CONFIG/nics-template.yaml.jinja2 -e $ext_net_type -d "$ovs_dpdk_bridge"); then
+ echo -e "${red}ERROR: Failed to generate compute NIC heat template ${reset}"
+ exit 1
+ fi
+ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
mkdir nics/
cat > nics/controller.yaml << EOF
$controller_nic_template
@@ -154,7 +153,6 @@ cat > nics/compute.yaml << EOF
$compute_nic_template
EOF
EOI
- fi
# ensure stack user on Undercloud machine has an ssh key
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "if [ ! -e ~/.ssh/id_rsa.pub ]; then ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa; fi"
@@ -189,27 +187,24 @@ EOI
echo "Running undercloud configuration."
echo "Logging undercloud configuration to undercloud:/home/stack/apex-undercloud-install.log"
ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
-if [[ "$net_isolation_enabled" == "TRUE" ]]; then
- sed -i 's/#local_ip/local_ip/' undercloud.conf
- sed -i 's/#network_gateway/network_gateway/' undercloud.conf
- sed -i 's/#network_cidr/network_cidr/' undercloud.conf
- sed -i 's/#dhcp_start/dhcp_start/' undercloud.conf
- sed -i 's/#dhcp_end/dhcp_end/' undercloud.conf
- sed -i 's/#inspection_iprange/inspection_iprange/' undercloud.conf
- sed -i 's/#undercloud_debug/undercloud_debug/' undercloud.conf
-
- openstack-config --set undercloud.conf DEFAULT local_ip ${admin_network_provisioner_ip}/${admin_network_cidr##*/}
- openstack-config --set undercloud.conf DEFAULT network_gateway ${admin_network_provisioner_ip}
- openstack-config --set undercloud.conf DEFAULT network_cidr ${admin_network_cidr}
- openstack-config --set undercloud.conf DEFAULT dhcp_start ${admin_network_dhcp_range%%,*}
- openstack-config --set undercloud.conf DEFAULT dhcp_end ${admin_network_dhcp_range##*,}
- openstack-config --set undercloud.conf DEFAULT inspection_iprange ${admin_network_introspection_range}
- openstack-config --set undercloud.conf DEFAULT undercloud_debug false
- openstack-config --set undercloud.conf DEFAULT undercloud_hostname "undercloud.${domain_name}"
- sudo openstack-config --set /etc/ironic/ironic.conf disk_utils iscsi_verify_attempts 30
- sudo openstack-config --set /etc/ironic/ironic.conf disk_partitioner check_device_max_retries 40
-
-fi
+sed -i 's/#local_ip/local_ip/' undercloud.conf
+sed -i 's/#network_gateway/network_gateway/' undercloud.conf
+sed -i 's/#network_cidr/network_cidr/' undercloud.conf
+sed -i 's/#dhcp_start/dhcp_start/' undercloud.conf
+sed -i 's/#dhcp_end/dhcp_end/' undercloud.conf
+sed -i 's/#inspection_iprange/inspection_iprange/' undercloud.conf
+sed -i 's/#undercloud_debug/undercloud_debug/' undercloud.conf
+
+openstack-config --set undercloud.conf DEFAULT local_ip ${admin_installer_vm_ip}/${admin_cidr##*/}
+openstack-config --set undercloud.conf DEFAULT network_gateway ${admin_installer_vm_ip}
+openstack-config --set undercloud.conf DEFAULT network_cidr ${admin_cidr}
+openstack-config --set undercloud.conf DEFAULT dhcp_start ${admin_dhcp_range%%,*}
+openstack-config --set undercloud.conf DEFAULT dhcp_end ${admin_dhcp_range##*,}
+openstack-config --set undercloud.conf DEFAULT inspection_iprange ${admin_introspection_range}
+openstack-config --set undercloud.conf DEFAULT undercloud_debug false
+openstack-config --set undercloud.conf DEFAULT undercloud_hostname "undercloud.${domain_name}"
+sudo openstack-config --set /etc/ironic/ironic.conf disk_utils iscsi_verify_attempts 30
+sudo openstack-config --set /etc/ironic/ironic.conf disk_partitioner check_device_max_retries 40
sudo sed -i '/CephClusterFSID:/c\\ CephClusterFSID: \\x27$(cat /proc/sys/kernel/random/uuid)\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
sudo sed -i '/CephMonKey:/c\\ CephMonKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
@@ -254,22 +249,22 @@ EOI
# configure external network
ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" << EOI
-if [[ "$public_network_vlan" != "native" ]]; then
- cat <<EOF > /etc/sysconfig/network-scripts/ifcfg-vlan${public_network_vlan}
-DEVICE=vlan${public_network_vlan}
+if [[ "$external_installer_vm_vlan" != "native" ]]; then
+ cat <<EOF > /etc/sysconfig/network-scripts/ifcfg-vlan${external_installer_vm_vlan}
+DEVICE=vlan${external_installer_vm_vlan}
ONBOOT=yes
DEVICETYPE=ovs
TYPE=OVSIntPort
BOOTPROTO=static
-IPADDR=${public_network_provisioner_ip}
-PREFIX=${public_network_cidr##*/}
+IPADDR=${external_installer_vm_ip}
+PREFIX=${external_cidr##*/}
OVS_BRIDGE=br-ctlplane
-OVS_OPTIONS="tag=${public_network_vlan}"
+OVS_OPTIONS="tag=${external_installer_vm_vlan}"
EOF
- ifup vlan${public_network_vlan}
+ ifup vlan${external_installer_vm_vlan}
else
- if ! ip a s eth2 | grep ${public_network_provisioner_ip} > /dev/null; then
- ip a a ${public_network_provisioner_ip}/${public_network_cidr##*/} dev eth2
+ if ! ip a s eth2 | grep ${external_installer_vm_ip} > /dev/null; then
+ ip a a ${external_installer_vm_ip}/${external_cidr##*/} dev eth2
ip link set up dev eth2
fi
fi
diff --git a/lib/virtual-setup-functions.sh b/lib/virtual-setup-functions.sh
index 116d19b6..8aaa3594 100755
--- a/lib/virtual-setup-functions.sh
+++ b/lib/virtual-setup-functions.sh
@@ -51,8 +51,8 @@ EOF
fi
fi
if ! virsh list --all | grep baremetal${i} > /dev/null; then
- define_vm baremetal${i} network 41 'admin_network' $vcpus $ramsize
- for n in private_network public_network storage_network api_network; do
+ define_vm baremetal${i} network 41 'admin' $vcpus $ramsize
+ for n in tenant external storage api; do
if [[ $enabled_network_list =~ $n ]]; then
echo -n "$n "
virsh attach-interface --domain baremetal${i} --type network --source $n --model virtio --config
@@ -62,7 +62,7 @@ EOF
echo "Found baremetal${i} VM, using existing VM"
fi
#virsh vol-list default | grep baremetal${i} 2>&1> /dev/null || virsh vol-create-as default baremetal${i}.qcow2 41G --format qcow2
- mac=$(virsh domiflist baremetal${i} | grep admin_network | awk '{ print $5 }')
+ mac=$(virsh domiflist baremetal${i} | grep admin | awk '{ print $5 }')
cat >> $APEX_TMP_DIR/inventory-virt.yaml << EOF
node${i}:
diff --git a/tests/config/network_settings_duplicate_nic.yaml b/tests/config/network_settings_duplicate_nic.yaml
deleted file mode 100644
index 24dd5ca0..00000000
--- a/tests/config/network_settings_duplicate_nic.yaml
+++ /dev/null
@@ -1,115 +0,0 @@
-# This configuration file defines Network Environment for a
-# Baremetal Deployment of OPNFV. It contains default values
-# for 4 following networks:
-#
-# - admin
-# - private*
-# - public
-# - storage*
-#
-# *) optional networks
-#
-# Any values missing from this configuration file will be
-# auto-detected by deployment script from the existing network
-# configuration of the jumphost.
-#
-# Optional networks will be consolidated with the admin network
-# if not explicitely configured.
-#
-# See short description of the networks in the comments below.
-#
-
-# DNS Servers for all nodes, comma delimited list
-dns_servers: ["8.8.8.8", "8.8.4.4"]
-
-# "admin" is the short name for Control Plane Network.
-# During OPNFV deployment it is used for node provisioning so
-# PXE boot should be enabled for the related interfaces on all
-# the nodes in the OPNFV cluster. After the deployment this
-# network is used as the OpenStack management network which
-# carries e.g. communication between its internal components.
-#
-admin_network:
- enabled: true
- network_type: bridged
- bridged_interface: ''
- bond_interfaces: ''
- compute_interface: eth1
- controller_interface: eth2
- usable_ip_range: 192.0.2.11,192.0.2.99
- gateway: 192.0.2.1
- provisioner_ip: 192.0.2.1
- cidr: 192.0.2.0/24
- dhcp_range: 192.0.2.2,192.0.2.10
- introspection_range: 192.0.2.100,192.0.2.120
-
-# "private" is an optional network used as underlying physical
-# network for virtual provider and tenant networks created by
-# users. Traffic between virtual machines is carried by this
-# network.
-#
-private_network:
- enabled: true
- vlan: native
- cidr: 11.0.0.0/24
- compute_interface: enp0s4
- controller_interface: nic3
-
-# "public" network is used for external connectivity.
-# The external network provides Internet access for virtual
-# machines. If floating IP range is defined for this network,
-# floating IP addresses can be used for accessing virtual
-# machines from outside of OPNFV cluster. Also external REST
-# API calls use this network.
-#
-public_network:
- enabled: true
- network_type: ''
- bridged_interface: ''
- compute_interface: eth1
- controller_interface: enp0s3
- vlan: native
- cidr: 192.168.37.0/24
- gateway: 192.168.37.1
- floating_ip_range: 192.168.37.200,192.168.37.220
- usable_ip_range: 192.168.37.10,192.168.37.199
- provisioner_ip: 192.168.37.1
-
-# "storage" is an optional network used by storage backends.
-# You can configure this network in order to reduce load on
-# Control Plane Network.
-#
-storage_network:
- enabled: true
- vlan: native
- cidr: 12.0.0.0/24
- compute_interface: eth5
- controller_interface: eth6
-
-#admin_network:
-# enabled: true
-# network_type: bridged #Indicates if this network will be bridged to an interface, or to a bond
-# bridged_interface: '' #Interface to bridge to for installer VM
-# bond_interfaces: '' #Interfaces to create bond with for installer VM
-# compute_interface: nic4 #Interface used for this network on the compute node. Can either be logical nic name like "nic1" or real name like "eth1"
-# controller_interface: nic4 #Interface used for this network on the controller node. Can either be logical nic name like "nic1" or real name like "eth1"
-# vlan: native #VLAN tag to use, native means none
-# usable_ip_range: 192.0.2.11,192.0.2.99 #Usable ip range, if empty entire range is usable, ex. 192.168.1.10,192.168.1.20
-# gateway: 192.0.2.1 #Gateway (only needed when public_network is disabled), if empty it is auto-detected
-# provisioner_ip: 192.0.2.1 #installer VM IP, if empty it is the next available IP in the admin subnet
-# cidr: 192.0.2.0/24 #subnet in CIDR format 192.168.1.0/24, if empty it will be auto-detected
-# dhcp_range: 192.0.2.2,192.0.2.10 #dhcp range for the admin network, if empty it will be automatically provisioned
-# introspection_range: 192.0.2.100,192.0.2.120 #Range used for introspection phase (examining nodes)
-#private_network:
-# enabled: false #If disabled, internal api traffic will collapse to admin_network
-#public_network:
-# enabled: true #If disabled, public_network traffic will collapse to admin network
-# network_type: ''
-# bridged_interface: ''
-# cidr: 192.168.37.0/24
-# gateway: 192.168.37.1
-# floating_ip_range: 192.168.37.200,192.168.37.220 #Range to allocate to floating IPs for the public network with Neutron
-# usable_ip_range: 192.168.37.10,192.168.37.199 #Usable IP range on the public network, usually this is a shared subnet
-# provisioner_ip: 192.168.37.1
-#storage_network:
-# enabled: false #If disabled, storage_network traffic will collapse to admin network
diff --git a/tests/config/network_settings_missing_required_nic.yaml b/tests/config/network_settings_missing_required_nic.yaml
deleted file mode 100644
index 18886278..00000000
--- a/tests/config/network_settings_missing_required_nic.yaml
+++ /dev/null
@@ -1,113 +0,0 @@
-# This configuration file defines Network Environment for a
-# Baremetal Deployment of OPNFV. It contains default values
-# for 4 following networks:
-#
-# - admin
-# - private*
-# - public
-# - storage*
-#
-# *) optional networks
-#
-# Any values missing from this configuration file will be
-# auto-detected by deployment script from the existing network
-# configuration of the jumphost.
-#
-# Optional networks will be consolidated with the admin network
-# if not explicitely configured.
-#
-# See short description of the networks in the comments below.
-#
-
-# DNS Servers for all nodes, comma delimited list
-dns_servers: ["8.8.8.8", "8.8.4.4"]
-
-# "admin" is the short name for Control Plane Network.
-# During OPNFV deployment it is used for node provisioning so
-# PXE boot should be enabled for the related interfaces on all
-# the nodes in the OPNFV cluster. After the deployment this
-# network is used as the OpenStack management network which
-# carries e.g. communication between its internal components.
-#
-admin_network:
- enabled: true
- network_type: bridged
- bridged_interface: ''
- bond_interfaces: ''
- compute_interface: eth1
- controller_interface: eth2
- usable_ip_range: 192.0.2.11,192.0.2.99
- gateway: 192.0.2.1
- provisioner_ip: 192.0.2.1
- cidr: 192.0.2.0/24
- dhcp_range: 192.0.2.2,192.0.2.10
- introspection_range: 192.0.2.100,192.0.2.120
-
-# "private" is an optional network used as underlying physical
-# network for virtual provider and tenant networks created by
-# users. Traffic between virtual machines is carried by this
-# network.
-#
-private_network:
- enabled: true
- vlan: native
- cidr: 11.0.0.0/24
- compute_interface: enp0s4
- controller_interface: nic3
-
-# "public" network is used for external connectivity.
-# The external network provides Internet access for virtual
-# machines. If floating IP range is defined for this network,
-# floating IP addresses can be used for accessing virtual
-# machines from outside of OPNFV cluster. Also external REST
-# API calls use this network.
-#
-public_network:
- enabled: true
- network_type: ''
- bridged_interface: ''
- vlan: native
- cidr: 192.168.37.0/24
- gateway: 192.168.37.1
- floating_ip_range: 192.168.37.200,192.168.37.220
- usable_ip_range: 192.168.37.10,192.168.37.199
- provisioner_ip: 192.168.37.1
-
-# "storage" is an optional network used by storage backends.
-# You can configure this network in order to reduce load on
-# Control Plane Network.
-#
-storage_network:
- enabled: true
- vlan: native
- cidr: 12.0.0.0/24
- compute_interface: eth5
- controller_interface: eth6
-
-#admin_network:
-# enabled: true
-# network_type: bridged #Indicates if this network will be bridged to an interface, or to a bond
-# bridged_interface: '' #Interface to bridge to for installer VM
-# bond_interfaces: '' #Interfaces to create bond with for installer VM
-# compute_interface: nic4 #Interface used for this network on the compute node. Can either be logical nic name like "nic1" or real name like "eth1"
-# controller_interface: nic4 #Interface used for this network on the controller node. Can either be logical nic name like "nic1" or real name like "eth1"
-# vlan: native #VLAN tag to use, native means none
-# usable_ip_range: 192.0.2.11,192.0.2.99 #Usable ip range, if empty entire range is usable, ex. 192.168.1.10,192.168.1.20
-# gateway: 192.0.2.1 #Gateway (only needed when public_network is disabled), if empty it is auto-detected
-# provisioner_ip: 192.0.2.1 #installer VM IP, if empty it is the next available IP in the admin subnet
-# cidr: 192.0.2.0/24 #subnet in CIDR format 192.168.1.0/24, if empty it will be auto-detected
-# dhcp_range: 192.0.2.2,192.0.2.10 #dhcp range for the admin network, if empty it will be automatically provisioned
-# introspection_range: 192.0.2.100,192.0.2.120 #Range used for introspection phase (examining nodes)
-#private_network:
-# enabled: false #If disabled, internal api traffic will collapse to admin_network
-#public_network:
-# enabled: true #If disabled, public_network traffic will collapse to admin network
-# network_type: ''
-# bridged_interface: ''
-# cidr: 192.168.37.0/24
-# gateway: 192.168.37.1
-# floating_ip_range: 192.168.37.200,192.168.37.220 #Range to allocate to floating IPs for the public network with Neutron
-# usable_ip_range: 192.168.37.10,192.168.37.199 #Usable IP range on the public network, usually this is a shared subnet
-# provisioner_ip: 192.168.37.1
-#storage_network:
-# enabled: false #If disabled, storage_network traffic will collapse to admin network
diff --git a/tests/config/network_settings_nic1_reserved.yaml b/tests/config/network_settings_nic1_reserved.yaml
deleted file mode 100644
index 8abcfc85..00000000
--- a/tests/config/network_settings_nic1_reserved.yaml
+++ /dev/null
@@ -1,113 +0,0 @@
-# This configuration file defines Network Environment for a
-# Baremetal Deployment of OPNFV. It contains default values
-# for the following 4 networks:
-#
-# - admin
-# - private*
-# - public
-# - storage*
-#
-# *) optional networks
-#
-# Any values missing from this configuration file will be
-# auto-detected by the deployment script from the existing network
-# configuration of the jumphost.
-#
-# Optional networks will be consolidated with the admin network
-# if not explicitly configured.
-#
-# See short description of the networks in the comments below.
-#
-
-# DNS Servers for all nodes, comma delimited list
-dns_servers: ["8.8.8.8", "8.8.4.4"]
-
-# "admin" is the short name for Control Plane Network.
-# During OPNFV deployment it is used for node provisioning so
-# PXE boot should be enabled for the related interfaces on all
-# the nodes in the OPNFV cluster. After the deployment this
-# network is used as the OpenStack management network which
-# carries e.g. communication between its internal components.
-#
-admin_network:
- enabled: true
- network_type: bridged
- bridged_interface: ''
- bond_interfaces: ''
- usable_ip_range: 192.0.2.11,192.0.2.99
- gateway: 192.0.2.1
- provisioner_ip: 192.0.2.1
- cidr: 192.0.2.0/24
- dhcp_range: 192.0.2.2,192.0.2.10
- introspection_range: 192.0.2.100,192.0.2.120
-
-# "private" is an optional network used as underlying physical
-# network for virtual provider and tenant networks created by
-# users. Traffic between virtual machines is carried by this
-# network.
-#
-private_network:
- enabled: true
- vlan: native
- cidr: 11.0.0.0/24
- compute_interface: enp0s4
- controller_interface: nic3
-
-# "public" network is used for external connectivity.
-# The external network provides Internet access for virtual
-# machines. If floating IP range is defined for this network,
-# floating IP addresses can be used for accessing virtual
-# machines from outside of the OPNFV cluster. External REST
-# API calls also use this network.
-#
-public_network:
- enabled: true
- network_type: ''
- bridged_interface: ''
- compute_interface: nic1
- controller_interface: enp0s3
- vlan: native
- cidr: 192.168.37.0/24
- gateway: 192.168.37.1
- floating_ip_range: 192.168.37.200,192.168.37.220
- usable_ip_range: 192.168.37.10,192.168.37.199
- provisioner_ip: 192.168.37.1
-
-# "storage" is an optional network used by storage backends.
-# You can configure this network in order to reduce load on
-# the Control Plane Network.
-#
-storage_network:
- enabled: true
- vlan: native
- cidr: 12.0.0.0/24
- compute_interface: eth5
- controller_interface: eth6
-
-#admin_network:
-# enabled: true
-# network_type: bridged #Indicates if this network will be bridged to an interface, or to a bond
-# bridged_interface: '' #Interface to bridge to for installer VM
-# bond_interfaces: '' #Interfaces to create bond with for installer VM
-# overcloud_compute_interface: nic4 #Interface used for this network on the compute node. Can either be logical nic name like "nic1" or real name like "eth1"
-# overcloud_controller_interface: nic4 #Interface used for this network on the controller node. Can either be logical nic name like "nic1" or real name like "eth1"
-# vlan: native #VLAN tag to use, native means none
-# usable_ip_range: 192.0.2.11,192.0.2.99 #Usable ip range, if empty entire range is usable, ex. 192.168.1.10,192.168.1.20
-# gateway: 192.0.2.1 #Gateway (only needed when public_network is disabled), if empty it is auto-detected
-# provisioner_ip: 192.0.2.1 #installer VM IP, if empty it is the next available IP in the admin subnet
-# cidr: 192.0.2.0/24 #subnet in CIDR format 192.168.1.0/24, if empty it will be auto-detected
-# dhcp_range: 192.0.2.2,192.0.2.10 #dhcp range for the admin network, if empty it will be automatically provisioned
-# introspection_range: 192.0.2.100,192.0.2.120 #Range used for introspection phase (examining nodes)
-#private_network:
-# enabled: false #If disabled, internal api traffic will collapse to admin_network
-#public_network:
-# enabled: true #If disabled, public_network traffic will collapse to admin network
-# network_type: ''
-# bridged_interface: ''
-# cidr: 192.168.37.0/24
-# gateway: 192.168.37.1
-# floating_ip_range: 192.168.37.200,192.168.37.220 #Range to allocate to floating IPs for the public network with Neutron
-# usable_ip_range: 192.168.37.10,192.168.37.199 #Usable IP range on the public network, usually this is a shared subnet
-# provisioner_ip: 192.168.37.1
-#storage_network:
-# enabled: false #If disabled, storage_network traffic will collapse to admin network
diff --git a/tests/config/network_settings_nics_not_specified.yaml b/tests/config/network_settings_nics_not_specified.yaml
deleted file mode 100644
index e5089435..00000000
--- a/tests/config/network_settings_nics_not_specified.yaml
+++ /dev/null
@@ -1,107 +0,0 @@
-# This configuration file defines Network Environment for a
-# Baremetal Deployment of OPNFV. It contains default values
-# for the following 4 networks:
-#
-# - admin
-# - private*
-# - public
-# - storage*
-#
-# *) optional networks
-#
-# Any values missing from this configuration file will be
-# auto-detected by the deployment script from the existing network
-# configuration of the jumphost.
-#
-# Optional networks will be consolidated with the admin network
-# if not explicitly configured.
-#
-# See short description of the networks in the comments below.
-#
-
-# DNS Servers for all nodes, comma delimited list
-dns_servers: ["8.8.8.8", "8.8.4.4"]
-
-# "admin" is the short name for Control Plane Network.
-# During OPNFV deployment it is used for node provisioning so
-# PXE boot should be enabled for the related interfaces on all
-# the nodes in the OPNFV cluster. After the deployment this
-# network is used as the OpenStack management network which
-# carries e.g. communication between its internal components.
-#
-admin_network:
- enabled: true
- network_type: bridged
- bridged_interface: ''
- bond_interfaces: ''
- usable_ip_range: 192.0.2.11,192.0.2.99
- gateway: 192.0.2.1
- provisioner_ip: 192.0.2.1
- cidr: 192.0.2.0/24
- dhcp_range: 192.0.2.2,192.0.2.10
- introspection_range: 192.0.2.100,192.0.2.120
-
-# "private" is an optional network used as underlying physical
-# network for virtual provider and tenant networks created by
-# users. Traffic between virtual machines is carried by this
-# network.
-#
-private_network:
- enabled: true
- vlan: native
- cidr: 11.0.0.0/24
-
-# "public" network is used for external connectivity.
-# The external network provides Internet access for virtual
-# machines. If floating IP range is defined for this network,
-# floating IP addresses can be used for accessing virtual
-# machines from outside of the OPNFV cluster. External REST
-# API calls also use this network.
-#
-public_network:
- enabled: true
- network_type: ''
- bridged_interface: ''
- vlan: native
- cidr: 192.168.37.0/24
- gateway: 192.168.37.1
- floating_ip_range: 192.168.37.200,192.168.37.220
- usable_ip_range: 192.168.37.10,192.168.37.199
- provisioner_ip: 192.168.37.1
-
-# "storage" is an optional network used by storage backends.
-# You can configure this network in order to reduce load on
-# the Control Plane Network.
-#
-storage_network:
- enabled: true
- vlan: native
- cidr: 12.0.0.0/24
-
-#admin_network:
-# enabled: true
-# network_type: bridged #Indicates if this network will be bridged to an interface, or to a bond
-# bridged_interface: '' #Interface to bridge to for installer VM
-# bond_interfaces: '' #Interfaces to create bond with for installer VM
-# compute_interface: nic4 #Interface used for this network on the compute node. Can either be logical nic name like "nic1" or real name like "eth1"
-# controller_interface: nic4 #Interface used for this network on the controller node. Can either be logical nic name like "nic1" or real name like "eth1"
-# vlan: native #VLAN tag to use, native means none
-# usable_ip_range: 192.0.2.11,192.0.2.99 #Usable ip range, if empty entire range is usable, ex. 192.168.1.10,192.168.1.20
-# gateway: 192.0.2.1 #Gateway (only needed when public_network is disabled), if empty it is auto-detected
-# provisioner_ip: 192.0.2.1 #installer VM IP, if empty it is the next available IP in the admin subnet
-# cidr: 192.0.2.0/24 #subnet in CIDR format 192.168.1.0/24, if empty it will be auto-detected
-# dhcp_range: 192.0.2.2,192.0.2.10 #dhcp range for the admin network, if empty it will be automatically provisioned
-# introspection_range: 192.0.2.100,192.0.2.120 #Range used for introspection phase (examining nodes)
-#private_network:
-# enabled: false #If disabled, internal api traffic will collapse to admin_network
-#public_network:
-# enabled: true #If disabled, public_network traffic will collapse to admin network
-# network_type: ''
-# bridged_interface: ''
-# cidr: 192.168.37.0/24
-# gateway: 192.168.37.1
-# floating_ip_range: 192.168.37.200,192.168.37.220 #Range to allocate to floating IPs for the public network with Neutron
-# usable_ip_range: 192.168.37.10,192.168.37.199 #Usable IP range on the public network, usually this is a shared subnet
-# provisioner_ip: 192.168.37.1
-#storage_network:
-# enabled: false #If disabled, storage_network traffic will collapse to admin network
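
The standalone negative fixtures (duplicate NIC, reserved nic1, missing required NIC,
NICs not specified) can be deleted because NetworkSettings now also accepts an existing
settings object and re-runs validation on it, as the rewritten tests below rely on. A
sketch of that pattern, assuming only the constructor and keys shown in the tests:

    from apex.network_settings import NetworkSettings, NetworkSettingsException

    ns = NetworkSettings('../config/network/network_settings.yaml')
    # mutate in memory instead of keeping a broken fixture file on disk
    ns['networks']['storage']['nic_mapping']['compute']['members'] = []
    try:
        NetworkSettings(ns)  # rebuilding re-validates the mutated object
    except NetworkSettingsException:
        pass  # empty nic members are rejected, as the tests below assert
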
diff --git a/tests/test_apex_network_environment.py b/tests/test_apex_network_environment.py
index 673368e8..df0f0334 100644
--- a/tests/test_apex_network_environment.py
+++ b/tests/test_apex_network_environment.py
@@ -9,11 +9,14 @@
import ipaddress
+from copy import copy
+
from apex.common.constants import (
- PUBLIC_NETWORK,
- PRIVATE_NETWORK,
+ EXTERNAL_NETWORK,
+ TENANT_NETWORK,
STORAGE_NETWORK,
- API_NETWORK)
+ API_NETWORK,
+ CONTROLLER)
from apex.network_settings import NetworkSettings
from apex.network_environment import (
NetworkEnvironment,
@@ -33,6 +36,12 @@ class TestNetworkEnvironment(object):
@classmethod
def setup_class(klass):
"""This method is run once for each class before any tests are run"""
+ klass.ns = NetworkSettings(
+ '../config/network/network_settings.yaml')
+ klass.ns_vlans = NetworkSettings(
+ '../config/network/network_settings_vlans.yaml')
+ klass.ns_ipv6 = NetworkSettings(
+ '../config/network/network_settings_v6.yaml')
@classmethod
def teardown_class(klass):
@@ -48,84 +57,108 @@ class TestNetworkEnvironment(object):
assert_raises(NetworkEnvException, NetworkEnvironment,
None, '../build/network-environment.yaml')
- def test_netenv_settings_public_network(self):
- ns = NetworkSettings('../config/network/network_settings.yaml', True)
+ def test_netenv_settings_external_network_vlans(self):
# test vlans
- ns[PUBLIC_NETWORK]['vlan'] = 100
- ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
+ ne = NetworkEnvironment(self.ns_vlans,
+ '../build/network-environment.yaml')
assert_equal(ne['parameter_defaults']['NeutronExternalNetworkBridge'],
'""')
- assert_equal(ne['parameter_defaults']['ExternalNetworkVlanID'], 100)
+ assert_equal(ne['parameter_defaults']['ExternalNetworkVlanID'], 501)
+ def test_netenv_settings_external_network_ipv6(self):
# Test IPv6
- ns[PUBLIC_NETWORK]['cidr'] = ipaddress.ip_network('::1/128')
- ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
+ ne = NetworkEnvironment(self.ns_ipv6,
+ '../build/network-environment.yaml')
regstr = ne['resource_registry'][next(iter(EXTERNAL_RESOURCES.keys()))]
assert_equal(regstr.split('/')[-1], 'external_v6.yaml')
- def test_netenv_settings_private_network(self):
- ns = NetworkSettings('../config/network/network_settings.yaml', True)
- # test vlans
- ns[PRIVATE_NETWORK]['vlan'] = 100
+ def test_netenv_settings_external_network_removed(self):
+ ns = copy(self.ns)
+ # Test removing EXTERNAL_NETWORK
+ ns.enabled_network_list.remove(EXTERNAL_NETWORK)
ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
- assert_equal(ne['parameter_defaults']['TenantNetworkVlanID'], 100)
-
- # Test IPv6
- ns[PRIVATE_NETWORK]['cidr'] = ipaddress.ip_network('::1/128')
- ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
- regstr = ne['resource_registry'][next(iter(TENANT_RESOURCES.keys()))]
- assert_equal(regstr.split('/')[-1], 'tenant_v6.yaml')
+ regstr = ne['resource_registry'][next(iter(EXTERNAL_RESOURCES.keys()))]
+ assert_equal(regstr.split('/')[-1], 'noop.yaml')
- # Test removing PRIVATE_NETWORK
- ns.enabled_network_list.remove(PRIVATE_NETWORK)
+ def test_netenv_settings_tenant_network_vlans(self):
+ # test vlans
+ ne = NetworkEnvironment(self.ns_vlans,
+ '../build/network-environment.yaml')
+ assert_equal(ne['parameter_defaults']['TenantNetworkVlanID'], 401)
+
+# Apex does not support v6 tenant networks.
+# There is code that would fire if a v6 cidr
+# were passed in; uncomment this test to
+# cover that code path
+# def test_netenv_settings_tenant_network_v6(self):
+# # Test IPv6
+# ne = NetworkEnvironment(self.ns_ipv6,
+# '../build/network-environment.yaml')
+# regstr = ne['resource_registry'][next(iter(TENANT_RESOURCES.keys()))]
+# assert_equal(regstr.split('/')[-1], 'tenant_v6.yaml')
+
+ def test_netenv_settings_tenant_network_removed(self):
+ ns = copy(self.ns)
+ # Test removing TENANT_NETWORK
+ ns.enabled_network_list.remove(TENANT_NETWORK)
ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
regstr = ne['resource_registry'][next(iter(TENANT_RESOURCES.keys()))]
assert_equal(regstr.split('/')[-1], 'noop.yaml')
- def test_netenv_settings_storage_network(self):
- ns = NetworkSettings('../config/network/network_settings.yaml', True)
+ def test_netenv_settings_storage_network_vlans(self):
# test vlans
- ns[STORAGE_NETWORK]['vlan'] = 100
- ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
- assert_equal(ne['parameter_defaults']['StorageNetworkVlanID'], 100)
+ ne = NetworkEnvironment(self.ns_vlans,
+ '../build/network-environment.yaml')
+ assert_equal(ne['parameter_defaults']['StorageNetworkVlanID'], 201)
+ def test_netenv_settings_storage_network_v6(self):
# Test IPv6
- ns[STORAGE_NETWORK]['cidr'] = ipaddress.ip_network('::1/128')
- ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
+ ne = NetworkEnvironment(self.ns_ipv6,
+ '../build/network-environment.yaml')
regstr = ne['resource_registry'][next(iter(STORAGE_RESOURCES.keys()))]
assert_equal(regstr.split('/')[-1], 'storage_v6.yaml')
+ def test_netenv_settings_storage_network_removed(self):
+ ns = copy(self.ns)
# Test removing STORAGE_NETWORK
ns.enabled_network_list.remove(STORAGE_NETWORK)
ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
regstr = ne['resource_registry'][next(iter(STORAGE_RESOURCES.keys()))]
assert_equal(regstr.split('/')[-1], 'noop.yaml')
- def test_netenv_settings_api_network(self):
- ns = NetworkSettings('../config/network/network_settings.yaml', True)
+ def test_netenv_settings_api_network_v4(self):
+ ns = copy(self.ns_vlans)
+ ns['networks'][API_NETWORK]['enabled'] = True
+ ns['networks'][API_NETWORK]['cidr'] = '10.11.12.0/24'
+ ns = NetworkSettings(ns)
# test vlans
- ns.enabled_network_list.append(API_NETWORK)
- ns[API_NETWORK] = {'vlan': 100,
- 'cidr': ipaddress.ip_network('10.10.10.0/24'),
- 'usable_ip_range': '10.10.10.10,10.10.10.100'}
ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
- assert_equal(ne['parameter_defaults']['InternalApiNetworkVlanID'], 100)
+ assert_equal(ne['parameter_defaults']['InternalApiNetworkVlanID'], 101)
- # Test IPv6
- ns[API_NETWORK]['cidr'] = ipaddress.ip_network('::1/128')
+ def test_netenv_settings_api_network_vlans(self):
+ ns = copy(self.ns_vlans)
+ ns['networks'][API_NETWORK]['enabled'] = True
+ ns = NetworkSettings(ns)
+ # test vlans
ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
+ assert_equal(ne['parameter_defaults']['InternalApiNetworkVlanID'], 101)
+
+ def test_netenv_settings_api_network_v6(self):
+ # Test IPv6
+ ne = NetworkEnvironment(self.ns_ipv6,
+ '../build/network-environment.yaml')
regstr = ne['resource_registry'][next(iter(API_RESOURCES.keys()))]
assert_equal(regstr.split('/')[-1], 'internal_api_v6.yaml')
- # Test removing API_NETWORK
- ns.enabled_network_list.remove(API_NETWORK)
+ def test_netenv_settings_api_network_removed(self):
+ ns = copy(self.ns)
+ # API_NETWORK is not in the default network settings file
ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
regstr = ne['resource_registry'][next(iter(API_RESOURCES.keys()))]
assert_equal(regstr.split('/')[-1], 'noop.yaml')
def test_numa_configs(self):
- ns = NetworkSettings('../config/network/network_settings.yaml', True)
- ne = NetworkEnvironment(ns, '../build/network-environment.yaml',
+ ne = NetworkEnvironment(self.ns, '../build/network-environment.yaml',
compute_pre_config=True,
controller_pre_config=True)
assert_is_instance(ne, dict)
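
Taken together, the rewritten tests treat NetworkEnvironment as a plain mapping keyed
by Heat sections. A condensed usage sketch, using only constructor arguments and keys
that appear in the tests above:

    from apex.network_settings import NetworkSettings
    from apex.network_environment import NetworkEnvironment

    ns = NetworkSettings('../config/network/network_settings_vlans.yaml')
    ne = NetworkEnvironment(ns, '../build/network-environment.yaml')
    # VLAN IDs surface under parameter_defaults ...
    print(ne['parameter_defaults']['StorageNetworkVlanID'])  # 201 in the vlans file
    # ... while disabled networks map their resource_registry entry to noop.yaml
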
diff --git a/tests/test_apex_network_settings.py b/tests/test_apex_network_settings.py
index ff61cc4b..955c0cf7 100644
--- a/tests/test_apex_network_settings.py
+++ b/tests/test_apex_network_settings.py
@@ -7,6 +7,12 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+from apex.common.constants import (
+ EXTERNAL_NETWORK,
+ STORAGE_NETWORK,
+ ADMIN_NETWORK,
+)
+
from apex.network_settings import (
NetworkSettings,
NetworkSettingsException,
@@ -18,6 +24,8 @@ from nose.tools import (
assert_raises
)
+files_dir = '../config/network/'
+
class TestNetworkSettings(object):
@classmethod
@@ -35,48 +43,118 @@ class TestNetworkSettings(object):
"""This method is run once after _each_ test method is executed"""
def test_init(self):
- NetworkSettings('../config/network/network_settings.yaml', True)
+ assert_is_instance(
+ NetworkSettings(files_dir+'network_settings.yaml'),
+ NetworkSettings)
+
+ def test_init_vlans(self):
+ assert_is_instance(
+ NetworkSettings(files_dir+'network_settings_vlans.yaml'),
+ NetworkSettings)
+
+# TODO: the v6 test hangs; re-enable once fixed
+ # def test_init_v6(self):
+ # assert_is_instance(
+ # NetworkSettings(files_dir+'network_settings_v6.yaml', True),
+ # NetworkSettings)
+
+ def test_init_admin_disabled_or_missing(self):
+ ns = NetworkSettings(files_dir+'network_settings.yaml')
+ # remove admin; the apex section will re-add it
+ ns['networks'].pop('admin', None)
+ assert_raises(NetworkSettingsException, NetworkSettings, ns)
+ # remove admin and apex
+ ns.pop('apex', None)
+ ns['networks'].pop('admin', None)
+ assert_raises(NetworkSettingsException, NetworkSettings, ns)
+
+ def test_init_collapse_storage(self):
+ ns = NetworkSettings(files_dir+'network_settings.yaml')
+ # remove storage
+ ns['networks'].pop('storage', None)
+ assert_is_instance(NetworkSettings(ns), NetworkSettings)
+
+ def test_init_missing_dns_domain(self):
+ ns = NetworkSettings(files_dir+'network_settings.yaml')
+ # remove dns-domain
+ ns.pop('dns-domain', None)
+ assert_is_instance(NetworkSettings(ns), NetworkSettings)
def test_dump_bash(self):
- ns = NetworkSettings('../config/network/network_settings.yaml', True)
+ ns = NetworkSettings('../config/network/network_settings.yaml')
assert_equal(ns.dump_bash(), None)
assert_equal(ns.dump_bash(path='/dev/null'), None)
def test_get_network_settings(self):
- ns = NetworkSettings('../config/network/network_settings.yaml', True)
- assert_is_instance(ns, dict)
+ ns = NetworkSettings('../config/network/network_settings.yaml')
+ assert_is_instance(ns, NetworkSettings)
for role in ['controller', 'compute']:
nic_index = 1
- for network in ['admin_network', 'private_network',
- 'public_network', 'storage_network']:
- nic = 'nic' + str(nic_index)
- assert_equal(ns.nics[role][network], nic)
- nic_index += 1
-
- def test_get_network_settings_unspecified_nics(self):
- ns = NetworkSettings(
- '../tests/config/network_settings_nics_not_specified.yaml',
- True)
- assert_is_instance(ns, dict)
- for role in ['controller', 'compute']:
- nic_index = 1
- for network in ['admin_network', 'private_network',
- 'public_network', 'storage_network']:
+ print(ns.nics)
+ for network in ns.enabled_network_list:
nic = 'nic' + str(nic_index)
assert_equal(ns.nics[role][network], nic)
nic_index += 1
def test_get_enabled_networks(self):
- ns = NetworkSettings('../config/network/network_settings.yaml', True)
- assert_is_instance(ns.get_enabled_networks(), list)
-
- def test_negative_network_settings(self):
- assert_raises(NetworkSettingsException, NetworkSettings,
- '../tests/config/network_settings_duplicate_nic.yaml',
- True)
- assert_raises(NetworkSettingsException, NetworkSettings,
- '../tests/config/network_settings_nic1_reserved.yaml',
- True)
- assert_raises(NetworkSettingsException, NetworkSettings,
- '../tests/config/network_settings_missing_required_nic'
- '.yaml', True)
+ ns = NetworkSettings('../config/network/network_settings.yaml')
+ assert_is_instance(ns.enabled_network_list, list)
+
+ def test_invalid_nic_members(self):
+ ns = NetworkSettings(files_dir+'network_settings.yaml')
+ storage_net_nicmap = ns['networks'][STORAGE_NETWORK]['nic_mapping']
+ # set duplicate nic
+ storage_net_nicmap['compute']['members'][0] = 'nic1'
+ assert_raises(NetworkSettingsException, NetworkSettings, ns)
+ # remove nic members
+ storage_net_nicmap['compute']['members'] = []
+ assert_raises(NetworkSettingsException, NetworkSettings, ns)
+
+ def test_missing_vlan(self):
+ ns = NetworkSettings(files_dir+'network_settings.yaml')
+ storage_net_nicmap = ns['networks'][STORAGE_NETWORK]['nic_mapping']
+ # remove vlan from storage net
+ storage_net_nicmap['compute'].pop('vlan', None)
+ assert_is_instance(NetworkSettings(ns), NetworkSettings)
+
+# TODO
+# this test needs a way to manipulate the host interfaces,
+# perhaps via a stub returned by ip_utils, before it can pass
+# def test_admin_auto_detect(self):
+# ns = NetworkSettings(files_dir+'network_settings.yaml')
+# # remove cidr to force autodetection
+# ns['networks'][ADMIN_NETWORK].pop('cidr', None)
+# assert_is_instance(NetworkSettings(ns), NetworkSettings)
+
+ def test_admin_fail_auto_detect(self):
+ ns = NetworkSettings(files_dir+'network_settings.yaml')
+ # remove cidr and installer_vm to fail autodetect
+ ns['networks'][ADMIN_NETWORK].pop('cidr', None)
+ ns['networks'][ADMIN_NETWORK].pop('installer_vm', None)
+ assert_raises(NetworkSettingsException, NetworkSettings, ns)
+
+ def test_exception(self):
+ e = NetworkSettingsException("test")
+ print(e)
+ assert_is_instance(e, NetworkSettingsException)
+
+ def test_config_ip(self):
+ ns = NetworkSettings(files_dir+'network_settings.yaml')
+ # set the provisioner ip to None to force _gen_ip to generate one
+ ns['networks'][ADMIN_NETWORK]['installer_vm']['ip'] = None
+ ns['networks'][EXTERNAL_NETWORK][0]['installer_vm']['ip'] = None
+ # Now rebuild network settings object and check for repopulated values
+ ns = NetworkSettings(ns)
+ assert_equal(ns['networks'][ADMIN_NETWORK]['installer_vm']['ip'],
+ '192.0.2.1')
+ assert_equal(ns['networks'][EXTERNAL_NETWORK][0]['installer_vm']['ip'],
+ '192.168.37.1')
+
+ def test_config_gateway(self):
+ ns = NetworkSettings(files_dir+'network_settings.yaml')
+ # set the gateway ip to None to force _config_gateway to generate one
+ ns['networks'][EXTERNAL_NETWORK][0]['gateway'] = None
+ # Now rebuild network settings object and check for a repopulated value
+ ns = NetworkSettings(ns)
+ assert_equal(ns['networks'][EXTERNAL_NETWORK][0]['gateway'],
+ '192.168.37.1')
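
Beyond validation, the suite exercises dump_bash, which presumably emits the parsed
settings as bash variable assignments for the shell-based deploy scripts. A minimal
sketch, assuming only the signature shown in test_dump_bash above:

    ns = NetworkSettings('../config/network/network_settings.yaml')
    ns.dump_bash()                        # dump to stdout
    ns.dump_bash(path='/tmp/net_env.sh')  # or to a file that can be sourced
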
diff --git a/tests/test_apex_python_utils_py.py b/tests/test_apex_python_utils_py.py
index 28180f06..eb16f67d 100644
--- a/tests/test_apex_python_utils_py.py
+++ b/tests/test_apex_python_utils_py.py
@@ -62,7 +62,6 @@ class TestCommonUtils(object):
tmp_dir = tempfile.mkdtemp()
args = self.parser.parse_args(['parse-net-settings',
'-s', net_sets,
- '--flat',
'-td', tmp_dir,
'-e', net_env])
assert_equal(parse_net_settings(args), None)
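
With the --flat flag removed, parse-net-settings takes only the settings file, template
directory, and environment file. A sketch of the updated invocation, mirroring the
arguments the test parses (the entry-point path and output directory are assumptions):

    python3 lib/python/apex_python_utils.py parse-net-settings \
        -s config/network/network_settings.yaml \
        -td /tmp/apex-net \
        -e build/network-environment.yaml
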