 apex/deploy.py                                   |  11
 apex/overcloud/deploy.py                         |  51
 apex/tests/test_apex_overcloud_deploy.py         | 182
 build/nics-template.yaml.jinja2                  |  12
 build/rpm_specs/opnfv-apex.spec                  |   3
 build/upstream-environment.yaml                  |   5
 config/network/network_settings.yaml             |   6
 config/network/network_settings_csit.yaml        |   6
 config/network/network_settings_tenant_vlan.yaml | 333
 config/network/network_settings_v6.yaml          |   6
 10 files changed, 562 insertions(+), 53 deletions(-)
diff --git a/apex/deploy.py b/apex/deploy.py
index 1e477ee0..9bf9b12e 100644
--- a/apex/deploy.py
+++ b/apex/deploy.py
@@ -416,15 +416,8 @@ def main():
# opnfv env file will not work with upstream
args.env_file = 'upstream-environment.yaml'
opnfv_env = os.path.join(args.deploy_dir, args.env_file)
-
- # TODO(trozet): Invoke with containers after Fraser migration
- # oc_deploy.prep_env(deploy_settings, net_settings, inventory,
- # opnfv_env, net_env_target, APEX_TEMP_DIR)
-
- shutil.copyfile(
- opnfv_env,
- os.path.join(APEX_TEMP_DIR, os.path.basename(opnfv_env))
- )
+ oc_deploy.prep_env(deploy_settings, net_settings, inventory,
+ opnfv_env, net_env_target, APEX_TEMP_DIR)
patched_containers = oc_deploy.prep_image(
deploy_settings, net_settings, sdn_image, APEX_TEMP_DIR,
root_pw=root_pw, docker_tag=tag, patches=patches['overcloud'])
diff --git a/apex/overcloud/deploy.py b/apex/overcloud/deploy.py
index db7e42c3..e1af210d 100644
--- a/apex/overcloud/deploy.py
+++ b/apex/overcloud/deploy.py
@@ -496,6 +496,10 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
else:
perf = False
+ tenant_settings = ns['networks']['tenant']
+ tenant_vlan_enabled = 'tenant' in ns.enabled_network_list and \
+ ns['networks']['tenant'].get('segmentation_type') == 'vlan'
+
# Modify OPNFV environment
# TODO: Change to build a dict and outputting yaml rather than parsing
for line in fileinput.input(tmp_opnfv_env, inplace=True):
@@ -519,6 +523,46 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
ds_opts['dataplane'] == 'ovs_dpdk':
output_line = ' OS::TripleO::ComputeExtraConfigPre: ' \
'./ovs-dpdk-preconfig.yaml'
+ elif 'NeutronNetworkVLANRanges' in line:
+ vlan_setting = ''
+ if tenant_vlan_enabled:
+ if ns['networks']['tenant']['overlay_id_range']:
+ vlan_setting = ns['networks']['tenant']['overlay_id_range']
+ if 'datacentre' not in vlan_setting:
+ vlan_setting += ',datacentre:1:1000'
+ # SRIOV networks are VLAN-based provider networks. In order to
+ # simplify the deployment, nfv_sriov will be the default physnet.
+ # VLAN IDs do not need to be allocated in advance; the user will
+ # create the network specifying the segmentation-id.
+ if ds_opts['sriov']:
+ if vlan_setting:
+ vlan_setting += ",nfv_sriov"
+ else:
+ vlan_setting = "datacentre:1:1000,nfv_sriov"
+ if vlan_setting:
+ output_line = " NeutronNetworkVLANRanges: " + vlan_setting
+ elif 'NeutronBridgeMappings' in line and tenant_vlan_enabled:
+ if tenant_settings['overlay_id_range']:
+ physnets = tenant_settings['overlay_id_range'].split(',')
+ output_line = " NeutronBridgeMappings: "
+ for physnet in physnets:
+ physnet_name = physnet.split(':')[0]
+ if physnet_name != 'datacentre':
+ output_line += "{}:br-vlan,".format(physnet_name)
+ output_line += "datacentre:br-ex"
+ elif 'OpenDaylightProviderMappings' in line and tenant_vlan_enabled \
+ and ds_opts['sdn_controller'] == 'opendaylight':
+ if tenant_settings['overlay_id_range']:
+ physnets = tenant_settings['overlay_id_range'].split(',')
+ output_line = " OpenDaylightProviderMappings: "
+ for physnet in physnets:
+ physnet_name = physnet.split(':')[0]
+ if physnet_name != 'datacentre':
+ output_line += "{}:br-vlan,".format(physnet_name)
+ output_line += "datacentre:br-ex"
+ elif 'NeutronNetworkType' in line and tenant_vlan_enabled:
+ output_line = " NeutronNetworkType: vlan\n" \
+ " NeutronTunnelTypes: ''"
if ds_opts['sdn_controller'] == 'opendaylight' and \
'odl_vpp_routing_node' in ds_opts:
@@ -544,13 +588,6 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
elif 'ComputeServices' in line:
output_line = (" ComputeServices:\n"
" - OS::TripleO::Services::NeutronDhcpAgent")
- # SRIOV networks are VLAN based provider networks. In order to simplify
- # the deployment, nfv_sriov will be the default physnet. VLANs are not
- # needed in advance, and the user will have to create the network
- # specifying the segmentation-id.
- if ds_opts['sriov']:
- if 'NeutronNetworkVLANRanges' in line:
- output_line = ("{},nfv_sriov'".format(line[:-1]))
if perf:
for role in 'NovaCompute', 'Controller':
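
The VLAN handling this hunk adds to prep_env() can be summarized as a standalone function. The sketch below is illustrative only: the helper name build_vlan_settings is hypothetical, and the real code rewrites lines of the environment file in place via fileinput rather than returning values.

    # Sketch of the VLAN parameter derivation added to prep_env() above
    # (hypothetical helper, not part of the patch).
    def build_vlan_settings(overlay_id_range, sriov_enabled):
        """Derive NeutronNetworkVLANRanges and NeutronBridgeMappings
        from the tenant network's overlay_id_range (e.g. 'vlan:500:600')."""
        vlan_setting = overlay_id_range or ''
        # 'datacentre:1:1000' backs external networks and is appended
        # automatically when the user-supplied range omits it.
        if vlan_setting and 'datacentre' not in vlan_setting:
            vlan_setting += ',datacentre:1:1000'
        if sriov_enabled:
            # nfv_sriov is the default physnet for SRIOV provider networks.
            if vlan_setting:
                vlan_setting += ',nfv_sriov'
            else:
                vlan_setting = 'datacentre:1:1000,nfv_sriov'
        # Every non-datacentre physnet maps to br-vlan; datacentre stays
        # on br-ex (the same mapping is reused for OpenDaylight).
        physnets = [p.split(':')[0]
                    for p in (overlay_id_range or '').split(',') if p]
        mappings = ','.join('{}:br-vlan'.format(p)
                            for p in physnets if p != 'datacentre')
        bridge_mappings = (mappings + ',datacentre:br-ex' if mappings
                           else 'datacentre:br-ex')
        return vlan_setting, bridge_mappings

    # build_vlan_settings('vlan:500:600', sriov_enabled=False)
    # -> ('vlan:500:600,datacentre:1:1000', 'vlan:br-vlan,datacentre:br-ex')
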
diff --git a/apex/tests/test_apex_overcloud_deploy.py b/apex/tests/test_apex_overcloud_deploy.py
index b598e40e..d12b1a40 100644
--- a/apex/tests/test_apex_overcloud_deploy.py
+++ b/apex/tests/test_apex_overcloud_deploy.py
@@ -362,24 +362,27 @@ class TestOvercloudDeploy(unittest.TestCase):
'ovs': {'dpdk_cores': 'test'},
'kernel': {'test': 'test'}},
'Controller': {'vpp': 'test'}}}}
- ns = {'domain_name': 'test.domain',
- 'networks':
- {'tenant':
- {'nic_mapping': {'controller':
- {'members': ['tenant_nic']},
- 'compute':
- {'members': ['tenant_nic']}}},
- 'external':
- [{'nic_mapping': {'controller':
- {'members': ['ext_nic']},
- 'compute':
- {'members': ['ext_nic']}}}]}}
+ ns_dict = {'domain_name': 'test.domain',
+ 'networks':
+ {'tenant':
+ {'nic_mapping': {'controller':
+ {'members': ['tenant_nic']},
+ 'compute':
+ {'members': ['tenant_nic']}}},
+ 'external':
+ [{'nic_mapping': {'controller':
+ {'members': ['ext_nic']},
+ 'compute':
+ {'members': ['ext_nic']}}}]}}
inv = None
try:
# Swap stdout
saved_stdout = sys.stdout
out = StringIO()
sys.stdout = out
+ ns = MagicMock()
+ ns.enabled_network_list = ['external', 'tenant']
+ ns.__getitem__.side_effect = lambda i: ns_dict.get(i, MagicMock())
# run test
prep_env(ds, ns, inv, 'opnfv-env.yml', '/net-env.yml', '/tmp')
output = out.getvalue().strip()
@@ -402,24 +405,27 @@ class TestOvercloudDeploy(unittest.TestCase):
'sriov': 'xxx',
'performance': {'Compute': {},
'Controller': {}}}}
- ns = {'domain_name': 'test.domain',
- 'networks':
- {'tenant':
- {'nic_mapping': {'controller':
- {'members': ['tenant_nic']},
- 'compute':
- {'members': ['tenant_nic']}}},
- 'external':
- [{'nic_mapping': {'controller':
- {'members': ['ext_nic']},
- 'compute':
- {'members': ['ext_nic']}}}]}}
+ ns_dict = {'domain_name': 'test.domain',
+ 'networks':
+ {'tenant':
+ {'nic_mapping': {'controller':
+ {'members': ['tenant_nic']},
+ 'compute':
+ {'members': ['tenant_nic']}}},
+ 'external':
+ [{'nic_mapping': {'controller':
+ {'members': ['ext_nic']},
+ 'compute':
+ {'members': ['ext_nic']}}}]}}
inv = None
try:
# Swap stdout
saved_stdout = sys.stdout
out = StringIO()
sys.stdout = out
+ ns = MagicMock()
+ ns.enabled_network_list = ['external', 'tenant']
+ ns.__getitem__.side_effect = lambda i: ns_dict.get(i, MagicMock())
# run test
prep_env(ds, ns, inv, 'opnfv-env.yml', '/net-env.yml', '/tmp')
output = out.getvalue().strip()
@@ -442,18 +448,18 @@ class TestOvercloudDeploy(unittest.TestCase):
'dataplane': 'fdio',
'sriov': 'xxx',
'dvr': True}}
- ns = {'domain_name': 'test.domain',
- 'networks':
- {'tenant':
- {'nic_mapping': {'controller':
- {'members': ['tenant_nic']},
- 'compute':
- {'members': ['tenant_nic']}}},
- 'external':
- [{'nic_mapping': {'controller':
- {'members': ['ext_nic']},
- 'compute':
- {'members': ['ext_nic']}}}]}}
+ ns_dict = {'domain_name': 'test.domain',
+ 'networks':
+ {'tenant':
+ {'nic_mapping': {'controller':
+ {'members': ['tenant_nic']},
+ 'compute':
+ {'members': ['tenant_nic']}}},
+ 'external':
+ [{'nic_mapping': {'controller':
+ {'members': ['ext_nic']},
+ 'compute':
+ {'members': ['ext_nic']}}}]}}
inv = MagicMock()
inv.get_node_counts.return_value = (3, 2)
try:
@@ -461,6 +467,9 @@ class TestOvercloudDeploy(unittest.TestCase):
saved_stdout = sys.stdout
out = StringIO()
sys.stdout = out
+ ns = MagicMock()
+ ns.enabled_network_list = ['external', 'tenant']
+ ns.__getitem__.side_effect = lambda i: ns_dict.get(i, MagicMock())
# run test
prep_env(ds, ns, inv, 'opnfv-env.yml', '/net-env.yml', '/tmp')
output = out.getvalue().strip()
@@ -469,6 +478,107 @@ class TestOvercloudDeploy(unittest.TestCase):
# put stdout back
sys.stdout = saved_stdout
+ @patch('apex.overcloud.deploy.fileinput')
+ @patch('apex.overcloud.deploy.shutil')
+ def test_prep_env_tenant_vlan(self, mock_shutil, mock_fileinput):
+ mock_fileinput.input.return_value = \
+ ['NeutronNetworkVLANRanges',
+ 'NeutronNetworkType', 'NeutronBridgeMappings']
+ ds = {'deploy_options':
+ {'sdn_controller': False,
+ 'dataplane': 'ovs',
+ 'sriov': 'xxx',
+ 'dvr': True}}
+ ns_dict = {'domain_name': 'test.domain',
+ 'networks':
+ {'tenant':
+ {'nic_mapping': {'controller':
+ {'members': ['tenant_nic']},
+ 'compute':
+ {'members': ['tenant_nic']}},
+ 'segmentation_type': 'vlan',
+ 'overlay_id_range': 'vlan:500:600'
+ },
+ 'external':
+ [{'nic_mapping': {'controller':
+ {'members': ['ext_nic']},
+ 'compute':
+ {'members': ['ext_nic']}}}]}}
+ inv = MagicMock()
+ inv.get_node_counts.return_value = (3, 2)
+ try:
+ # Swap stdout
+ saved_stdout = sys.stdout
+ out = StringIO()
+ sys.stdout = out
+ ns = MagicMock()
+ ns.enabled_network_list = ['external', 'tenant']
+ ns.__getitem__.side_effect = lambda i: ns_dict.get(i, MagicMock())
+ # run test
+ prep_env(ds, ns, inv, 'opnfv-env.yml', '/net-env.yml', '/tmp')
+ output = out.getvalue().strip()
+ assert_in('NeutronNetworkVLANRanges: '
+ 'vlan:500:600,datacentre:1:1000', output)
+ assert_in('NeutronNetworkType: vlan', output)
+ assert_in('NeutronBridgeMappings: '
+ 'vlan:br-vlan,datacentre:br-ex', output)
+ assert_not_in('OpenDaylightProviderMappings', output)
+ finally:
+ # put stdout back
+ sys.stdout = saved_stdout
+
+ @patch('apex.overcloud.deploy.fileinput')
+ @patch('apex.overcloud.deploy.shutil')
+ def test_prep_env_tenant_vlan_odl(self, mock_shutil, mock_fileinput):
+ mock_fileinput.input.return_value = \
+ ['NeutronNetworkVLANRanges',
+ 'NeutronNetworkType',
+ 'NeutronBridgeMappings',
+ 'OpenDaylightProviderMappings']
+ ds = {'deploy_options':
+ {'sdn_controller': 'opendaylight',
+ 'dataplane': 'ovs',
+ 'sriov': 'xxx',
+ 'dvr': True}}
+ ns_dict = {'domain_name': 'test.domain',
+ 'networks':
+ {'tenant':
+ {'nic_mapping': {'controller':
+ {'members': ['tenant_nic']},
+ 'compute':
+ {'members': ['tenant_nic']}},
+ 'segmentation_type': 'vlan',
+ 'overlay_id_range': 'vlan:500:600'
+ },
+ 'external':
+ [{'nic_mapping': {'controller':
+ {'members': ['ext_nic']},
+ 'compute':
+ {'members': ['ext_nic']}}}]}}
+ inv = MagicMock()
+ inv.get_node_counts.return_value = (3, 2)
+ try:
+ # Swap stdout
+ saved_stdout = sys.stdout
+ out = StringIO()
+ sys.stdout = out
+ ns = MagicMock()
+ ns.enabled_network_list = ['external', 'tenant']
+ ns.__getitem__.side_effect = lambda i: ns_dict.get(i, MagicMock())
+ # run test
+ prep_env(ds, ns, inv, 'opnfv-env.yml', '/net-env.yml', '/tmp')
+ output = out.getvalue().strip()
+ assert_in('NeutronNetworkVLANRanges: '
+ 'vlan:500:600,datacentre:1:1000', output)
+ assert_in('NeutronNetworkType: vlan', output)
+ assert_in('NeutronBridgeMappings: '
+ 'vlan:br-vlan,datacentre:br-ex', output)
+ assert_in('OpenDaylightProviderMappings: '
+ 'vlan:br-vlan,datacentre:br-ex', output)
+ finally:
+ # put stdout back
+ sys.stdout = saved_stdout
+
def test_generate_ceph_key(self):
assert_equal(len(generate_ceph_key()), 40)
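
The ns fixture pattern repeated in the tests above, a MagicMock whose __getitem__ is wired to a plain dict, lets prep_env use both attribute access (ns.enabled_network_list) and item access (ns['networks']) on the same object. A minimal standalone illustration of the pattern:

    from unittest.mock import MagicMock

    ns_dict = {'networks': {'tenant': {'segmentation_type': 'vlan'}}}
    ns = MagicMock()
    # Plain attribute, read directly by the code under test
    ns.enabled_network_list = ['external', 'tenant']
    # Item access falls through to the dict; unknown keys yield a MagicMock
    ns.__getitem__.side_effect = lambda i: ns_dict.get(i, MagicMock())

    assert ns['networks']['tenant']['segmentation_type'] == 'vlan'
    assert 'tenant' in ns.enabled_network_list
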
diff --git a/build/nics-template.yaml.jinja2 b/build/nics-template.yaml.jinja2
index 073d6680..11e0b115 100644
--- a/build/nics-template.yaml.jinja2
+++ b/build/nics-template.yaml.jinja2
@@ -190,6 +190,18 @@ resources:
name: {{ nets['tenant']['nic_mapping'][role]['members'][0] }}
# force the MAC address of the bridge to this interface
primary: true
+ {%- elif nets['tenant']['segmentation_type'] == 'vlan' %}
+ type: ovs_bridge
+ name: br-vlan
+ use_dhcp: false
+ addresses:
+ -
+ ip_netmask: {get_param: TenantIpSubnet}
+ members:
+ -
+ type: interface
+ name: {{ nets['tenant']['nic_mapping'][role]['members'][0] }}
+ primary: true
{%- else %}
-
type: {{ nets['tenant']['nic_mapping'][role]['phys_type'] }}
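
For reference, the new VLAN branch above renders to os-net-config YAML roughly like the following, assuming eth1 as the tenant NIC member and the TenantIpSubnet Heat parameter:

    -
      type: ovs_bridge
      name: br-vlan
      use_dhcp: false
      addresses:
        -
          ip_netmask: {get_param: TenantIpSubnet}
      members:
        -
          type: interface
          name: eth1
          primary: true
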
diff --git a/build/rpm_specs/opnfv-apex.spec b/build/rpm_specs/opnfv-apex.spec
index 4f9d98fb..c6bb5308 100644
--- a/build/rpm_specs/opnfv-apex.spec
+++ b/build/rpm_specs/opnfv-apex.spec
@@ -112,6 +112,7 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
%{_sysconfdir}/opnfv-apex/network_settings_vlans.yaml
%{_sysconfdir}/opnfv-apex/network_settings_v6.yaml
%{_sysconfdir}/opnfv-apex/k8s-nosdn-nofeature-noha.yaml
+%{_sysconfdir}/opnfv-apex/network_settings_tenant_vlan.yaml
%doc %{_docdir}/opnfv/LICENSE.rst
%doc %{_docdir}/opnfv/installation-instructions.html
%doc %{_docdir}/opnfv/release-notes.rst
@@ -122,6 +123,8 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
%doc %{_docdir}/opnfv/inventory.yaml.example
%changelog
+* Wed Jun 27 2018 Feng Pan <fpan@redhat.com> - 7.0-4
+ Adds network_settings_tenant_vlan.yaml
* Wed Jun 20 2018 Zenghui Shi <zshi@redhat.com> - 7.0-3
Adds Kubernetes deployment scenario
* Fri Jun 15 2018 Tim Rozet <trozet@redhat.com> - 7.0-2
diff --git a/build/upstream-environment.yaml b/build/upstream-environment.yaml
index debe6f3a..2d037c38 100644
--- a/build/upstream-environment.yaml
+++ b/build/upstream-environment.yaml
@@ -7,7 +7,10 @@ parameters:
parameter_defaults:
DockerPuppetProcessCount: 10
- NeutronNetworkVLANRanges: 'datacentre:500:525'
+ #NeutronNetworkVLANRanges: 'datacentre:500:525'
+ #NeutronBridgeMappings: "datacentre:br-ex"
+ #OpenDaylightProviderMappings: "datacentre:br-ex"
+ NeutronNetworkType: vxlan
SshServerOptions:
HostKey:
- '/etc/ssh/ssh_host_rsa_key'
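
With a tenant-VLAN deployment, prep_env fills the commented placeholders above in roughly as follows (values taken from the unit tests in this change, assuming overlay_id_range 'vlan:500:600' and an OpenDaylight deployment):

    parameter_defaults:
      NeutronNetworkVLANRanges: vlan:500:600,datacentre:1:1000
      NeutronBridgeMappings: vlan:br-vlan,datacentre:br-ex
      OpenDaylightProviderMappings: vlan:br-vlan,datacentre:br-ex
      NeutronNetworkType: vlan
      NeutronTunnelTypes: ''
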
diff --git a/config/network/network_settings.yaml b/config/network/network_settings.yaml
index 4c3b63a2..fee6b500 100644
--- a/config/network/network_settings.yaml
+++ b/config/network/network_settings.yaml
@@ -116,6 +116,12 @@ networks:
mtu: 1500
# Tenant network Overlay segmentation ID range:
# VNI, VLAN-ID, etc.
+ # VLAN config should follow the pattern of the Neutron ML2
+ # network_vlan_ranges option; allowed patterns are <physical_network> or
+ # <physical_network>:<vlan_min>:<vlan_max>.
+ # Note that for VLAN config the entry 'datacentre:1:1000' (the physnet
+ # used for external networks by default) will be appended automatically
+ # to the Neutron config if the range specified here does not include
+ # 'datacentre'.
overlay_id_range: 2,65535
# Tenant network segmentation type:
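
A few overlay_id_range values that satisfy the ML2 pattern described in the comment above (illustrative only; physnet1 and physnet2 are hypothetical physnet names):

    # VXLAN/GRE: VNI range
    overlay_id_range: 2,65535
    # VLAN: one physnet with an explicit range
    overlay_id_range: vlan:500:525
    # VLAN: several physnets; 'datacentre:1:1000' is appended if absent
    overlay_id_range: physnet1:100:200,physnet2
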
diff --git a/config/network/network_settings_csit.yaml b/config/network/network_settings_csit.yaml
index 8eec0f65..75271140 100644
--- a/config/network/network_settings_csit.yaml
+++ b/config/network/network_settings_csit.yaml
@@ -116,6 +116,12 @@ networks:
mtu: 1500
# Tenant network Overlay segmentation ID range:
# VNI, VLAN-ID, etc.
+ # VLAN config should follow the pattern of the Neutron ML2
+ # network_vlan_ranges option; allowed patterns are <physical_network> or
+ # <physical_network>:<vlan_min>:<vlan_max>.
+ # Note that for VLAN config the entry 'datacentre:1:1000' (the physnet
+ # used for external networks by default) will be appended automatically
+ # to the Neutron config if the range specified here does not include
+ # 'datacentre'.
overlay_id_range: 2,65535
# Tenant network segmentation type:
diff --git a/config/network/network_settings_tenant_vlan.yaml b/config/network/network_settings_tenant_vlan.yaml
new file mode 100644
index 00000000..e1f009fa
--- /dev/null
+++ b/config/network/network_settings_tenant_vlan.yaml
@@ -0,0 +1,333 @@
+---
+# This configuration file defines Network Environment for a
+# Baremetal Deployment of OPNFV. It contains default values
+# for the following 5 networks:
+#
+# - admin
+# - tenant*
+# - external*
+# - storage*
+# - api*
+# *) optional networks
+#
+# Optional networks will be consolidated with the admin network
+# if not explicitly configured.
+#
+# See short description of the networks in the comments below.
+#
+# "admin" is the short name for Control Plane Network.
+# This network should be IPv4 even it is an IPv6 deployment
+# IPv6 does not have PXE boot support.
+# During OPNFV deployment it is used for node provisioning which will require
+# PXE booting as well as running a DHCP server on this network. Be sure to
+# disable any other DHCP/TFTP server on this network.
+#
+# "tenant" is the network used for tenant traffic.
+#
+# "external" is the network which should have internet or external
+# connectivity. External OpenStack networks will be configured to egress this
+# network. There can be multiple external networks, but only one may be
+# designated "public", on which the OpenStack public APIs will be registered.
+#
+# "storage" is the network for storage I/O.
+#
+# "api" is an optional network for splitting out OpenStack service API
+# communication. This should be used for IPv6 deployments.
+
+
+# Meta data for the network configuration
+network-config-metadata:
+ title: LF-POD-1 Network config
+ version: 0.1
+ created: Mon Dec 28 2015
+ comment: None
+
+# DNS Settings
+dns-domain: opnfvlf.org
+dns-search: opnfvlf.org
+dns_nameservers:
+ - 8.8.8.8
+ - 8.8.4.4
+# NTP servers
+ntp:
+ - 0.se.pool.ntp.org
+ - 1.se.pool.ntp.org
+# Syslog server
+syslog:
+ server: 10.128.1.24
+ transport: 'tcp'
+
+# http(s) proxy settings added to /etc/environment of undercloud and overcloud nodes
+# http_proxy: http://proxy.server:8080
+# https_proxy: https://proxy.server:8081
+
+# Common network settings
+networks:
+ # Admin configuration (pxe and jumpstart)
+ admin:
+ enabled: true
+ # Network settings for the Installer VM on admin network
+ installer_vm:
+ # Indicates if this VM will be bridged to an interface, or to a bond
+ nic_type: interface
+ # Interfaces to bridge for installer VM (use multiple values for bond)
+ members:
+ - em1
+ # VLAN tag to use for this network on Installer VM, native means none
+ vlan: native
+ # IP to assign to Installer VM on this network
+ ip: 192.0.2.1
+ # Usable ip range for the overcloud node IPs (including VIPs)
+ # Last IP is used for host bridge (i.e. br-admin).
+ # If empty entire range is usable.
+ # Cannot overlap with dhcp_range or introspection_range.
+ overcloud_ip_range:
+ - 192.0.2.51
+ - 192.0.2.99
+ # Gateway (only needed when public_network is disabled)
+ gateway: 192.0.2.1
+ # Subnet in CIDR format 192.168.1.0/24
+ cidr: 192.0.2.0/24
+ # DHCP range for the admin network, automatically provisioned if empty
+ dhcp_range:
+ - 192.0.2.2
+ - 192.0.2.50
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface or bond)
+ phys_type: interface
+ # Physical NIC members (Single value allowed for phys_type: interface)
+ members:
+ - eth0
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
+ phys_type: interface
+ members:
+ - eth0
+
+ # Tenant network configuration
+ tenant:
+ enabled: true
+ # Subnet in CIDR format 192.168.1.0/24
+ cidr: 11.0.0.0/24
+ # Tenant network MTU
+ mtu: 1500
+ # Tenant network Overlay segmentation ID range:
+ # VNI, VLAN-ID, etc.
+ # VLAN config should follow the pattern of the Neutron ML2
+ # network_vlan_ranges option; allowed patterns are <physical_network> or
+ # <physical_network>:<vlan_min>:<vlan_max>.
+ # Note that for VLAN config the entry 'datacentre:1:1000' (the physnet
+ # used for external networks by default) will be appended automatically
+ # to the Neutron config if the range specified here does not include
+ # 'datacentre'.
+ overlay_id_range: vlan:500:525
+
+ # Tenant network segmentation type:
+ # vlan, vxlan, gre
+ segmentation_type: vlan
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface/bond)
+ phys_type: interface
+ # UIO driver to use for DPDK scenarios.
+ # The value is ignored for non-DPDK scenarios.
+ uio_driver: uio_pci_generic
+ # VLAN tag to use with this NIC
+ vlan: native
+ # Physical NIC members of this mapping
+ # Single value allowed for phys_type: interface
+ members:
+ # Note: logical names like nic1 are not valid for fdio deployment yet.
+ - eth1
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
+ # Physical interface type (interface/bond)
+ phys_type: interface
+ vlan: native
+ # Note: logical names like nic1 are not valid for fdio deployment yet.
+ members:
+ - eth1
+
+ # Can contain 1 or more external networks
+ external:
+ - public:
+ enabled: true
+ # Public network MTU
+ mtu: 1500
+ # Network settings for the Installer VM on external network
+ # (note only valid on 'public' external network)
+ installer_vm:
+ # Indicates if this VM will be bridged to an interface, or to a bond
+ nic_type: interface
+ vlan: native
+ # Interfaces to bridge for installer VM (use multiple values for bond)
+ members:
+ - em1
+ # IP to assign to Installer VM on this network
+ ip: 192.168.37.1
+ cidr: 192.168.37.0/24
+ gateway: 192.168.37.1
+ # Range to allocate to floating IPs for the public network with Neutron
+ floating_ip_range:
+ - 192.168.37.200
+ - 192.168.37.220
+ # Usable ip range for the overcloud node IPs (including VIPs)
+ # Last IP will be used for host bridge (i.e. br-public).
+ # If empty entire range is usable.
+ # Cannot overlap with dhcp_range or introspection_range.
+ overcloud_ip_range:
+ - 192.168.37.10
+ - 192.168.37.199
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface or bond)
+ # Note that this phys_type for external network will be changed
+ # to vpp_interface for odl_fdio scenarios and linux_bridge for
+ # nosdn_fdio scenarios.
+ phys_type: ovs_bridge
+ # VLAN tag to use with this NIC
+ vlan: native
+ # Physical NIC members of this mapping
+ # Single value allowed for phys_type: interface
+ members:
+ - eth2
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
+ # Note that this phys_type for external network will be changed
+ # to vpp_interface for odl_fdio scenarios and linux_bridge for
+ # nosdn_fdio scenarios.
+ phys_type: ovs_bridge
+ vlan: native
+ members:
+ - eth2
+ # External network to be created in OpenStack by Services tenant
+ external_overlay:
+ name: Public_internet
+ type: flat
+ gateway: 192.168.37.1
+ # another external network
+ # This is an example and not yet supported
+ - private_cloud:
+ enabled: false
+ mtu: 1500
+ # Network settings for the Installer VM on external network
+ # note only valid on 'public' external network
+ installer_vm:
+ # Indicates if this VM will be bridged to an interface, or to a bond
+ nic_type: interface
+ vlan: 101
+ # Interfaces to bridge for installer VM (use multiple values for bond)
+ members:
+ - em1
+ # IP to assign to Installer VM on this network
+ ip: 192.168.38.1
+ cidr: 192.168.38.0/24
+ gateway: 192.168.38.1
+ # Range to allocate to floating IPs for the public network with Neutron
+ floating_ip_range:
+ - 192.168.38.200
+ - 192.168.38.220
+ # Usable IP range for overcloud nodes (including VIPs);
+ # usually this is a shared subnet.
+ # Cannot overlap with dhcp_range or introspection_range.
+ overcloud_ip_range:
+ - 192.168.38.10
+ - 192.168.38.199
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface or bond)
+ phys_type: interface
+ # VLAN tag to use with this NIC
+ vlan: 101
+ # Physical NIC members of this mapping
+ # Single value allowed for phys_type: interface
+ # Note: logical names like nic1 are not valid for fdio deployment yet.
+ members:
+ - eth3
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
+ phys_type: interface
+ vlan: 101
+ members:
+ - eth3
+ # External network to be created in OpenStack by Services tenant
+ external_overlay:
+ name: private_cloud
+ type: vlan
+ segmentation_id: 101
+ gateway: 192.168.38.1
+
+ # Storage network configuration
+ storage:
+ enabled: true
+ # Subnet in CIDR format
+ cidr: 12.0.0.0/24
+ # Storage network MTU
+ mtu: 1500
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface or bond)
+ phys_type: interface
+ # VLAN tag to use with this NIC
+ vlan: native
+ # Physical NIC members of this mapping
+ # Single value allowed for phys_type: interface
+ members:
+ # Note: logical names like nic1 are not valid for fdio deployment yet.
+ - eth3
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
+ phys_type: interface
+ vlan: native
+ members:
+ - eth3
+
+ api:
+ # API network configuration
+ enabled: false
+ # Subnet in CIDR format
+ cidr: fd00:fd00:fd00:4000::/64
+ # VLAN tag to use for Overcloud hosts on this network
+ vlan: 13
+ # Api network MTU
+ mtu: 1500
+ # Mapping of network configuration for Overcloud Nodes
+ nic_mapping:
+ # Mapping for compute profile (nodes assigned as Compute nodes)
+ compute:
+ # Physical interface type (interface or bond)
+ phys_type: interface
+ # VLAN tag to use with this NIC
+ vlan: native
+ # Physical NIC members of this mapping
+ # Single value allowed for phys_type: interface
+ # Note: logical names like nic1 are not valid for fdio deployment yet.
+ members:
+ - eth4
+ # Mapping for controller profile (nodes assigned as Controller nodes)
+ controller:
+ phys_type: interface
+ vlan: native
+ members:
+ - eth4
+
+# Apex specific settings
+apex:
+ networks:
+ admin:
+ # Range used for the introspection phase (examining nodes).
+ # This cannot overlap with the dhcp_range or overcloud_ip_range
+ # for the overcloud default external network.
+ introspection_range:
+ - 192.0.2.100
+ - 192.0.2.120
diff --git a/config/network/network_settings_v6.yaml b/config/network/network_settings_v6.yaml
index 176bc7ca..1dd1097d 100644
--- a/config/network/network_settings_v6.yaml
+++ b/config/network/network_settings_v6.yaml
@@ -116,6 +116,12 @@ networks:
mtu: 1500
# Tenant network Overlay segmentation ID range:
# VNI, VLAN-ID, etc.
+ # VLAN config should follow the pattern of the Neutron ML2
+ # network_vlan_ranges option; allowed patterns are <physical_network> or
+ # <physical_network>:<vlan_min>:<vlan_max>.
+ # Note that for VLAN config the entry 'datacentre:1:1000' (the physnet
+ # used for external networks by default) will be appended automatically
+ # to the Neutron config if the range specified here does not include
+ # 'datacentre'.
overlay_id_range: 2,65535
# Tenant network segmentation type: