-rwxr-xr-x  build/barometer-install.sh                        |  4
-rw-r--r--  build/nics-template.yaml.jinja2                   | 11
-rw-r--r--  build/opnfv-environment.yaml                      |  3
-rwxr-xr-x  build/overcloud-full.sh                           | 51
-rwxr-xr-x  build/overcloud-opendaylight.sh                   |  4
-rw-r--r--  build/patches/congress-parallel-execution.patch   | 86
-rw-r--r--  build/rpm_specs/opnfv-apex-common.spec            |  4
-rw-r--r--  config/deploy/os-odl-fdio-dvr-ha.yaml             | 36
-rw-r--r--  config/deploy/os-odl-fdio-dvr-noha.yaml           | 36
-rw-r--r--  config/network/network_settings.yaml              |  4
-rw-r--r--  config/network/network_settings_v6.yaml           |  4
-rw-r--r--  config/network/network_settings_vlans.yaml        |  4
-rw-r--r--  config/network/network_settings_vpp.yaml          |  4
-rwxr-xr-x  lib/overcloud-deploy-functions.sh                 | 45
-rwxr-xr-x  lib/python/apex_python_utils.py                   | 13
15 files changed, 258 insertions, 51 deletions
diff --git a/build/barometer-install.sh b/build/barometer-install.sh
index 3f015be1..a6b3f911 100755
--- a/build/barometer-install.sh
+++ b/build/barometer-install.sh
@@ -137,8 +137,8 @@ function barometer_pkgs {
   # install puppet-barometer module
   # make directories for config files and mibs
   LIBGUESTFS_BACKEND=direct virt-customize \
-    --run-command 'mkdir /opt/collectd-ceilometer' \
-    --run-command "tar xfz /opt/collectd-ceilometer-plugin.tar.gz -C /opt/collectd-ceilometer" \
+    --run-command 'mkdir /opt/stack/collectd-ceilometer' \
+    --run-command "tar xfz /opt/collectd-ceilometer-plugin.tar.gz -C /opt/stack/collectd-ceilometer" \
     --run-command "cd /etc/puppet/modules/ && mkdir barometer && \
       tar xzf puppet-barometer.tar.gz -C barometer" \
     --run-command 'mkdir /usr/share/mibs/' \
diff --git a/build/nics-template.yaml.jinja2 b/build/nics-template.yaml.jinja2
index 23016183..93d3dc14 100644
--- a/build/nics-template.yaml.jinja2
+++ b/build/nics-template.yaml.jinja2
@@ -131,8 +131,17 @@ resources:
               ip_netmask: {get_param: InternalApiIpSubnet}
             {%- endif %}
             {%- else %}
-            type: interface
+            type: {{ nets['admin']['nic_mapping'][role]['phys_type'] }}
+            {%- if nets['admin']['nic_mapping'][role]['phys_type'] == 'linux_bridge' %}
+            name: br-ctlplane
+            members:
+              -
+                type: interface
+                name: {{ nets['admin']['nic_mapping'][role]['members'][0] }}
+                primary: true
+            {%- else %}
             name: {{ nets['admin']['nic_mapping'][role]['members'][0] }}
+            {%- endif %}
             {%- endif %}
             use_dhcp: false
             dns_servers: {get_param: DnsServers}
diff --git a/build/opnfv-environment.yaml b/build/opnfv-environment.yaml
index 8140d0d7..0f3dd701 100644
--- a/build/opnfv-environment.yaml
+++ b/build/opnfv-environment.yaml
@@ -11,7 +11,10 @@ parameter_defaults:
   NeutronEnableForceMetadata: true
   NeutronEnableDHCPMetadata: true
   NeutronEnableIsolatedMetadata: true
+  #NeutronDhcpAgentsPerNetwork: 3
   NeutronPluginExtensions: 'qos,port_security,data_plane_status'
+  # TODO: VLAN Ranges should be configurable from network settings
+  NeutronNetworkVLANRanges: 'datacentre:500:525'
   # NeutronVPPAgentPhysnets:
   # NovaSchedulerDefaultFilters:
   # # Kernel arguments, this value will be set to kernel arguments specified
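The linux_bridge branch added to nics-template.yaml.jinja2 above only takes effect when the deploy settings ask for it (apex_python_utils.py, later in this change, flips the admin phys_type to 'linux_bridge' for DVR scenarios). As a purely illustrative sketch, the following renders a trimmed-down copy of that branch with jinja2; the template string and the sample nets dict are invented for the example and are not part of the change:

from jinja2 import Template

# Trimmed-down copy of the admin-network branch (illustration only).
admin_nic = Template("""
{%- if nets['admin']['nic_mapping'][role]['phys_type'] == 'linux_bridge' %}
            type: {{ nets['admin']['nic_mapping'][role]['phys_type'] }}
            name: br-ctlplane
            members:
              -
                type: interface
                name: {{ nets['admin']['nic_mapping'][role]['members'][0] }}
                primary: true
{%- else %}
            type: interface
            name: {{ nets['admin']['nic_mapping'][role]['members'][0] }}
{%- endif %}
""")

# Sample data shaped like the nets dict build_nic_template() passes to the template.
nets = {'admin': {'nic_mapping': {'compute': {'phys_type': 'linux_bridge',
                                              'members': ['nic1']}}}}
print(admin_nic.render(nets=nets, role='compute'))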
diff --git a/build/overcloud-full.sh b/build/overcloud-full.sh
index f1ec8650..b821a15c 100755
--- a/build/overcloud-full.sh
+++ b/build/overcloud-full.sh
@@ -41,8 +41,8 @@ rm -rf vsperf vsperf.tar.gz
 git clone https://gerrit.opnfv.org/gerrit/vswitchperf vsperf
 tar czf vsperf.tar.gz vsperf
 
-# Increase disk size by 900MB to accommodate more packages
-qemu-img resize overcloud-full_build.qcow2 +900MB
+# Increase disk size by 1200MB to accommodate more packages
+qemu-img resize overcloud-full_build.qcow2 +1200MB
 
 # expand file system to max disk size
 # installing forked apex-puppet-tripleo
@@ -68,11 +68,13 @@ LIBGUESTFS_BACKEND=direct virt-customize \
   --upload ${BUILD_ROOT}/patches/neutron_openstacksdk_dps.patch:/usr/lib/python2.7/site-packages/ \
   --upload ${BUILD_ROOT}/patches/neutron_openstackclient_dps.patch:/usr/lib/python2.7/site-packages/ \
   --upload ${BUILD_ROOT}/patches/puppet-neutron-add-sfc.patch:/usr/share/openstack-puppet/modules/neutron/ \
+  --upload ${BUILD_ROOT}/patches/congress-parallel-execution.patch:/usr/lib/python2.7/site-packages/ \
   -a overcloud-full_build.qcow2
 
 # apply neutron port data plane status patches
 # https://specs.openstack.org/openstack/neutron-specs/specs/backlog/ocata/port-data-plane-status.html
-# Requirement from Doctor project
+# apply congress parallel execution patch
+# Requirements from Doctor project
 # TODO(cgoncalves): code merged in Pike dev cycle. drop from >= OpenStack Pike / > OPNFV Euphrates
 LIBGUESTFS_BACKEND=direct virt-customize \
   --run-command "cd /usr/lib/python2.7/site-packages/ && patch -p1 < neutron_lib_dps.patch " \
@@ -80,6 +82,7 @@ LIBGUESTFS_BACKEND=direct virt-customize \
   --run-command "cd /usr/lib/python2.7/site-packages/ && patch -p1 < neutron_openstacksdk_dps.patch" \
   --run-command "cd /usr/lib/python2.7/site-packages/ && patch -p1 < neutron_openstackclient_dps.patch" \
   --run-command "cd /usr/share/openstack-puppet/modules/neutron && patch -p1 < puppet-neutron-add-sfc.patch" \
+  --run-command "cd /usr/lib/python2.7/site-packages/ && patch -p1 < congress-parallel-execution.patch" \
   -a overcloud-full_build.qcow2
 
 # Arch dependent on x86
@@ -118,9 +121,6 @@ for package in ${fdio_pkgs[@]}; do
   fdio_pkg_str+=" --upload ${BUILD_DIR}/${package##*/}:/root/fdio/"
 done
 
-# Increase disk size by 900MB to accommodate more packages
-qemu-img resize overcloud-full_build.qcow2 +900MB
-
 # upload dpdk rpms but do not install
 # install fd.io yum repo and packages
 # upload puppet fdio
@@ -131,6 +131,7 @@ LIBGUESTFS_BACKEND=direct virt-customize \
   $dpdk_pkg_str \
   --upload ${BUILD_DIR}/puppet-fdio.tar.gz:/etc/puppet/modules \
   --run-command "cd /etc/puppet/modules && tar xzf puppet-fdio.tar.gz" \
+  --upload ${BUILD_DIR}/fdio.repo:/etc/yum.repos.d/ \
   --run-command "mkdir /root/fdio" \
   --upload ${BUILD_DIR}/noarch/$netvpp_pkg:/root/fdio \
   $fdio_pkg_str \
@@ -147,28 +148,26 @@ LIBGUESTFS_BACKEND=direct virt-customize \
   --upload ${BUILD_ROOT}/patches/puppet-neutron-vpp-ml2-type_drivers-setting.patch:/usr/share/openstack-puppet/modules/neutron/ \
   --run-command "cd /usr/share/openstack-puppet/modules/neutron && patch -p1 < puppet-neutron-vpp-ml2-type_drivers-setting.patch" \
   -a overcloud-full_build.qcow2
-fi
 
 # upload and install barometer packages
-if [ "$(uname -i)" == 'x86_64' ]; then
 barometer_pkgs overcloud-full_build.qcow2
-fi
-
-# Build OVS with NSH
-rm -rf ovs_nsh_patches
-rm -rf ovs
-git clone https://github.com/yyang13/ovs_nsh_patches.git
-git clone https://github.com/openvswitch/ovs.git
-pushd ovs > /dev/null
-git checkout v2.6.1
-cp ../ovs_nsh_patches/v2.6.1/*.patch ./
-cp ${BUILD_ROOT}/patches/ovs-fix-build-on-RHEL-7.3.patch ./
-# Hack for build servers that have no git config
-git config user.email "apex@opnfv.com"
-git config user.name "apex"
-git am *.patch
-popd > /dev/null
-tar czf ovs.tar.gz ovs
+
+  # Build OVS with NSH
+  rm -rf ovs_nsh_patches
+  rm -rf ovs
+  git clone https://github.com/yyang13/ovs_nsh_patches.git
+  git clone https://github.com/openvswitch/ovs.git
+  pushd ovs > /dev/null
+  git checkout v2.6.1
+  cp ../ovs_nsh_patches/v2.6.1/*.patch ./
+  cp ${BUILD_ROOT}/patches/ovs-fix-build-on-RHEL-7.3.patch ./
+  # Hack for build servers that have no git config
+  git config user.email "apex@opnfv.com"
+  git config user.name "apex"
+  git am *.patch
+  popd > /dev/null
+  tar czf ovs.tar.gz ovs
+
 LIBGUESTFS_BACKEND=direct virt-customize \
   --upload ${BUILD_ROOT}/build_ovs_nsh.sh:/root/ \
@@ -177,5 +176,7 @@ LIBGUESTFS_BACKEND=direct virt-customize \
   --run-command "cd /root/ovs && /root/build_ovs_nsh.sh" \
   -a overcloud-full_build.qcow2
 
+fi # end x86_64 specific items
+
 mv -f overcloud-full_build.qcow2 overcloud-full.qcow2
 popd > /dev/null
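overcloud-full.sh stages each patch into the image with a virt-customize --upload and then applies it in place with patch -p1 in a follow-up --run-command. Below is a minimal sketch of that single step, reduced to one patch and driven from Python via subprocess; the file names are taken from the script above, but the real script chains many uploads and commands in one virt-customize invocation:

import os
import subprocess

patch = 'congress-parallel-execution.patch'
image = 'overcloud-full_build.qcow2'
cmd = [
    'virt-customize',
    '--upload', '{}:/usr/lib/python2.7/site-packages/'.format(patch),
    '--run-command',
    'cd /usr/lib/python2.7/site-packages/ && patch -p1 < {}'.format(patch),
    '-a', image,
]
# Same LIBGUESTFS_BACKEND=direct environment the build scripts use.
subprocess.check_call(cmd, env=dict(os.environ, LIBGUESTFS_BACKEND='direct'))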
diff --git a/build/overcloud-opendaylight.sh b/build/overcloud-opendaylight.sh
index 82ff8f74..ae5764c2 100755
--- a/build/overcloud-opendaylight.sh
+++ b/build/overcloud-opendaylight.sh
@@ -49,9 +49,6 @@ popd > /dev/null
 # Download ODL netvirt for VPP
 populate_cache http://artifacts.opnfv.org/apex/danube/fdio_netvirt/opendaylight-7.0.0-0.1.20170531snap665.el7.noarch.rpm
 
-# Download ODL for fdio scenarios
-populate_cache http://artifacts.opnfv.org/apex/danube/fdio_odls/fdio_odl_carbon.tar.gz
-
 # install ODL packages
 # Patch in OPNFV custom puppet-tripleO
 # install Honeycomb
@@ -72,7 +69,6 @@ LIBGUESTFS_BACKEND=direct virt-customize \
   --install capnproto-libs,capnproto \
   --upload ${BUILD_ROOT}/patches/neutron-patch-NSDriver.patch:/usr/lib/python2.7/site-packages/ \
   --upload ${CACHE_DIR}/opendaylight-7.0.0-0.1.20170531snap665.el7.noarch.rpm:/root/ \
-  --upload ${CACHE_DIR}/fdio_odl_carbon.tar.gz:/root/ \
   -a overcloud-full-opendaylight_build.qcow2
 
 # Arch dependent on x86
diff --git a/build/patches/congress-parallel-execution.patch b/build/patches/congress-parallel-execution.patch
new file mode 100644
index 00000000..ca48c6f3
--- /dev/null
+++ b/build/patches/congress-parallel-execution.patch
@@ -0,0 +1,86 @@
+From 02ff94adb9bc433549f5b3483f36b2ede19b3614 Mon Sep 17 00:00:00 2001
+From: Masahito Muroi <muroi.masahito@lab.ntt.co.jp>
+Date: Tue, 18 Apr 2017 04:22:24 +0900
+Subject: [PATCH] Parallel execution in DataSource Driver
+
+Datasource driver calls datasource's API serially when Policy Engine sends
+execution requests. It could take long time number of execution targets is
+a lots.
+
+This patch changes datasource driver calls datasource's API in parallel.
+
+Closes-Bug: #1670529
+Change-Id: I065bd625004401a1bb78c6d56d929bdaf76d37f0
+---
+ congress/datasources/datasource_driver.py | 15 +++++++++------
+ congress/policy_engines/agnostic.py       |  6 ++++--
+ 2 files changed, 13 insertions(+), 8 deletions(-)
+
+diff --git a/congress/datasources/datasource_driver.py b/congress/datasources/datasource_driver.py
+index eec83017..8eeb62d7 100644
+--- a/congress/datasources/datasource_driver.py
++++ b/congress/datasources/datasource_driver.py
+@@ -1176,8 +1176,8 @@ class DataSourceDriverEndpoints(data_service.DataServiceEndPoints):
+     def request_refresh(self, context, source_id):
+         return self.service.request_refresh()
+ 
+-    def request_execute(self, context, action, action_args):
+-        return self.service.request_execute(context, action, action_args)
++    def request_execute(self, context, action, action_args, wait):
++        return self.service.request_execute(context, action, action_args, wait)
+ 
+ 
+ class PushedDataSourceDriver(DataSourceDriver):
+@@ -1574,18 +1574,21 @@ class ExecutionDriver(object):
+         return {'results': actions}
+ 
+     # Note(thread-safety): blocking function
+-    def request_execute(self, context, action, action_args):
++    def request_execute(self, context, action, action_args, wait):
+         """Accept execution requests and execute requests from leader"""
+         node_id = context.get('node_id', None)
++        th = None
+         if self._leader_node_id == node_id:
+-            # Note(thread-safety): blocking call
+-            self.execute(action, action_args)
++            # Note(thread-safety): blocking call
++            th = eventlet.spawn(self.execute, action, action_args)
+         elif node_id is not None:
+             if self._leader_node_id is None:
+                 self._leader_node_id = node_id
+                 LOG.debug('New local leader %s selected', self._leader_node_id)
+             # Note(thread-safety): blocking call
+-            self.execute(action, action_args)
++            th = eventlet.spawn(self.execute, action, action_args)
++        if wait and th:
++            th.wait()
+ 
+     # Note(thread-safety): blocking function (in some subclasses)
+     def execute(self, action, action_args):
+diff --git a/congress/policy_engines/agnostic.py b/congress/policy_engines/agnostic.py
+index d1d67bdc..df09ed96 100644
+--- a/congress/policy_engines/agnostic.py
++++ b/congress/policy_engines/agnostic.py
+@@ -2021,7 +2021,9 @@ class DseRuntime (Runtime, data_service.DataService):
+         """Overloading the DseRuntime version of _rpc so it uses dse2."""
+         # TODO(ramineni): This is called only during execute_action, added
+         # the same function name for compatibility with old arch
+-        args = {'action': action, 'action_args': args}
++
++        retry_rpc = cfg.CONF.dse.execute_action_retry
++        args = {'action': action, 'action_args': args, 'wait': retry_rpc}
+ 
+         def execute_once():
+             return self.rpc(service_name, 'request_execute', args,
+@@ -2045,7 +2047,7 @@ class DseRuntime (Runtime, data_service.DataService):
+             action, args['action_args'])
+ 
+         # long timeout for action execution because actions can take a while
+-        if not cfg.CONF.dse.execute_action_retry:
++        if not retry_rpc:
+             # Note(thread-safety): blocking call
+             # Only when thread pool at capacity
+             eventlet.spawn_n(execute_once)
+-- 
+2.12.3
+
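The bundled patch above replaces Congress's serial self.execute() call with eventlet.spawn() and blocks on the greenthread only when the RPC caller passes wait=True. A self-contained sketch of that pattern follows; the slow execute() body here is a stand-in, not Congress code:

import eventlet
eventlet.monkey_patch()


def execute(action, action_args):
    # stand-in for a slow datasource API call
    eventlet.sleep(0.1)
    return '{}({})'.format(action, action_args)


def request_execute(action, action_args, wait=False):
    # fire the action off in its own greenthread instead of calling it inline
    th = eventlet.spawn(execute, action, action_args)
    if wait and th:
        return th.wait()  # block only when the caller expects the result
    return None


print(request_execute('reboot', {'server': 'vm-1'}, wait=True))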
diff --git a/build/rpm_specs/opnfv-apex-common.spec b/build/rpm_specs/opnfv-apex-common.spec
index 15f62c1f..ccb100f3 100644
--- a/build/rpm_specs/opnfv-apex-common.spec
+++ b/build/rpm_specs/opnfv-apex-common.spec
@@ -57,6 +57,8 @@ install config/deploy/os-odl-sfc-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex
 install config/deploy/os-odl-fdio-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-fdio-noha.yaml
 install config/deploy/os-odl_netvirt-fdio-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl_netvirt-fdio-noha.yaml
 install config/deploy/os-odl-fdio-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-fdio-ha.yaml
+install config/deploy/os-odl-fdio-dvr-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-fdio-dvr-ha.yaml
+install config/deploy/os-odl-fdio-dvr-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-fdio-dvr-noha.yaml
 install config/deploy/os-odl-nofeature-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-nofeature-ha.yaml
 install config/deploy/os-odl-nofeature-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-nofeature-noha.yaml
 install config/deploy/os-odl-ovs_dpdk-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-odl-ovs_dpdk-ha.yaml
@@ -146,6 +148,8 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
 %{_sysconfdir}/opnfv-apex/os-odl-fdio-noha.yaml
 %{_sysconfdir}/opnfv-apex/os-odl_netvirt-fdio-noha.yaml
 %{_sysconfdir}/opnfv-apex/os-odl-fdio-ha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-fdio-dvr-ha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-fdio-dvr-noha.yaml
 %{_sysconfdir}/opnfv-apex/os-odl-ovs_dpdk-noha.yaml
 %{_sysconfdir}/opnfv-apex/os-odl-ovs_dpdk-ha.yaml
 %{_sysconfdir}/opnfv-apex/os-odl-nofeature-ha.yaml
diff --git a/config/deploy/os-odl-fdio-dvr-ha.yaml b/config/deploy/os-odl-fdio-dvr-ha.yaml
new file mode 100644
index 00000000..564cf0b6
--- /dev/null
+++ b/config/deploy/os-odl-fdio-dvr-ha.yaml
@@ -0,0 +1,36 @@
+global_params:
+  ha_enabled: true
+
+deploy_options:
+  sdn_controller: opendaylight
+  odl_version: carbon
+  odl_vpp_routing_node: dvr
+  tacker: true
+  congress: true
+  sfc: false
+  vpn: false
+  vpp: true
+  dataplane: fdio
+  performance:
+    Controller:
+      kernel:
+        hugepages: 1024
+        hugepagesz: 2M
+        intel_iommu: 'on'
+        iommu: pt
+        isolcpus: 1,2
+      vpp:
+        main-core: 1
+        corelist-workers: 2
+        uio-driver: uio_pci_generic
+    Compute:
+      kernel:
+        hugepagesz: 2M
+        hugepages: 2048
+        intel_iommu: 'on'
+        iommu: pt
+        isolcpus: 1,2
+      vpp:
+        main-core: 1
+        corelist-workers: 2
+        uio-driver: uio_pci_generic
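In the new scenario files the performance/[Role]/kernel keys are the values that end up as kernel boot arguments (compare the "# # Kernel arguments" note in opnfv-environment.yaml). A rough sketch of flattening the Compute kernel block into a boot-argument string follows; how Apex actually performs this mapping is not shown in this diff, so treat the snippet purely as an assumption for illustration:

import yaml

with open('config/deploy/os-odl-fdio-dvr-ha.yaml') as f:
    deploy = yaml.safe_load(f)

kernel = deploy['deploy_options']['performance']['Compute']['kernel']
boot_args = ' '.join('{}={}'.format(k, v) for k, v in kernel.items())
# e.g. "hugepagesz=2M hugepages=2048 intel_iommu=on iommu=pt isolcpus=1,2"
print(boot_args)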
diff --git a/config/deploy/os-odl-fdio-dvr-noha.yaml b/config/deploy/os-odl-fdio-dvr-noha.yaml
new file mode 100644
index 00000000..24c433bd
--- /dev/null
+++ b/config/deploy/os-odl-fdio-dvr-noha.yaml
@@ -0,0 +1,36 @@
+global_params:
+  ha_enabled: false
+
+deploy_options:
+  sdn_controller: opendaylight
+  odl_version: carbon
+  odl_vpp_routing_node: dvr
+  tacker: true
+  congress: true
+  sfc: false
+  vpn: false
+  vpp: true
+  dataplane: fdio
+  performance:
+    Controller:
+      kernel:
+        hugepages: 1024
+        hugepagesz: 2M
+        intel_iommu: 'on'
+        iommu: pt
+        isolcpus: 1,2
+      vpp:
+        main-core: 1
+        corelist-workers: 2
+        uio-driver: uio_pci_generic
+    Compute:
+      kernel:
+        hugepagesz: 2M
+        hugepages: 2048
+        intel_iommu: 'on'
+        iommu: pt
+        isolcpus: 1,2
+      vpp:
+        main-core: 1
+        corelist-workers: 2
+        uio-driver: uio_pci_generic
diff --git a/config/network/network_settings.yaml b/config/network/network_settings.yaml
index 88753346..fe11a9b5 100644
--- a/config/network/network_settings.yaml
+++ b/config/network/network_settings.yaml
@@ -78,7 +78,7 @@ networks:
       # If empty entire range is usable.
       # Cannot overlap with dhcp_range or introspection_range.
       overcloud_ip_range:
-        - 192.0.2.11
+        - 192.0.2.51
         - 192.0.2.99
       # Gateway (only needed when public_network is disabled)
       gateway: 192.0.2.1
@@ -87,7 +87,7 @@ networks:
       # DHCP range for the admin network, automatically provisioned if empty
       dhcp_range:
         - 192.0.2.2
-        - 192.0.2.10
+        - 192.0.2.50
       # Mapping of network configuration for Overcloud Nodes
       nic_mapping:
         # Mapping for compute profile (nodes assigned as Compute nodes)
diff --git a/config/network/network_settings_v6.yaml b/config/network/network_settings_v6.yaml
index 57257633..25aaee89 100644
--- a/config/network/network_settings_v6.yaml
+++ b/config/network/network_settings_v6.yaml
@@ -78,7 +78,7 @@ networks:
       # If empty entire range is usable.
       # Cannot overlap with dhcp_range or introspection_range.
       overcloud_ip_range:
-        - 192.0.2.11
+        - 192.0.2.51
         - 192.0.2.99
       # Gateway (only needed when public_network is disabled)
       gateway: 192.0.2.1
@@ -87,7 +87,7 @@ networks:
      # DHCP range for the admin network, automatically provisioned if empty
       dhcp_range:
         - 192.0.2.2
-        - 192.0.2.10
+        - 192.0.2.50
       # Mapping of network configuration for Overcloud Nodes
       nic_mapping:
         # Mapping for compute profile (nodes assigned as Compute nodes)
diff --git a/config/network/network_settings_vlans.yaml b/config/network/network_settings_vlans.yaml
index be4dc662..345dbbde 100644
--- a/config/network/network_settings_vlans.yaml
+++ b/config/network/network_settings_vlans.yaml
@@ -78,7 +78,7 @@ networks:
       # If empty entire range is usable.
       # Cannot overlap with dhcp_range or introspection_range.
       overcloud_ip_range:
-        - 192.0.2.11
+        - 192.0.2.51
         - 192.0.2.99
       # Gateway (only needed when public_network is disabled)
       gateway: 192.0.2.1
@@ -87,7 +87,7 @@ networks:
       # DHCP range for the admin network, automatically provisioned if empty
       dhcp_range:
         - 192.0.2.2
-        - 192.0.2.10
+        - 192.0.2.50
       # Mapping of network configuration for Overcloud Nodes
       nic_mapping:
         # Mapping for compute profile (nodes assigned as Compute nodes)
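The admin-network renumbering above widens the provisioning DHCP pool to end at 192.0.2.50 and starts overcloud_ip_range at 192.0.2.51, which keeps the file's own rule that the overcloud range cannot overlap the DHCP range. A small illustrative check (not Apex code):

from ipaddress import ip_address

dhcp_range = ('192.0.2.2', '192.0.2.50')
overcloud_ip_range = ('192.0.2.51', '192.0.2.99')


def overlaps(a, b):
    a_lo, a_hi = (ip_address(x) for x in a)
    b_lo, b_hi = (ip_address(x) for x in b)
    return a_lo <= b_hi and b_lo <= a_hi


assert not overlaps(dhcp_range, overcloud_ip_range)
print('admin DHCP and overcloud ranges are disjoint')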
diff --git a/config/network/network_settings_vpp.yaml b/config/network/network_settings_vpp.yaml
index f080af1d..2f6bba5e 100644
--- a/config/network/network_settings_vpp.yaml
+++ b/config/network/network_settings_vpp.yaml
@@ -78,7 +78,7 @@ networks:
       # If empty entire range is usable.
       # Cannot overlap with dhcp_range or introspection_range.
       overcloud_ip_range:
-        - 192.0.2.11
+        - 192.0.2.51
         - 192.0.2.99
       # Gateway (only needed when public_network is disabled)
       gateway: 192.0.2.1
@@ -87,7 +87,7 @@ networks:
       # DHCP range for the admin network, automatically provisioned if empty
       dhcp_range:
         - 192.0.2.2
-        - 192.0.2.10
+        - 192.0.2.50
       # Mapping of network configuration for Overcloud Nodes
       nic_mapping:
         # Mapping for compute profile (nodes assigned as Compute nodes)
"/NovaComputeExtraConfig:/ c\ NovaComputeExtraConfig:\n tripleo::profile::base::neutron::agents::honeycomb::interface_role_mapping: ['${tenant_nic_mapping_compute_members}:tenant-interface','${external_nic_mapping_compute_members}:public-interface']" ${ENV_FILE} EOI + fi fi @@ -300,6 +322,19 @@ EOI # fi fi + # Override ODL if we enable dvr for fdio +# TODO: Update ODL version when specific version is known. +# if [[ "${deploy_options_array['odl_vpp_routing_node']}" == 'dvr' ]]; then +# ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI +# LIBGUESTFS_BACKEND=direct virt-customize --run-command "rm -rf /opt/opendaylight/*" \ +# --run-command "tar zxvf /root/fdio_odl_carbon.tar.gz -C /opt/opendaylight/ --strip-components=1" \ +# --run-command "chown odl:odl -R /opt/opendaylight" \ +# -a overcloud-full.qcow2 +#EOI +# fi + + + # check if ceph should be enabled if [ "${deploy_options_array['ceph']}" == 'True' ]; then DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml" @@ -316,10 +351,6 @@ EOI EOI fi - # get number of nodes available in inventory - num_control_nodes=$(ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "grep -c profile:control /home/stack/instackenv.json") - num_compute_nodes=$(ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "grep -c profile:compute /home/stack/instackenv.json") - # check if HA is enabled if [[ "$ha_enabled" == "True" ]]; then if [ "$num_control_nodes" -lt 3 ]; then diff --git a/lib/python/apex_python_utils.py b/lib/python/apex_python_utils.py index c4c6dfe8..70fc592d 100755 --- a/lib/python/apex_python_utils.py +++ b/lib/python/apex_python_utils.py @@ -113,14 +113,19 @@ def build_nic_template(args): if ds['dataplane'] == 'fdio': nets['tenant']['nic_mapping'][args.role]['phys_type'] = 'vpp_interface' - nets['external'][0]['nic_mapping'][args.role]['phys_type'] =\ - 'vpp_interface' + if ds['sdn_controller'] == 'opendaylight': + nets['external'][0]['nic_mapping'][args.role]['phys_type'] =\ + 'vpp_interface' + if ds.get('odl_vpp_routing_node') == 'dvr': + nets['admin']['nic_mapping'][args.role]['phys_type'] =\ + 'linux_bridge' if ds.get('performance', {}).get(args.role.title(), {}).get('vpp', {})\ .get('uio-driver'): nets['tenant']['nic_mapping'][args.role]['uio-driver'] =\ ds['performance'][args.role.title()]['vpp']['uio-driver'] - nets['external'][0]['nic_mapping'][args.role]['uio-driver'] =\ - ds['performance'][args.role.title()]['vpp']['uio-driver'] + if ds['sdn_controller'] == 'opendaylight': + nets['external'][0]['nic_mapping'][args.role]['uio-driver'] =\ + ds['performance'][args.role.title()]['vpp']['uio-driver'] if ds.get('performance', {}).get(args.role.title(), {}).get('vpp', {})\ .get('interface-options'): nets['tenant']['nic_mapping'][args.role]['interface-options'] =\ |