summaryrefslogtreecommitdiffstats
path: root/build
diff options
context:
space:
mode:
Diffstat (limited to 'build')
-rw-r--r--build/Makefile60
-rw-r--r--build/build_perf_image.sh54
-rwxr-xr-xbuild/build_quagga.sh250
-rw-r--r--build/first-boot.yaml53
-rw-r--r--build/neutron-patch-NSDriver.patch208
-rw-r--r--build/neutron/agent/interface/interface.py552
-rw-r--r--build/neutron/agent/l3/namespaces.py142
-rw-r--r--build/neutron/agent/l3/router_info.py996
-rw-r--r--build/nics-template.yaml.jinja25
-rw-r--r--build/opnfv-environment.yaml13
-rwxr-xr-xbuild/overcloud-full.sh16
-rwxr-xr-xbuild/overcloud-opendaylight.sh10
-rw-r--r--build/patches/fix_quagga_make_dist.patch28
-rw-r--r--build/patches/fix_zrpcd_make_dist.patch29
-rw-r--r--build/patches/zrpcd_hardcoded_paths.patch58
-rw-r--r--build/puppet-neutron-add-odl-settings.patch47
-rw-r--r--build/puppet-neutron/lib/puppet/provider/neutron_agent_vpp/ini_setting.rb15
-rw-r--r--build/puppet-neutron/lib/puppet/type/neutron_agent_vpp.rb28
-rw-r--r--build/puppet-neutron/manifests/agents/ml2/networking-vpp.pp65
-rw-r--r--build/puppet-neutron/manifests/plugins/ml2/networking-vpp.pp51
-rw-r--r--build/rpm_specs/c_capnproto.spec45
-rw-r--r--build/rpm_specs/opnfv-apex-common.spec16
-rw-r--r--build/rpm_specs/quagga.spec748
-rw-r--r--build/rpm_specs/zrpc.spec46
-rw-r--r--build/set_perf_images.sh50
-rwxr-xr-xbuild/undercloud.sh9
-rw-r--r--build/variables.sh4
27 files changed, 1613 insertions, 1985 deletions
diff --git a/build/Makefile b/build/Makefile
index fb0d0c32..3a26b4cb 100644
--- a/build/Makefile
+++ b/build/Makefile
@@ -36,6 +36,9 @@ export RPMVERS = $(shell grep Version $(shell pwd)/rpm_specs/opnfv-apex.spec | h
export BUILD_ROOT = $(shell pwd)
export BUILD_DIR = $(shell dirname $$(pwd))/.build
export CACHE_DIR = $(shell dirname $$(pwd))/.cache
+export PATCHES_DIR = $(BUILD_ROOT)/patches
+export QUAGGA_BUILD_DIR = $(BUILD_DIR)/quagga_build_dir
+export QUAGGA_RPMS_DIR = $(QUAGGA_BUILD_DIR)/rpmbuild
export RPM_DIR_ARGS = -D '_topdir $(BUILD_DIR)' -D '_builddir $(BUILD_DIR)' -D '_sourcedir $(BUILD_DIR)' -D '_rpmdir $(BUILD_DIR)' -D '_specdir $(BUILD_DIR)' -D '_srcrpmdir $(BUILD_DIR)'
export RPMREL = $(BUILD_DIR)/noarch/opnfv-apex-release-$(RPMVERS)-$(shell echo ${RELEASE} | tr -d '_-').noarch.rpm
@@ -242,7 +245,7 @@ $(BUILD_DIR)/overcloud-full.qcow2: congress-rpm tacker-rpm networking-vpp-rpm
###############
.PHONY: overcloud-opendaylight
-overcloud-opendaylight: $(BUILD_DIR)/overcloud-full-opendaylight.qcow2
+overcloud-opendaylight: quagga-zrpc $(BUILD_DIR)/overcloud-full-opendaylight.qcow2
$(BUILD_DIR)/overcloud-full-opendaylight.qcow2: $(BUILD_DIR)/overcloud-full.qcow2
@echo "Building the Apex OpenDaylight Overcloud Image"
@@ -369,3 +372,58 @@ python3-markupsafe:
&& sed -i 's/python3-pytest/python34-pytest/' python-markupsafe.spec \
&& sed -i 's/python3-markupsafe/python34-markupsafe/' python-markupsafe.spec \
&& rpmbuild -ba python-markupsafe.spec $(RPM_DIR_ARGS) -D "with_python3 1"
+
+##################
+# Quagga Clean #
+##################
+.PHONY: quagga-clean
+quagga-clean:
+ @rm -rf $(QUAGGA_BUILD_DIR)
+ @sudo yum -y remove zrpc* quagga* c-capnproto* thrift*
+
+#################
+# Quagga+ZRPC #
+#################
+.PHONY: quagga-zrpc
+quagga-zrpc: quagga-clean thrift-rpm capnproto-rpm quagga-rpm zrpc-rpm
+
+##########
+# ZRPC #
+##########
+.PHONY: zrpc-rpm
+zrpc-rpm: quagga-rpm $(QUAGGA_RPMS_DIR)/zrpcd-%.x86_64.rpm
+
+$(QUAGGA_RPMS_DIR)/zrpcd-%.x86_64.rpm:
+ @echo "Building ZRPC RPM"
+ @./build_quagga.sh -a zrpc
+
+############
+# Quagga #
+############
+.PHONY: quagga-rpm
+quagga-rpm: $(QUAGGA_RPMS_DIR)/RPMS/x86_64/quagga-1.1.0_%.el7.centos.x86_64.rpm
+
+$(QUAGGA_RPMS_DIR)/RPMS/x86_64/quagga-1.1.0_%.el7.centos.x86_64.rpm:
+ @echo "Building Quagga RPM"
+ @./build_quagga.sh -a quagga
+
+###############
+# Capnproto #
+###############
+.PHONY: capnproto-rpm
+capnproto-rpm: $(QUAGGA_RPMS_DIR)/RPMS/x86_64/c-capnproto-%.x86_64.rpm
+
+$(QUAGGA_RPMS_DIR)/RPMS/x86_64/c-capnproto-%.x86_64.rpm:
+ @echo "Building capnproto RPMs"
+ @./build_quagga.sh -a capnproto
+
+############
+# Thrift #
+############
+
+.PHONY: thrift-rpm
+thrift-rpm: $(QUAGGA_RPMS_DIR)/RPMS/x86_64/thrift-%.x86_64.rpm
+
+$(QUAGGA_RPMS_DIR)/RPMS/x86_64/thrift-%.x86_64.rpm:
+ @echo "Building Thrift RPMs"
+ @./build_quagga.sh -a thrift
diff --git a/build/build_perf_image.sh b/build/build_perf_image.sh
deleted file mode 100644
index 68f74ea2..00000000
--- a/build/build_perf_image.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/bash
-##############################################################################
-# Copyright (c) 2016 Red Hat Inc.
-# Michael Chapman <michapma@redhat.com>
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-ROLE=$1
-shift
-CATEGORY=$1
-shift
-KEY=$1
-shift
-VALUE=$1
-shift
-
-IMAGE=$ROLE-overcloud-full.qcow2
-
-# Create image copy for this role
-if [ ! -f $IMAGE ] ; then
- cp overcloud-full.qcow2 $IMAGE
-fi
-
-if [ "$CATEGORY" == "nova" ]; then
- if [ "$KEY" == "libvirtpin" ]; then
- sudo sed -i "s/#LibvirtCPUPinSet:.*/LibvirtCPUPinSet: '${VALUE}'/" /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
- fi
-fi
-
-if [ "$CATEGORY" == "kernel" ]; then
- echo "${KEY}=${VALUE}" >> $ROLE-kernel_params.txt
- if [[ "$dataplane" == 'fdio' && "$KEY" == 'hugepages' ]]; then
- # set kernel hugepages params for fdio
- LIBGUESTFS_BACKEND=direct virt-customize --run-command "echo vm.hugetlb_shm_group=0 >> /usr/lib/sysctl.d/00-system.conf" \
- --run-command "echo vm.max_map_count=$(printf "%.0f" $(echo 2.2*$VALUE | bc)) >> /usr/lib/sysctl.d/00-system.conf" \
- --run-command "echo kernel.shmmax==$((VALUE * 2 * 1024 * 1024)) >> /usr/lib/sysctl.d/00-system.conf" \
- -a ${IMAGE}
- fi
-fi
-
-if [ "$CATEGORY" == "vpp" ]; then
- if [ "$KEY" == "main-core" ]; then
- sudo sed -i "/${ROLE}VPPMainCore:/c\ ${ROLE}VPPMainCore: '${VALUE}'" /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
- fi
- if [ "$KEY" == "corelist-workers" ]; then
- sudo sed -i "/${ROLE}VPPCorelistWorkers:/c\ ${ROLE}VPPCorelistWorkers: '${VALUE}'" /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
- fi
- if [ "$KEY" == "uio-driver" ]; then
- sudo sed -i "/${ROLE}UIODriver:/c\ ${ROLE}UIODriver: '${VALUE}'" /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
- fi
-fi \ No newline at end of file
diff --git a/build/build_quagga.sh b/build/build_quagga.sh
new file mode 100755
index 00000000..7d298e57
--- /dev/null
+++ b/build/build_quagga.sh
@@ -0,0 +1,250 @@
+#!/usr/bin/env bash
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (Red Hat) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -xe
+
+ARTIFACT=None
+
+# Builds Quagga, Zebra and other dependency RPMs for CentOS 7
+# Install package dependencies
+install_quagga_build_deps() {
+ sudo yum -y install automake bison flex libtool make readline-devel \
+ texinfo texi2html rpm-build libcap-devel groff net-snmp-devel pam-devel glib2 glib2-devel epel-release spectool \
+ wget git gcc-c++ openssl-devel boost-devel boost-static gtest zeromq-4.1.4 zeromq-devel-4.1.4 \
+ capnproto-devel capnproto-libs capnproto
+ sudo yum -y groupinstall "Development Tools"
+}
+
+display_usage ()
+{
+cat << EOF
+$0 Builds Quagga/ZRPC and Dependency RPMs
+
+usage: $0 [ [-a | --artifact] artifact ]
+
+OPTIONS:
+ -a artifact to build (thrift, capnproto, quagga, zrpc). Default: All artifacts.
+ -c clean all build directories
+ -h help, prints this help text
+
+Example:
+build_quagga.sh -a thrift
+EOF
+}
+
+parse_cmdline() {
+ while [ "${1:0:1}" = "-" ]
+ do
+ case "$1" in
+ -h|--help)
+ display_usage
+ exit 0
+ ;;
+ -a|--artifact)
+ ARTIFACT=${2}
+ shift 2
+ ;;
+ -c|--clean)
+ CLEAN="True"
+ shift 1
+ ;;
+ *)
+ display_usage
+ exit 1
+ ;;
+ esac
+ done
+
+}
+
+# Removes build directory folder and re-creates RPM DIRs to use
+function quagga_clean(){
+ rm -rf ${QUAGGA_BUILD_DIR}
+ sudo yum remove -y zrpc* quagga* thrift* c-capnproto*
+}
+
+# Build Thrift RPM
+function build_thrift(){
+ rm -rf thrift
+ git clone https://git-wip-us.apache.org/repos/asf/thrift.git
+ pushd thrift
+ git checkout 0.10.0
+ wget https://issues.apache.org/jira/secure/attachment/12840511/0002-THRIFT-3986-using-autoreconf-i-fails-because-of-miss.patch
+ wget https://issues.apache.org/jira/secure/attachment/12840512/0001-THRIFT-3987-externalise-declaration-of-thrift-server.patch
+ patch -p1 < 0002-THRIFT-3986-using-autoreconf-i-fails-because-of-miss.patch
+ patch -p1 < 0001-THRIFT-3987-externalise-declaration-of-thrift-server.patch
+ autoreconf -i
+ ./configure --without-qt4 --without-qt5 --without-csharp --without-java \
+ --without-erlang --without-nodejs --without-perl --without-python \
+ --without-php --without-php_extension --without-dart --without-ruby \
+ --without-haskell --without-go --without-haxe --without-d
+    # Hack: somehow the php testing file is not there
+ # We will disable php anyhow later on.
+ touch lib/php/src/ext/thrift_protocol/run-tests.php
+ make dist
+ pushd contrib/
+ spectool -g -R thrift.spec
+ mv ../thrift-*.tar.gz $rpmbuild/SOURCES/
+ rpmbuild --define "_topdir $rpmbuild" -ba thrift.spec --define "without_ruby 1" --define "without-php 1"
+ popd > /dev/null
+ popd > /dev/null
+}
+
+# c-capnproto RPM
+# This is a library for capnproto in C. Not to be confused with
+# the capnproto provided by the repos
+function build_capnproto(){
+ rm -rf c-capnproto
+ git clone https://github.com/opensourcerouting/c-capnproto
+ pushd c-capnproto
+ git checkout 332076e52257
+ autoreconf -i
+ ./configure --without-gtest
+ make dist
+
+ cp ${BUILD_ROOT}/rpm_specs/c_capnproto.spec $rpmbuild/SPECS/
+ cp c-capnproto-*.tar.gz $rpmbuild/SOURCES/
+ rpmbuild --define "_topdir $rpmbuild" -ba $rpmbuild/SPECS/c_capnproto.spec
+ popd > /dev/null
+}
+
+build_quagga(){
+ # Build Quagga
+ rm -rf quagga
+ sudo yum -y install $rpmbuild/RPMS/x86_64/*.rpm
+ git clone https://github.com/6WIND/quagga.git
+ pushd quagga > /dev/null
+    # checkout the parent of the below patch.
+ # Once the issue addressed by the patch is fixed
+ # these two lines can be removed.
+ git checkout 95bb0f4a
+ patch -p1 < ${PATCHES_DIR}/fix_quagga_make_dist.patch
+ autoreconf -i
+ ./configure --with-zeromq --with-ccapnproto --enable-user=quagga \
+ --enable-group=quagga --enable-vty-group=quagga \
+ --disable-doc --enable-multipath=64
+
+ # Quagga RPM
+ make dist
+ cp ${BUILD_ROOT}/rpm_specs/quagga.spec $rpmbuild/SPECS/
+ cp quagga*.tar.gz $rpmbuild/SOURCES/
+ cat > $rpmbuild/SOURCES/bgpd.conf <<EOF
+hostname bgpd
+password sdncbgpc
+service advanced-vty
+log stdout
+line vty
+ exec-timeout 0 0
+debug bgp
+debug bgp updates
+debug bgp events
+debug bgp fsm
+EOF
+ rpmbuild --define "_topdir $rpmbuild" -ba $rpmbuild/SPECS/quagga.spec
+ popd > /dev/null
+}
+
+# Build ZRPC
+build_zrpc(){
+ sudo yum -y install $rpmbuild/RPMS/x86_64/*.rpm
+ rm -rf zrpcd
+ git clone https://github.com/6WIND/zrpcd.git
+ pushd zrpcd > /dev/null
+ touch NEWS README
+ export QUAGGA_CFLAGS='-I/usr/include/quagga/'
+    # checkout the parent of the below patch.
+ # Once the issue addressed by the patch is fixed
+ # these two lines can be removed.
+ git checkout 9bd1ee8e
+ patch -p1 < ${PATCHES_DIR}/fix_zrpcd_make_dist.patch
+ patch -p1 < ${PATCHES_DIR}/zrpcd_hardcoded_paths.patch
+ autoreconf -i
+
+ # ZRPC RPM
+ ./configure --enable-zrpcd \
+ --enable-user=quagga --enable-group=quagga \
+ --enable-vty-group=quagga
+ make dist
+
+ cat > $rpmbuild/SOURCES/zrpcd.service <<EOF
+[Unit]
+Description=ZRPC daemon for quagga
+After=network.service
+
+[Service]
+ExecStart=/usr/sbin/zrpcd
+Type=forking
+PIDFile=/var/run/zrpcd.pid
+Restart=on-failure
+
+[Install]
+WantedBy=default.target
+EOF
+ cp zrpcd-*.tar.gz $rpmbuild/SOURCES/
+ cp ${BUILD_ROOT}/rpm_specs/zrpc.spec $rpmbuild/SPECS/
+ rpmbuild --define "_topdir $rpmbuild" -ba $rpmbuild/SPECS/zrpc.spec
+}
+
+# Main
+parse_cmdline "$@"
+
+# Check env vars
+if [ -z "$QUAGGA_BUILD_DIR" ]; then
+ echo "ERROR: You must set QUAGGA_BUILD_DIR env variable as the location to build!"
+ exit 1
+elif [ -z "$QUAGGA_RPMS_DIR" ]; then
+ echo "WARN: QUAGGA_RPMS_DIR env var is not set, will default to QUAGGA_BUILD_DIR/rpmbuild"
+ rpmbuild=${QUAGGA_BUILD_DIR}/rpmbuild
+else
+ rpmbuild=${QUAGGA_RPMS_DIR}
+fi
+
+if [ -z "$BUILD_ROOT" ]; then
+ echo "WARN: BUILD_ROOT env var not set, will default to $(pwd)"
+ BUILD_ROOT=$(pwd)
+fi
+
+if [ -z "$PATCHES_DIR" ]; then
+ echo "WARN: PATCHES_DIR env var not set, will default to ${BUILD_ROOT}/patches"
+ PATCHES_DIR=${BUILD_ROOT}/patches
+fi
+
+if [ -n "$CLEAN" ]; then
+ quagga_clean
+fi
+
+install_quagga_build_deps
+
+mkdir -p ${QUAGGA_BUILD_DIR}
+mkdir -p $rpmbuild $rpmbuild/SOURCES $rpmbuild/SPECS $rpmbuild/RPMS
+pushd $QUAGGA_BUILD_DIR > /dev/null
+
+case "$ARTIFACT" in
+ thrift)
+ build_thrift
+ ;;
+ capnproto)
+ build_capnproto
+ ;;
+ quagga)
+ build_quagga
+ ;;
+ zrpc)
+ build_zrpc
+ ;;
+ *)
+ build_thrift
+ build_capnproto
+ build_quagga
+        build_zrpc
+ ;;
+esac
+
+popd > /dev/null
diff --git a/build/first-boot.yaml b/build/first-boot.yaml
new file mode 100644
index 00000000..6cd874cc
--- /dev/null
+++ b/build/first-boot.yaml
@@ -0,0 +1,53 @@
+heat_template_version: 2014-10-16
+
+description: >
+ This is an example showing how you can do firstboot configuration
+ of the nodes via cloud-init. To enable this, replace the default
+ mapping of OS::TripleO::NodeUserData in ../overcloud_resource_registry*
+
+parameters:
+ ComputeKernelArgs:
+ description: >
+      Space separated list of kernel args to be updated in grub.
+ The given args will be appended to existing args of GRUB_CMDLINE_LINUX in file /etc/default/grub
+ Example: "intel_iommu=on default_hugepagesz=2MB hugepagesz=2MB hugepages=2048"
+ type: string
+ default: ""
+
+resources:
+ userdata:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: {get_resource: compute_kernel_args}
+
+ # Verify the logs on /var/log/cloud-init.log on the overcloud node
+ compute_kernel_args:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ config:
+ str_replace:
+ template: |
+ #!/bin/bash
+ set -x
+ sed 's/^\(GRUB_CMDLINE_LINUX=".*\)"/\1 $KERNEL_ARGS"/g' -i /etc/default/grub ;
+ grub2-mkconfig -o /etc/grub2.cfg
+ hugepage_count=`echo $KERNEL_ARGS | grep -oP ' ?hugepages=\K[0-9]+'`
+ if [ -z "$hugepage_count" ]; then
+ hugepage_count=1024
+ fi
+ echo vm.hugetlb_shm_group=0 >> /usr/lib/sysctl.d/00-system.conf
+ echo vm.max_map_count=$(printf "%.0f" $(echo 2.2*$hugepage_count | bc)) >> /usr/lib/sysctl.d/00-system.conf
+ echo kernel.shmmax=$(($hugepage_count * 2 * 1024 * 1024)) >> /usr/lib/sysctl.d/00-system.conf
+
+ reboot
+ params:
+ $KERNEL_ARGS: {get_param: ComputeKernelArgs}
+
+outputs:
+ # This means get_resource from the parent template will get the userdata, see:
+ # http://docs.openstack.org/developer/heat/template_guide/composition.html#making-your-template-resource-more-transparent
+ # Note this is new-for-kilo, an alternative is returning a value then using
+ # get_attr in the parent template instead.
+ OS::stack_id:
+ value: {get_resource: userdata}
diff --git a/build/neutron-patch-NSDriver.patch b/build/neutron-patch-NSDriver.patch
new file mode 100644
index 00000000..e015064c
--- /dev/null
+++ b/build/neutron-patch-NSDriver.patch
@@ -0,0 +1,208 @@
+From ff4e918d21970a81604a0aaa2af888141f93cdac Mon Sep 17 00:00:00 2001
+From: Feng Pan <fpan@redhat.com>
+Date: Sun, 5 Feb 2017 21:34:19 -0500
+Subject: [PATCH] Add NSDriver
+
+---
+ neutron/agent/l3/namespaces.py | 6 ++--
+ neutron/agent/l3/router_info.py | 14 ++++----
+ neutron/agent/linux/interface.py | 76 +++++++++++++++++++++++++++++++++++++++-
+ 3 files changed, 87 insertions(+), 9 deletions(-)
+
+diff --git a/neutron/agent/l3/namespaces.py b/neutron/agent/l3/namespaces.py
+index e70d7bb..3c932a8 100644
+--- a/neutron/agent/l3/namespaces.py
++++ b/neutron/agent/l3/namespaces.py
+@@ -18,6 +18,7 @@ import functools
+ from oslo_log import log as logging
+ from oslo_utils import excutils
+
++from neutron.agent.linux.interface import OVSInterfaceDriver
+ from neutron._i18n import _LE, _LW
+ from neutron.agent.linux import ip_lib
+
+@@ -110,8 +111,9 @@ class Namespace(object):
+
+ class RouterNamespace(Namespace):
+
+- def __init__(self, router_id, agent_conf, driver, use_ipv6):
++ def __init__(self, router_id, agent_conf, driver, use_ipv6, ovs_driver):
+ self.router_id = router_id
++ self.ovs_driver = ovs_driver
+ name = self._get_ns_name(router_id)
+ super(RouterNamespace, self).__init__(
+ name, agent_conf, driver, use_ipv6)
+@@ -131,7 +133,7 @@ class RouterNamespace(Namespace):
+ elif d.name.startswith(ROUTER_2_FIP_DEV_PREFIX):
+ ns_ip.del_veth(d.name)
+ elif d.name.startswith(EXTERNAL_DEV_PREFIX):
+- self.driver.unplug(
++ self.ovs_driver.unplug(
+ d.name,
+ bridge=self.agent_conf.external_network_bridge,
+ namespace=self.name,
+diff --git a/neutron/agent/l3/router_info.py b/neutron/agent/l3/router_info.py
+index 3fd3934..b33fb7e 100644
+--- a/neutron/agent/l3/router_info.py
++++ b/neutron/agent/l3/router_info.py
+@@ -27,6 +27,7 @@ from neutron.common import exceptions as n_exc
+ from neutron.common import ipv6_utils
+ from neutron.common import utils as common_utils
+ from neutron.ipam import utils as ipam_utils
++from neutron.agent.linux.interface import OVSInterfaceDriver
+
+ LOG = logging.getLogger(__name__)
+ INTERNAL_DEV_PREFIX = namespaces.INTERNAL_DEV_PREFIX
+@@ -47,6 +48,7 @@ class RouterInfo(object):
+ agent_conf,
+ interface_driver,
+ use_ipv6=False):
++ self.ovs_driver = OVSInterfaceDriver(agent_conf)
+ self.router_id = router_id
+ self.ex_gw_port = None
+ self._snat_enabled = None
+@@ -57,7 +59,7 @@ class RouterInfo(object):
+ self.router = router
+ self.use_ipv6 = use_ipv6
+ ns = self.create_router_namespace_object(
+- router_id, agent_conf, interface_driver, use_ipv6)
++ router_id, agent_conf, interface_driver, use_ipv6, self.ovs_driver)
+ self.router_namespace = ns
+ self.ns_name = ns.name
+ self.available_mark_ids = set(range(ADDRESS_SCOPE_MARK_ID_MIN,
+@@ -94,9 +96,9 @@ class RouterInfo(object):
+ self.router_namespace.create()
+
+ def create_router_namespace_object(
+- self, router_id, agent_conf, iface_driver, use_ipv6):
++ self, router_id, agent_conf, iface_driver, use_ipv6, ovs_driver):
+ return namespaces.RouterNamespace(
+- router_id, agent_conf, iface_driver, use_ipv6)
++ router_id, agent_conf, iface_driver, use_ipv6, ovs_driver)
+
+ @property
+ def router(self):
+@@ -583,7 +585,7 @@ class RouterInfo(object):
+ for ip in floating_ips]
+
+ def _plug_external_gateway(self, ex_gw_port, interface_name, ns_name):
+- self.driver.plug(ex_gw_port['network_id'],
++ self.ovs_driver.plug(ex_gw_port['network_id'],
+ ex_gw_port['id'],
+ interface_name,
+ ex_gw_port['mac_address'],
+@@ -641,7 +643,7 @@ class RouterInfo(object):
+
+ self._add_route_to_gw(ex_gw_port, device_name=interface_name,
+ namespace=ns_name, preserve_ips=preserve_ips)
+- self.driver.init_router_port(
++ self.ovs_driver.init_router_port(
+ interface_name,
+ ip_cidrs,
+ namespace=ns_name,
+@@ -735,7 +737,7 @@ class RouterInfo(object):
+ for stale_dev in stale_devs:
+ LOG.debug('Deleting stale external router device: %s', stale_dev)
+ pd.remove_gw_interface(self.router['id'])
+- self.driver.unplug(stale_dev,
++ self.ovs_driver.unplug(stale_dev,
+ bridge=self.agent_conf.external_network_bridge,
+ namespace=self.ns_name,
+ prefix=EXTERNAL_DEV_PREFIX)
+diff --git a/neutron/agent/linux/interface.py b/neutron/agent/linux/interface.py
+index c2eb06e..80da16f 100644
+--- a/neutron/agent/linux/interface.py
++++ b/neutron/agent/linux/interface.py
+@@ -15,7 +15,7 @@
+
+ import abc
+ import time
+-
++import eventlet
+ import netaddr
+ from neutron_lib import constants
+ from oslo_config import cfg
+@@ -288,6 +288,80 @@ class NullDriver(LinuxInterfaceDriver):
+ def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
+ pass
+
++class NSDriver(LinuxInterfaceDriver):
++ """Device independent driver enabling creation of a non device specific
++ interface in network spaces. Attachment to the device is not performed.
++ """
++ MAX_TIME_FOR_DEVICE_EXISTENCE = 30
++
++ @classmethod
++ def _device_is_created_in_time(cls, device_name):
++ """See if device is created, within time limit."""
++ attempt = 0
++ while attempt < NSDriver.MAX_TIME_FOR_DEVICE_EXISTENCE:
++ if ip_lib.device_exists(device_name):
++ return True
++ attempt += 1
++ eventlet.sleep(1)
++ LOG.error(_LE("Device %(dev)s was not created in %(time)d seconds"),
++ {'dev': device_name,
++ 'time': NSDriver.MAX_TIME_FOR_DEVICE_EXISTENCE})
++ return False
++
++ def _configure_mtu(self, ns_dev, mtu=None):
++ # Need to set MTU, after added to namespace. See review
++ # https://review.openstack.org/327651
++ try:
++ # Note: network_device_mtu will be deprecated in future
++ mtu_override = self.conf.network_device_mtu
++ except cfg.NoSuchOptError:
++ LOG.warning(_LW("Config setting for MTU deprecated - any "
++ "override will be ignored."))
++ mtu_override = None
++ if mtu_override:
++ mtu = mtu_override
++ LOG.debug("Overriding MTU to %d", mtu)
++ if mtu:
++ ns_dev.link.set_mtu(mtu)
++ else:
++ LOG.debug("No MTU provided - skipping setting value")
++
++ def plug(self, network_id, port_id, device_name, mac_address,
++ bridge=None, namespace=None, prefix=None, mtu=None):
++
++ # Overriding this, we still want to add an existing device into the
++ # namespace.
++ self.plug_new(network_id, port_id, device_name, mac_address,
++ bridge, namespace, prefix, mtu)
++
++ def plug_new(self, network_id, port_id, device_name, mac_address,
++ bridge=None, namespace=None, prefix=None, mtu=None):
++
++ ip = ip_lib.IPWrapper()
++ ns_dev = ip.device(device_name)
++
++ LOG.debug("Plugging dev: '%s' into namespace: '%s' ",
++ device_name, namespace)
++
++ # Wait for device creation
++ if not self._device_is_created_in_time(device_name):
++ return
++
++ ns_dev.link.set_address(mac_address)
++
++ if namespace:
++ namespace_obj = ip.ensure_namespace(namespace)
++ namespace_obj.add_device_to_namespace(ns_dev)
++
++ self._configure_mtu(ns_dev, mtu)
++
++ ns_dev.link.set_up()
++
++ def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
++ # Device removal is done externally. Just remove the namespace
++ LOG.debug("Removing namespace: '%s'", namespace)
++ ip_lib.IPWrapper(namespace).garbage_collect_namespace()
++
+
+ class OVSInterfaceDriver(LinuxInterfaceDriver):
+ """Driver for creating an internal interface on an OVS bridge."""
+--
+2.9.3
+
diff --git a/build/neutron/agent/interface/interface.py b/build/neutron/agent/interface/interface.py
deleted file mode 100644
index 709fd677..00000000
--- a/build/neutron/agent/interface/interface.py
+++ /dev/null
@@ -1,552 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import abc
-import eventlet
-import netaddr
-from oslo_config import cfg
-from oslo_log import log as logging
-import six
-
-from neutron._i18n import _, _LE, _LI, _LW
-from neutron.agent.common import ovs_lib
-from neutron.agent.linux import ip_lib
-from neutron.agent.linux import utils
-from neutron.common import constants as n_const
-from neutron.common import exceptions
-from neutron.common import ipv6_utils
-
-
-LOG = logging.getLogger(__name__)
-
-OPTS = [
- cfg.StrOpt('ovs_integration_bridge',
- default='br-int',
- help=_('Name of Open vSwitch bridge to use')),
- cfg.BoolOpt('ovs_use_veth',
- default=False,
- help=_('Uses veth for an OVS interface or not. '
- 'Support kernels with limited namespace support '
- '(e.g. RHEL 6.5) so long as ovs_use_veth is set to '
- 'True.')),
- cfg.IntOpt('network_device_mtu',
- deprecated_for_removal=True,
- help=_('MTU setting for device. This option will be removed in '
- 'Newton. Please use the system-wide segment_mtu setting '
- 'which the agents will take into account when wiring '
- 'VIFs.')),
-]
-
-
-@six.add_metaclass(abc.ABCMeta)
-class LinuxInterfaceDriver(object):
-
- # from linux IF_NAMESIZE
- DEV_NAME_LEN = 14
- DEV_NAME_PREFIX = n_const.TAP_DEVICE_PREFIX
-
- def __init__(self, conf):
- self.conf = conf
- if self.conf.network_device_mtu:
- self._validate_network_device_mtu()
-
- def _validate_network_device_mtu(self):
- if (ipv6_utils.is_enabled() and
- self.conf.network_device_mtu < n_const.IPV6_MIN_MTU):
- LOG.error(_LE("IPv6 protocol requires a minimum MTU of "
- "%(min_mtu)s, while the configured value is "
- "%(current_mtu)s"), {'min_mtu': n_const.IPV6_MIN_MTU,
- 'current_mtu': self.conf.network_device_mtu})
- raise SystemExit(1)
-
- @property
- def use_gateway_ips(self):
- """Whether to use gateway IPs instead of unique IP allocations.
-
- In each place where the DHCP agent runs, and for each subnet for
- which DHCP is handling out IP addresses, the DHCP port needs -
- at the Linux level - to have an IP address within that subnet.
- Generally this needs to be a unique Neutron-allocated IP
- address, because the subnet's underlying L2 domain is bridged
- across multiple compute hosts and network nodes, and for HA
- there may be multiple DHCP agents running on that same bridged
- L2 domain.
-
- However, if the DHCP ports - on multiple compute/network nodes
- but for the same network - are _not_ bridged to each other,
- they do not need each to have a unique IP address. Instead
- they can all share the same address from the relevant subnet.
- This works, without creating any ambiguity, because those
- ports are not all present on the same L2 domain, and because
- no data within the network is ever sent to that address.
- (DHCP requests are broadcast, and it is the network's job to
- ensure that such a broadcast will reach at least one of the
- available DHCP servers. DHCP responses will be sent _from_
- the DHCP port address.)
-
- Specifically, for networking backends where it makes sense,
- the DHCP agent allows all DHCP ports to use the subnet's
- gateway IP address, and thereby to completely avoid any unique
- IP address allocation. This behaviour is selected by running
- the DHCP agent with a configured interface driver whose
- 'use_gateway_ips' property is True.
-
- When an operator deploys Neutron with an interface driver that
- makes use_gateway_ips True, they should also ensure that a
- gateway IP address is defined for each DHCP-enabled subnet,
- and that the gateway IP address doesn't change during the
- subnet's lifetime.
- """
- return False
-
- def init_l3(self, device_name, ip_cidrs, namespace=None,
- preserve_ips=None, clean_connections=False):
- """Set the L3 settings for the interface using data from the port.
-
- ip_cidrs: list of 'X.X.X.X/YY' strings
- preserve_ips: list of ip cidrs that should not be removed from device
- clean_connections: Boolean to indicate if we should cleanup connections
- associated to removed ips
- """
- preserve_ips = preserve_ips or []
- device = ip_lib.IPDevice(device_name, namespace=namespace)
-
- # The LLA generated by the operating system is not known to
- # Neutron, so it would be deleted if we added it to the 'previous'
- # list here
- default_ipv6_lla = ip_lib.get_ipv6_lladdr(device.link.address)
- previous = {addr['cidr'] for addr in device.addr.list(
- filters=['permanent'])} - {default_ipv6_lla}
-
- # add new addresses
- for ip_cidr in ip_cidrs:
-
- net = netaddr.IPNetwork(ip_cidr)
- # Convert to compact IPv6 address because the return values of
- # "ip addr list" are compact.
- if net.version == 6:
- ip_cidr = str(net)
- if ip_cidr in previous:
- previous.remove(ip_cidr)
- continue
-
- device.addr.add(ip_cidr)
-
- # clean up any old addresses
- for ip_cidr in previous:
- if ip_cidr not in preserve_ips:
- if clean_connections:
- device.delete_addr_and_conntrack_state(ip_cidr)
- else:
- device.addr.delete(ip_cidr)
-
- def init_router_port(self,
- device_name,
- ip_cidrs,
- namespace,
- preserve_ips=None,
- extra_subnets=None,
- clean_connections=False):
- """Set the L3 settings for a router interface using data from the port.
-
- ip_cidrs: list of 'X.X.X.X/YY' strings
- preserve_ips: list of ip cidrs that should not be removed from device
- clean_connections: Boolean to indicate if we should cleanup connections
- associated to removed ips
- extra_subnets: An iterable of cidrs to add as routes without address
- """
- LOG.debug("init_router_port: device_name(%s), namespace(%s)",
- device_name, namespace)
- self.init_l3(device_name=device_name,
- ip_cidrs=ip_cidrs,
- namespace=namespace,
- preserve_ips=preserve_ips or [],
- clean_connections=clean_connections)
-
- device = ip_lib.IPDevice(device_name, namespace=namespace)
-
- # Manage on-link routes (routes without an associated address)
- new_onlink_cidrs = set(s['cidr'] for s in extra_subnets or [])
-
- v4_onlink = device.route.list_onlink_routes(n_const.IP_VERSION_4)
- v6_onlink = device.route.list_onlink_routes(n_const.IP_VERSION_6)
- existing_onlink_cidrs = set(r['cidr'] for r in v4_onlink + v6_onlink)
-
- for route in new_onlink_cidrs - existing_onlink_cidrs:
- LOG.debug("adding onlink route(%s)", route)
- device.route.add_onlink_route(route)
- for route in (existing_onlink_cidrs - new_onlink_cidrs -
- set(preserve_ips or [])):
- LOG.debug("deleting onlink route(%s)", route)
- device.route.delete_onlink_route(route)
-
- def add_ipv6_addr(self, device_name, v6addr, namespace, scope='global'):
- device = ip_lib.IPDevice(device_name,
- namespace=namespace)
- net = netaddr.IPNetwork(v6addr)
- device.addr.add(str(net), scope)
-
- def delete_ipv6_addr(self, device_name, v6addr, namespace):
- device = ip_lib.IPDevice(device_name,
- namespace=namespace)
- device.delete_addr_and_conntrack_state(v6addr)
-
- def delete_ipv6_addr_with_prefix(self, device_name, prefix, namespace):
- """Delete the first listed IPv6 address that falls within a given
- prefix.
- """
- device = ip_lib.IPDevice(device_name, namespace=namespace)
- net = netaddr.IPNetwork(prefix)
- for address in device.addr.list(scope='global', filters=['permanent']):
- ip_address = netaddr.IPNetwork(address['cidr'])
- if ip_address in net:
- device.delete_addr_and_conntrack_state(address['cidr'])
- break
-
- def get_ipv6_llas(self, device_name, namespace):
- device = ip_lib.IPDevice(device_name,
- namespace=namespace)
-
- return device.addr.list(scope='link', ip_version=6)
-
- def check_bridge_exists(self, bridge):
- if not ip_lib.device_exists(bridge):
- raise exceptions.BridgeDoesNotExist(bridge=bridge)
-
- def get_device_name(self, port):
- return (self.DEV_NAME_PREFIX + port.id)[:self.DEV_NAME_LEN]
-
- @staticmethod
- def configure_ipv6_ra(namespace, dev_name):
- """Configure acceptance of IPv6 route advertisements on an intf."""
- # Learn the default router's IP address via RAs
- ip_lib.IPWrapper(namespace=namespace).netns.execute(
- ['sysctl', '-w', 'net.ipv6.conf.%s.accept_ra=2' % dev_name])
-
- @abc.abstractmethod
- def plug_new(self, network_id, port_id, device_name, mac_address,
- bridge=None, namespace=None, prefix=None, mtu=None):
- """Plug in the interface only for new devices that don't exist yet."""
-
- def plug(self, network_id, port_id, device_name, mac_address,
- bridge=None, namespace=None, prefix=None, mtu=None):
- if not ip_lib.device_exists(device_name,
- namespace=namespace):
- try:
- self.plug_new(network_id, port_id, device_name, mac_address,
- bridge, namespace, prefix, mtu)
- except TypeError:
- self.plug_new(network_id, port_id, device_name, mac_address,
- bridge, namespace, prefix)
- else:
- LOG.info(_LI("Device %s already exists"), device_name)
-
- @abc.abstractmethod
- def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
- """Unplug the interface."""
-
- @property
- def bridged(self):
- """Whether the DHCP port is bridged to the VM TAP interfaces.
-
- When the DHCP port is bridged to the TAP interfaces for the
- VMs for which it is providing DHCP service - as is the case
- for most Neutron network implementations - the DHCP server
- only needs to listen on the DHCP port, and will still receive
- DHCP requests from all the relevant VMs.
-
- If the DHCP port is not bridged to the relevant VM TAP
- interfaces, the DHCP server needs to listen explicitly on
- those TAP interfaces, and to treat those as aliases of the
- DHCP port where the IP subnet is defined.
- """
- return True
-
-
-class NullDriver(LinuxInterfaceDriver):
- def plug_new(self, network_id, port_id, device_name, mac_address,
- bridge=None, namespace=None, prefix=None, mtu=None):
- pass
-
- def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
- pass
-
-class NSDriver(LinuxInterfaceDriver):
- """Device independent driver enabling creation of a non device specific
- interface in network spaces. Attachment to the device is not performed.
- """
- MAX_TIME_FOR_DEVICE_EXISTENCE = 30
-
- @classmethod
- def _device_is_created_in_time(cls, device_name):
- """See if device is created, within time limit."""
- attempt = 0
- while attempt < NSDriver.MAX_TIME_FOR_DEVICE_EXISTENCE:
- if ip_lib.device_exists(device_name):
- return True
- attempt += 1
- eventlet.sleep(1)
- LOG.error(_LE("Device %(dev)s was not created in %(time)d seconds"),
- {'dev': device_name,
- 'time': NSDriver.MAX_TIME_FOR_DEVICE_EXISTENCE})
- return False
-
- def _configure_mtu(self, ns_dev, mtu=None):
- # Need to set MTU, after added to namespace. See review
- # https://review.openstack.org/327651
- try:
- # Note: network_device_mtu will be deprecated in future
- mtu_override = self.conf.network_device_mtu
- except cfg.NoSuchOptError:
- LOG.warning(_LW("Config setting for MTU deprecated - any "
- "override will be ignored."))
- mtu_override = None
- if mtu_override:
- mtu = mtu_override
- LOG.debug("Overriding MTU to %d", mtu)
- if mtu:
- ns_dev.link.set_mtu(mtu)
- else:
- LOG.debug("No MTU provided - skipping setting value")
-
- def plug(self, network_id, port_id, device_name, mac_address,
- bridge=None, namespace=None, prefix=None, mtu=None):
-
- # Overriding this, we still want to add an existing device into the
- # namespace.
- self.plug_new(network_id, port_id, device_name, mac_address,
- bridge, namespace, prefix, mtu)
-
- def plug_new(self, network_id, port_id, device_name, mac_address,
- bridge=None, namespace=None, prefix=None, mtu=None):
-
- ip = ip_lib.IPWrapper()
- ns_dev = ip.device(device_name)
-
- LOG.debug("Plugging dev: '%s' into namespace: '%s' ",
- device_name, namespace)
-
- # Wait for device creation
- if not self._device_is_created_in_time(device_name):
- return
-
- ns_dev.link.set_address(mac_address)
-
- if namespace:
- namespace_obj = ip.ensure_namespace(namespace)
- namespace_obj.add_device_to_namespace(ns_dev)
-
- self._configure_mtu(ns_dev, mtu)
-
- ns_dev.link.set_up()
-
- def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
- # Device removal is done externally. Just remove the namespace
- LOG.debug("Removing namespace: '%s'", namespace)
- ip_lib.IPWrapper(namespace).garbage_collect_namespace()
-
-
-class OVSInterfaceDriver(LinuxInterfaceDriver):
- """Driver for creating an internal interface on an OVS bridge."""
-
- DEV_NAME_PREFIX = n_const.TAP_DEVICE_PREFIX
-
- def __init__(self, conf):
- super(OVSInterfaceDriver, self).__init__(conf)
- if self.conf.ovs_use_veth:
- self.DEV_NAME_PREFIX = 'ns-'
-
- def _get_tap_name(self, dev_name, prefix=None):
- if self.conf.ovs_use_veth:
- dev_name = dev_name.replace(prefix or self.DEV_NAME_PREFIX,
- n_const.TAP_DEVICE_PREFIX)
- return dev_name
-
- def _ovs_add_port(self, bridge, device_name, port_id, mac_address,
- internal=True):
- attrs = [('external_ids', {'iface-id': port_id,
- 'iface-status': 'active',
- 'attached-mac': mac_address})]
- if internal:
- attrs.insert(0, ('type', 'internal'))
-
- ovs = ovs_lib.OVSBridge(bridge)
- ovs.replace_port(device_name, *attrs)
-
- def plug_new(self, network_id, port_id, device_name, mac_address,
- bridge=None, namespace=None, prefix=None, mtu=None):
- """Plug in the interface."""
- if not bridge:
- bridge = self.conf.ovs_integration_bridge
-
- self.check_bridge_exists(bridge)
-
- ip = ip_lib.IPWrapper()
- tap_name = self._get_tap_name(device_name, prefix)
-
- if self.conf.ovs_use_veth:
- # Create ns_dev in a namespace if one is configured.
- root_dev, ns_dev = ip.add_veth(tap_name,
- device_name,
- namespace2=namespace)
- root_dev.disable_ipv6()
- else:
- ns_dev = ip.device(device_name)
-
- internal = not self.conf.ovs_use_veth
- self._ovs_add_port(bridge, tap_name, port_id, mac_address,
- internal=internal)
-
- ns_dev.link.set_address(mac_address)
-
- # Add an interface created by ovs to the namespace.
- if not self.conf.ovs_use_veth and namespace:
- namespace_obj = ip.ensure_namespace(namespace)
- namespace_obj.add_device_to_namespace(ns_dev)
-
- # NOTE(ihrachys): the order here is significant: we must set MTU after
- # the device is moved into a namespace, otherwise OVS bridge does not
- # allow to set MTU that is higher than the least of all device MTUs on
- # the bridge
- mtu = self.conf.network_device_mtu or mtu
- if mtu:
- ns_dev.link.set_mtu(mtu)
- if self.conf.ovs_use_veth:
- root_dev.link.set_mtu(mtu)
- else:
- LOG.warning(_LW("No MTU configured for port %s"), port_id)
-
- ns_dev.link.set_up()
- if self.conf.ovs_use_veth:
- root_dev.link.set_up()
-
- def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
- """Unplug the interface."""
- if not bridge:
- bridge = self.conf.ovs_integration_bridge
-
- tap_name = self._get_tap_name(device_name, prefix)
- self.check_bridge_exists(bridge)
- ovs = ovs_lib.OVSBridge(bridge)
-
- try:
- ovs.delete_port(tap_name)
- if self.conf.ovs_use_veth:
- device = ip_lib.IPDevice(device_name, namespace=namespace)
- device.link.delete()
- LOG.debug("Unplugged interface '%s'", device_name)
- except RuntimeError:
- LOG.error(_LE("Failed unplugging interface '%s'"),
- device_name)
-
-
-class IVSInterfaceDriver(LinuxInterfaceDriver):
- """Driver for creating an internal interface on an IVS bridge."""
-
- DEV_NAME_PREFIX = n_const.TAP_DEVICE_PREFIX
-
- def __init__(self, conf):
- super(IVSInterfaceDriver, self).__init__(conf)
- self.DEV_NAME_PREFIX = 'ns-'
-
- def _get_tap_name(self, dev_name, prefix=None):
- dev_name = dev_name.replace(prefix or self.DEV_NAME_PREFIX,
- n_const.TAP_DEVICE_PREFIX)
- return dev_name
-
- def _ivs_add_port(self, device_name, port_id, mac_address):
- cmd = ['ivs-ctl', 'add-port', device_name]
- utils.execute(cmd, run_as_root=True)
-
- def plug_new(self, network_id, port_id, device_name, mac_address,
- bridge=None, namespace=None, prefix=None, mtu=None):
- """Plug in the interface."""
- ip = ip_lib.IPWrapper()
- tap_name = self._get_tap_name(device_name, prefix)
-
- root_dev, ns_dev = ip.add_veth(tap_name, device_name)
- root_dev.disable_ipv6()
-
- self._ivs_add_port(tap_name, port_id, mac_address)
-
- ns_dev = ip.device(device_name)
- ns_dev.link.set_address(mac_address)
-
- mtu = self.conf.network_device_mtu or mtu
- if mtu:
- ns_dev.link.set_mtu(mtu)
- root_dev.link.set_mtu(mtu)
- else:
- LOG.warning(_LW("No MTU configured for port %s"), port_id)
-
- if namespace:
- namespace_obj = ip.ensure_namespace(namespace)
- namespace_obj.add_device_to_namespace(ns_dev)
-
- ns_dev.link.set_up()
- root_dev.link.set_up()
-
- def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
- """Unplug the interface."""
- tap_name = self._get_tap_name(device_name, prefix)
- try:
- cmd = ['ivs-ctl', 'del-port', tap_name]
- utils.execute(cmd, run_as_root=True)
- device = ip_lib.IPDevice(device_name, namespace=namespace)
- device.link.delete()
- LOG.debug("Unplugged interface '%s'", device_name)
- except RuntimeError:
- LOG.error(_LE("Failed unplugging interface '%s'"),
- device_name)
-
-
-class BridgeInterfaceDriver(LinuxInterfaceDriver):
- """Driver for creating bridge interfaces."""
-
- DEV_NAME_PREFIX = 'ns-'
-
- def plug_new(self, network_id, port_id, device_name, mac_address,
- bridge=None, namespace=None, prefix=None, mtu=None):
- """Plugin the interface."""
- ip = ip_lib.IPWrapper()
-
- # Enable agent to define the prefix
- tap_name = device_name.replace(prefix or self.DEV_NAME_PREFIX,
- n_const.TAP_DEVICE_PREFIX)
- # Create ns_veth in a namespace if one is configured.
- root_veth, ns_veth = ip.add_veth(tap_name, device_name,
- namespace2=namespace)
- root_veth.disable_ipv6()
- ns_veth.link.set_address(mac_address)
-
- mtu = self.conf.network_device_mtu or mtu
- if mtu:
- root_veth.link.set_mtu(mtu)
- ns_veth.link.set_mtu(mtu)
- else:
- LOG.warning(_LW("No MTU configured for port %s"), port_id)
-
- root_veth.link.set_up()
- ns_veth.link.set_up()
-
- def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
- """Unplug the interface."""
- device = ip_lib.IPDevice(device_name, namespace=namespace)
- try:
- device.link.delete()
- LOG.debug("Unplugged interface '%s'", device_name)
- except RuntimeError:
- LOG.error(_LE("Failed unplugging interface '%s'"),
- device_name)
diff --git a/build/neutron/agent/l3/namespaces.py b/build/neutron/agent/l3/namespaces.py
deleted file mode 100644
index aa282052..00000000
--- a/build/neutron/agent/l3/namespaces.py
+++ /dev/null
@@ -1,142 +0,0 @@
-# Copyright 2015 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import functools
-
-from oslo_log import log as logging
-from oslo_utils import excutils
-
-from neutron.agent.linux.interface import OVSInterfaceDriver
-from neutron._i18n import _LE, _LW
-from neutron.agent.linux import ip_lib
-
-LOG = logging.getLogger(__name__)
-
-NS_PREFIX = 'qrouter-'
-INTERNAL_DEV_PREFIX = 'qr-'
-EXTERNAL_DEV_PREFIX = 'qg-'
-# TODO(Carl) It is odd that this file needs this. It is a dvr detail.
-ROUTER_2_FIP_DEV_PREFIX = 'rfp-'
-
-
-def build_ns_name(prefix, identifier):
- """Builds a namespace name from the given prefix and identifier
-
- :param prefix: The prefix which must end with '-' for legacy reasons
- :param identifier: The id associated with the namespace
- """
- return prefix + identifier
-
-
-def get_prefix_from_ns_name(ns_name):
- """Parses prefix from prefix-identifier
-
- :param ns_name: The name of a namespace
- :returns: The prefix ending with a '-' or None if there is no '-'
- """
- dash_index = ns_name.find('-')
- if 0 <= dash_index:
- return ns_name[:dash_index + 1]
-
-
-def get_id_from_ns_name(ns_name):
- """Parses identifier from prefix-identifier
-
- :param ns_name: The name of a namespace
- :returns: Identifier or None if there is no - to end the prefix
- """
- dash_index = ns_name.find('-')
- if 0 <= dash_index:
- return ns_name[dash_index + 1:]
-
-
-def check_ns_existence(f):
- @functools.wraps(f)
- def wrapped(self, *args, **kwargs):
- if not self.exists():
- LOG.warning(_LW('Namespace %(name)s does not exists. Skipping '
- '%(func)s'),
- {'name': self.name, 'func': f.__name__})
- return
- try:
- return f(self, *args, **kwargs)
- except RuntimeError:
- with excutils.save_and_reraise_exception() as ctx:
- if not self.exists():
- LOG.debug('Namespace %(name)s was concurrently deleted',
- self.name)
- ctx.reraise = False
- return wrapped
-
-
-class Namespace(object):
-
- def __init__(self, name, agent_conf, driver, use_ipv6):
- self.name = name
- self.ip_wrapper_root = ip_lib.IPWrapper()
- self.agent_conf = agent_conf
- self.driver = driver
- self.use_ipv6 = use_ipv6
-
- def create(self):
- ip_wrapper = self.ip_wrapper_root.ensure_namespace(self.name)
- cmd = ['sysctl', '-w', 'net.ipv4.ip_forward=1']
- ip_wrapper.netns.execute(cmd)
- if self.use_ipv6:
- cmd = ['sysctl', '-w', 'net.ipv6.conf.all.forwarding=1']
- ip_wrapper.netns.execute(cmd)
-
- def delete(self):
- try:
- self.ip_wrapper_root.netns.delete(self.name)
- except RuntimeError:
- msg = _LE('Failed trying to delete namespace: %s')
- LOG.exception(msg, self.name)
-
- def exists(self):
- return self.ip_wrapper_root.netns.exists(self.name)
-
-
-class RouterNamespace(Namespace):
-
- def __init__(self, router_id, agent_conf, driver, use_ipv6, ovs_driver):
- self.router_id = router_id
- self.ovs_driver = ovs_driver
- name = self._get_ns_name(router_id)
- super(RouterNamespace, self).__init__(
- name, agent_conf, driver, use_ipv6)
-
- @classmethod
- def _get_ns_name(cls, router_id):
- return build_ns_name(NS_PREFIX, router_id)
-
- @check_ns_existence
- def delete(self):
- ns_ip = ip_lib.IPWrapper(namespace=self.name)
- for d in ns_ip.get_devices(exclude_loopback=True):
- if d.name.startswith(INTERNAL_DEV_PREFIX):
- # device is on default bridge
- self.driver.unplug(d.name, namespace=self.name,
- prefix=INTERNAL_DEV_PREFIX)
- elif d.name.startswith(ROUTER_2_FIP_DEV_PREFIX):
- ns_ip.del_veth(d.name)
- elif d.name.startswith(EXTERNAL_DEV_PREFIX):
- self.ovs_driver.unplug(
- d.name,
- bridge=self.agent_conf.external_network_bridge,
- namespace=self.name,
- prefix=EXTERNAL_DEV_PREFIX)
-
- super(RouterNamespace, self).delete()
diff --git a/build/neutron/agent/l3/router_info.py b/build/neutron/agent/l3/router_info.py
deleted file mode 100644
index 0ddd1db5..00000000
--- a/build/neutron/agent/l3/router_info.py
+++ /dev/null
@@ -1,996 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import collections
-import netaddr
-from oslo_log import log as logging
-
-from neutron._i18n import _, _LE, _LW
-from neutron.agent.l3 import namespaces
-from neutron.agent.linux import ip_lib
-from neutron.agent.linux import iptables_manager
-from neutron.agent.linux import ra
-from neutron.common import constants as l3_constants
-from neutron.common import exceptions as n_exc
-from neutron.common import ipv6_utils
-from neutron.common import utils as common_utils
-from neutron.ipam import utils as ipam_utils
-from neutron.agent.linux.interface import OVSInterfaceDriver
-
-LOG = logging.getLogger(__name__)
-INTERNAL_DEV_PREFIX = namespaces.INTERNAL_DEV_PREFIX
-EXTERNAL_DEV_PREFIX = namespaces.EXTERNAL_DEV_PREFIX
-
-FLOATINGIP_STATUS_NOCHANGE = object()
-ADDRESS_SCOPE_MARK_MASK = "0xffff0000"
-ADDRESS_SCOPE_MARK_ID_MIN = 1024
-ADDRESS_SCOPE_MARK_ID_MAX = 2048
-DEFAULT_ADDRESS_SCOPE = "noscope"
-
-
-class RouterInfo(object):
-
- def __init__(self,
- router_id,
- router,
- agent_conf,
- interface_driver,
- use_ipv6=False):
- self.ovs_driver = OVSInterfaceDriver(agent_conf)
- self.router_id = router_id
- self.ex_gw_port = None
- self._snat_enabled = None
- self.fip_map = {}
- self.internal_ports = []
- self.floating_ips = set()
- # Invoke the setter for establishing initial SNAT action
- self.router = router
- self.use_ipv6 = use_ipv6
- ns = namespaces.RouterNamespace(
- router_id, agent_conf, interface_driver, use_ipv6, self.ovs_driver)
- self.router_namespace = ns
- self.ns_name = ns.name
- self.available_mark_ids = set(range(ADDRESS_SCOPE_MARK_ID_MIN,
- ADDRESS_SCOPE_MARK_ID_MAX))
- self._address_scope_to_mark_id = {
- DEFAULT_ADDRESS_SCOPE: self.available_mark_ids.pop()}
- self.iptables_manager = iptables_manager.IptablesManager(
- use_ipv6=use_ipv6,
- namespace=self.ns_name)
- self.routes = []
- self.agent_conf = agent_conf
- self.driver = interface_driver
- # radvd is a neutron.agent.linux.ra.DaemonMonitor
- self.radvd = None
-
- def initialize(self, process_monitor):
- """Initialize the router on the system.
-
- This differs from __init__ in that this method actually affects the
- system creating namespaces, starting processes, etc. The other merely
- initializes the python object. This separates in-memory object
- initialization from methods that actually go do stuff to the system.
-
- :param process_monitor: The agent's process monitor instance.
- """
- self.process_monitor = process_monitor
- self.radvd = ra.DaemonMonitor(self.router_id,
- self.ns_name,
- process_monitor,
- self.get_internal_device_name,
- self.agent_conf)
-
- self.router_namespace.create()
-
- @property
- def router(self):
- return self._router
-
- @router.setter
- def router(self, value):
- self._router = value
- if not self._router:
- return
- # enable_snat by default if it wasn't specified by plugin
- self._snat_enabled = self._router.get('enable_snat', True)
-
- def get_internal_device_name(self, port_id):
- return (INTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
-
- def get_external_device_name(self, port_id):
- return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
-
- def get_external_device_interface_name(self, ex_gw_port):
- return self.get_external_device_name(ex_gw_port['id'])
-
- def _update_routing_table(self, operation, route, namespace):
- cmd = ['ip', 'route', operation, 'to', route['destination'],
- 'via', route['nexthop']]
- ip_wrapper = ip_lib.IPWrapper(namespace=namespace)
- ip_wrapper.netns.execute(cmd, check_exit_code=False)
-
- def update_routing_table(self, operation, route):
- self._update_routing_table(operation, route, self.ns_name)
-
- def routes_updated(self, old_routes, new_routes):
- adds, removes = common_utils.diff_list_of_dict(old_routes,
- new_routes)
- for route in adds:
- LOG.debug("Added route entry is '%s'", route)
- # remove replaced route from deleted route
- for del_route in removes:
- if route['destination'] == del_route['destination']:
- removes.remove(del_route)
- #replace success even if there is no existing route
- self.update_routing_table('replace', route)
- for route in removes:
- LOG.debug("Removed route entry is '%s'", route)
- self.update_routing_table('delete', route)
-
- def get_ex_gw_port(self):
- return self.router.get('gw_port')
-
- def get_floating_ips(self):
- """Filter Floating IPs to be hosted on this agent."""
- return self.router.get(l3_constants.FLOATINGIP_KEY, [])
-
- def floating_forward_rules(self, floating_ip, fixed_ip):
- return [('PREROUTING', '-d %s/32 -j DNAT --to-destination %s' %
- (floating_ip, fixed_ip)),
- ('OUTPUT', '-d %s/32 -j DNAT --to-destination %s' %
- (floating_ip, fixed_ip)),
- ('float-snat', '-s %s/32 -j SNAT --to-source %s' %
- (fixed_ip, floating_ip))]
-
- def floating_mangle_rules(self, floating_ip, fixed_ip, internal_mark):
- mark_traffic_to_floating_ip = (
- 'floatingip', '-d %s -j MARK --set-xmark %s' % (
- floating_ip, internal_mark))
- mark_traffic_from_fixed_ip = (
- 'FORWARD', '-s %s -j $float-snat' % fixed_ip)
- return [mark_traffic_to_floating_ip, mark_traffic_from_fixed_ip]
-
- def get_address_scope_mark_mask(self, address_scope=None):
- if not address_scope:
- address_scope = DEFAULT_ADDRESS_SCOPE
-
- if address_scope not in self._address_scope_to_mark_id:
- self._address_scope_to_mark_id[address_scope] = (
- self.available_mark_ids.pop())
-
- mark_id = self._address_scope_to_mark_id[address_scope]
- # NOTE: Address scopes use only the upper 16 bits of the 32 fwmark
- return "%s/%s" % (hex(mark_id << 16), ADDRESS_SCOPE_MARK_MASK)
-
- def get_port_address_scope_mark(self, port):
- """Get the IP version 4 and 6 address scope mark for the port
-
- :param port: A port dict from the RPC call
- :returns: A dict mapping the address family to the address scope mark
- """
- port_scopes = port.get('address_scopes', {})
-
- address_scope_mark_masks = (
- (int(k), self.get_address_scope_mark_mask(v))
- for k, v in port_scopes.items())
- return collections.defaultdict(self.get_address_scope_mark_mask,
- address_scope_mark_masks)
-
- def process_floating_ip_nat_rules(self):
- """Configure NAT rules for the router's floating IPs.
-
- Configures iptables rules for the floating ips of the given router
- """
- # Clear out all iptables rules for floating ips
- self.iptables_manager.ipv4['nat'].clear_rules_by_tag('floating_ip')
-
- floating_ips = self.get_floating_ips()
- # Loop once to ensure that floating ips are configured.
- for fip in floating_ips:
- # Rebuild iptables rules for the floating ip.
- fixed = fip['fixed_ip_address']
- fip_ip = fip['floating_ip_address']
- for chain, rule in self.floating_forward_rules(fip_ip, fixed):
- self.iptables_manager.ipv4['nat'].add_rule(chain, rule,
- tag='floating_ip')
-
- self.iptables_manager.apply()
-
- def process_floating_ip_address_scope_rules(self):
- """Configure address scope related iptables rules for the router's
- floating IPs.
- """
-
- # Clear out all iptables rules for floating ips
- self.iptables_manager.ipv4['mangle'].clear_rules_by_tag('floating_ip')
- all_floating_ips = self.get_floating_ips()
- ext_scope = self._get_external_address_scope()
- # Filter out the floating ips that have fixed ip in the same address
- # scope. Because the packets for them will always be in one address
- # scope, no need to manipulate MARK/CONNMARK for them.
- floating_ips = [fip for fip in all_floating_ips
- if fip.get('fixed_ip_address_scope') != ext_scope]
- if floating_ips:
- ext_scope_mark = self.get_address_scope_mark_mask(ext_scope)
- ports_scopemark = self._get_address_scope_mark()
- devices_in_ext_scope = {
- device for device, mark
- in ports_scopemark[l3_constants.IP_VERSION_4].items()
- if mark == ext_scope_mark}
- # Add address scope for floatingip egress
- for device in devices_in_ext_scope:
- self.iptables_manager.ipv4['mangle'].add_rule(
- 'float-snat',
- '-o %s -j MARK --set-xmark %s'
- % (device, ext_scope_mark),
- tag='floating_ip')
-
- # Loop once to ensure that floating ips are configured.
- for fip in floating_ips:
- # Rebuild iptables rules for the floating ip.
- fip_ip = fip['floating_ip_address']
- # Send the floating ip traffic to the right address scope
- fixed_ip = fip['fixed_ip_address']
- fixed_scope = fip.get('fixed_ip_address_scope')
- internal_mark = self.get_address_scope_mark_mask(fixed_scope)
- mangle_rules = self.floating_mangle_rules(
- fip_ip, fixed_ip, internal_mark)
- for chain, rule in mangle_rules:
- self.iptables_manager.ipv4['mangle'].add_rule(
- chain, rule, tag='floating_ip')
-
- def process_snat_dnat_for_fip(self):
- try:
- self.process_floating_ip_nat_rules()
- except Exception:
- # TODO(salv-orlando): Less broad catching
- msg = _('L3 agent failure to setup NAT for floating IPs')
- LOG.exception(msg)
- raise n_exc.FloatingIpSetupException(msg)
-
- def _add_fip_addr_to_device(self, fip, device):
- """Configures the floating ip address on the device.
- """
- try:
- ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])
- device.addr.add(ip_cidr)
- return True
- except RuntimeError:
- # any exception occurred here should cause the floating IP
- # to be set in error state
- LOG.warning(_LW("Unable to configure IP address for "
- "floating IP: %s"), fip['id'])
-
- def add_floating_ip(self, fip, interface_name, device):
- raise NotImplementedError()
-
- def remove_floating_ip(self, device, ip_cidr):
- device.delete_addr_and_conntrack_state(ip_cidr)
-
- def move_floating_ip(self, fip):
- return l3_constants.FLOATINGIP_STATUS_ACTIVE
-
- def remove_external_gateway_ip(self, device, ip_cidr):
- device.delete_addr_and_conntrack_state(ip_cidr)
-
- def get_router_cidrs(self, device):
- return set([addr['cidr'] for addr in device.addr.list()])
-
- def process_floating_ip_addresses(self, interface_name):
- """Configure IP addresses on router's external gateway interface.
-
- Ensures addresses for existing floating IPs and cleans up
- those that should not longer be configured.
- """
-
- fip_statuses = {}
- if interface_name is None:
- LOG.debug('No Interface for floating IPs router: %s',
- self.router['id'])
- return fip_statuses
-
- device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)
- existing_cidrs = self.get_router_cidrs(device)
- new_cidrs = set()
-
- floating_ips = self.get_floating_ips()
- # Loop once to ensure that floating ips are configured.
- for fip in floating_ips:
- fip_ip = fip['floating_ip_address']
- ip_cidr = common_utils.ip_to_cidr(fip_ip)
- new_cidrs.add(ip_cidr)
- fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ACTIVE
- if ip_cidr not in existing_cidrs:
- fip_statuses[fip['id']] = self.add_floating_ip(
- fip, interface_name, device)
- LOG.debug('Floating ip %(id)s added, status %(status)s',
- {'id': fip['id'],
- 'status': fip_statuses.get(fip['id'])})
- elif (fip_ip in self.fip_map and
- self.fip_map[fip_ip] != fip['fixed_ip_address']):
- LOG.debug("Floating IP was moved from fixed IP "
- "%(old)s to %(new)s",
- {'old': self.fip_map[fip_ip],
- 'new': fip['fixed_ip_address']})
- fip_statuses[fip['id']] = self.move_floating_ip(fip)
- elif fip_statuses[fip['id']] == fip['status']:
- # mark the status as not changed. we can't remove it because
- # that's how the caller determines that it was removed
- fip_statuses[fip['id']] = FLOATINGIP_STATUS_NOCHANGE
- fips_to_remove = (
- ip_cidr for ip_cidr in existing_cidrs - new_cidrs
- if common_utils.is_cidr_host(ip_cidr))
- for ip_cidr in fips_to_remove:
- LOG.debug("Removing floating ip %s from interface %s in "
- "namespace %s", ip_cidr, interface_name, self.ns_name)
- self.remove_floating_ip(device, ip_cidr)
-
- return fip_statuses
-
- def configure_fip_addresses(self, interface_name):
- try:
- return self.process_floating_ip_addresses(interface_name)
- except Exception:
- # TODO(salv-orlando): Less broad catching
- msg = _('L3 agent failure to setup floating IPs')
- LOG.exception(msg)
- raise n_exc.FloatingIpSetupException(msg)
-
- def put_fips_in_error_state(self):
- fip_statuses = {}
- for fip in self.router.get(l3_constants.FLOATINGIP_KEY, []):
- fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR
- return fip_statuses
-
- def delete(self, agent):
- self.router['gw_port'] = None
- self.router[l3_constants.INTERFACE_KEY] = []
- self.router[l3_constants.FLOATINGIP_KEY] = []
- self.process_delete(agent)
- self.disable_radvd()
- self.router_namespace.delete()
-
- def _internal_network_updated(self, port, subnet_id, prefix, old_prefix,
- updated_cidrs):
- interface_name = self.get_internal_device_name(port['id'])
- if prefix != l3_constants.PROVISIONAL_IPV6_PD_PREFIX:
- fixed_ips = port['fixed_ips']
- for fixed_ip in fixed_ips:
- if fixed_ip['subnet_id'] == subnet_id:
- v6addr = common_utils.ip_to_cidr(fixed_ip['ip_address'],
- fixed_ip.get('prefixlen'))
- if v6addr not in updated_cidrs:
- self.driver.add_ipv6_addr(interface_name, v6addr,
- self.ns_name)
- else:
- self.driver.delete_ipv6_addr_with_prefix(interface_name,
- old_prefix,
- self.ns_name)
-
- def _internal_network_added(self, ns_name, network_id, port_id,
- fixed_ips, mac_address,
- interface_name, prefix, mtu=None):
- LOG.debug("adding internal network: prefix(%s), port(%s)",
- prefix, port_id)
- self.driver.plug(network_id, port_id, interface_name, mac_address,
- namespace=ns_name,
- prefix=prefix, mtu=mtu)
-
- ip_cidrs = common_utils.fixed_ip_cidrs(fixed_ips)
- self.driver.init_router_port(
- interface_name, ip_cidrs, namespace=ns_name)
- for fixed_ip in fixed_ips:
- ip_lib.send_ip_addr_adv_notif(ns_name,
- interface_name,
- fixed_ip['ip_address'],
- self.agent_conf)
-
- def internal_network_added(self, port):
- network_id = port['network_id']
- port_id = port['id']
- fixed_ips = port['fixed_ips']
- mac_address = port['mac_address']
-
- interface_name = self.get_internal_device_name(port_id)
-
- self._internal_network_added(self.ns_name,
- network_id,
- port_id,
- fixed_ips,
- mac_address,
- interface_name,
- INTERNAL_DEV_PREFIX,
- mtu=port.get('mtu'))
-
- def internal_network_removed(self, port):
- interface_name = self.get_internal_device_name(port['id'])
- LOG.debug("removing internal network: port(%s) interface(%s)",
- port['id'], interface_name)
- if ip_lib.device_exists(interface_name, namespace=self.ns_name):
- self.driver.unplug(interface_name, namespace=self.ns_name,
- prefix=INTERNAL_DEV_PREFIX)
-
- def _get_existing_devices(self):
- ip_wrapper = ip_lib.IPWrapper(namespace=self.ns_name)
- ip_devs = ip_wrapper.get_devices(exclude_loopback=True)
- return [ip_dev.name for ip_dev in ip_devs]
-
- @staticmethod
- def _get_updated_ports(existing_ports, current_ports):
- updated_ports = dict()
- current_ports_dict = {p['id']: p for p in current_ports}
- for existing_port in existing_ports:
- current_port = current_ports_dict.get(existing_port['id'])
- if current_port:
- if (sorted(existing_port['fixed_ips'],
- key=common_utils.safe_sort_key) !=
- sorted(current_port['fixed_ips'],
- key=common_utils.safe_sort_key)):
- updated_ports[current_port['id']] = current_port
- return updated_ports
-
- @staticmethod
- def _port_has_ipv6_subnet(port):
- if 'subnets' in port:
- for subnet in port['subnets']:
- if (netaddr.IPNetwork(subnet['cidr']).version == 6 and
- subnet['cidr'] != l3_constants.PROVISIONAL_IPV6_PD_PREFIX):
- return True
-
- def enable_radvd(self, internal_ports=None):
- LOG.debug('Spawning radvd daemon in router device: %s', self.router_id)
- if not internal_ports:
- internal_ports = self.internal_ports
- self.radvd.enable(internal_ports)
-
- def disable_radvd(self):
- LOG.debug('Terminating radvd daemon in router device: %s',
- self.router_id)
- self.radvd.disable()
-
- def internal_network_updated(self, interface_name, ip_cidrs):
- self.driver.init_router_port(
- interface_name,
- ip_cidrs=ip_cidrs,
- namespace=self.ns_name)
-
- def address_scope_mangle_rule(self, device_name, mark_mask):
- return '-i %s -j MARK --set-xmark %s' % (device_name, mark_mask)
-
- def address_scope_filter_rule(self, device_name, mark_mask):
- return '-o %s -m mark ! --mark %s -j DROP' % (
- device_name, mark_mask)
-
- def _process_internal_ports(self, pd):
- existing_port_ids = set(p['id'] for p in self.internal_ports)
-
- internal_ports = self.router.get(l3_constants.INTERFACE_KEY, [])
- current_port_ids = set(p['id'] for p in internal_ports
- if p['admin_state_up'])
-
- new_port_ids = current_port_ids - existing_port_ids
- new_ports = [p for p in internal_ports if p['id'] in new_port_ids]
- old_ports = [p for p in self.internal_ports
- if p['id'] not in current_port_ids]
- updated_ports = self._get_updated_ports(self.internal_ports,
- internal_ports)
-
- enable_ra = False
- for p in new_ports:
- self.internal_network_added(p)
- LOG.debug("appending port %s to internal_ports cache", p)
- self.internal_ports.append(p)
- enable_ra = enable_ra or self._port_has_ipv6_subnet(p)
- for subnet in p['subnets']:
- if ipv6_utils.is_ipv6_pd_enabled(subnet):
- interface_name = self.get_internal_device_name(p['id'])
- pd.enable_subnet(self.router_id, subnet['id'],
- subnet['cidr'],
- interface_name, p['mac_address'])
-
- for p in old_ports:
- self.internal_network_removed(p)
- LOG.debug("removing port %s from internal_ports cache", p)
- self.internal_ports.remove(p)
- enable_ra = enable_ra or self._port_has_ipv6_subnet(p)
- for subnet in p['subnets']:
- if ipv6_utils.is_ipv6_pd_enabled(subnet):
- pd.disable_subnet(self.router_id, subnet['id'])
-
- updated_cidrs = []
- if updated_ports:
- for index, p in enumerate(internal_ports):
- if not updated_ports.get(p['id']):
- continue
- self.internal_ports[index] = updated_ports[p['id']]
- interface_name = self.get_internal_device_name(p['id'])
- ip_cidrs = common_utils.fixed_ip_cidrs(p['fixed_ips'])
- LOG.debug("updating internal network for port %s", p)
- updated_cidrs += ip_cidrs
- self.internal_network_updated(interface_name, ip_cidrs)
- enable_ra = enable_ra or self._port_has_ipv6_subnet(p)
-
- # Check if there is any pd prefix update
- for p in internal_ports:
- if p['id'] in (set(current_port_ids) & set(existing_port_ids)):
- for subnet in p.get('subnets', []):
- if ipv6_utils.is_ipv6_pd_enabled(subnet):
- old_prefix = pd.update_subnet(self.router_id,
- subnet['id'],
- subnet['cidr'])
- if old_prefix:
- self._internal_network_updated(p, subnet['id'],
- subnet['cidr'],
- old_prefix,
- updated_cidrs)
- enable_ra = True
-
- # Enable RA
- if enable_ra:
- self.enable_radvd(internal_ports)
-
- existing_devices = self._get_existing_devices()
- current_internal_devs = set(n for n in existing_devices
- if n.startswith(INTERNAL_DEV_PREFIX))
- current_port_devs = set(self.get_internal_device_name(port_id)
- for port_id in current_port_ids)
- stale_devs = current_internal_devs - current_port_devs
- for stale_dev in stale_devs:
- LOG.debug('Deleting stale internal router device: %s',
- stale_dev)
- pd.remove_stale_ri_ifname(self.router_id, stale_dev)
- self.driver.unplug(stale_dev,
- namespace=self.ns_name,
- prefix=INTERNAL_DEV_PREFIX)
-
- def _list_floating_ip_cidrs(self):
- # Compute a list of addresses this router is supposed to have.
- # This avoids unnecessarily removing those addresses and
- # causing a momentarily network outage.
- floating_ips = self.get_floating_ips()
- return [common_utils.ip_to_cidr(ip['floating_ip_address'])
- for ip in floating_ips]
-
- def _plug_external_gateway(self, ex_gw_port, interface_name, ns_name):
- self.ovs_driver.plug(ex_gw_port['network_id'],
- ex_gw_port['id'],
- interface_name,
- ex_gw_port['mac_address'],
- bridge=self.agent_conf.external_network_bridge,
- namespace=ns_name,
- prefix=EXTERNAL_DEV_PREFIX,
- mtu=ex_gw_port.get('mtu'))
-
- def _get_external_gw_ips(self, ex_gw_port):
- gateway_ips = []
- if 'subnets' in ex_gw_port:
- gateway_ips = [subnet['gateway_ip']
- for subnet in ex_gw_port['subnets']
- if subnet['gateway_ip']]
- if self.use_ipv6 and not self.is_v6_gateway_set(gateway_ips):
- # No IPv6 gateway is available, but IPv6 is enabled.
- if self.agent_conf.ipv6_gateway:
- # ipv6_gateway configured, use address for default route.
- gateway_ips.append(self.agent_conf.ipv6_gateway)
- return gateway_ips
-
- def _add_route_to_gw(self, ex_gw_port, device_name,
- namespace, preserve_ips):
- # Note: ipv6_gateway is an ipv6 LLA
- # and so doesn't need a special route
- for subnet in ex_gw_port.get('subnets', []):
- is_gateway_not_in_subnet = (subnet['gateway_ip'] and
- not ipam_utils.check_subnet_ip(
- subnet['cidr'],
- subnet['gateway_ip']))
- if is_gateway_not_in_subnet:
- preserve_ips.append(subnet['gateway_ip'])
- device = ip_lib.IPDevice(device_name, namespace=namespace)
- device.route.add_route(subnet['gateway_ip'], scope='link')
-
- def _external_gateway_added(self, ex_gw_port, interface_name,
- ns_name, preserve_ips):
- LOG.debug("External gateway added: port(%s), interface(%s), ns(%s)",
- ex_gw_port, interface_name, ns_name)
- self._plug_external_gateway(ex_gw_port, interface_name, ns_name)
-
- # Build up the interface and gateway IP addresses that
- # will be added to the interface.
- ip_cidrs = common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips'])
-
- gateway_ips = self._get_external_gw_ips(ex_gw_port)
- enable_ra_on_gw = False
- if self.use_ipv6 and not self.is_v6_gateway_set(gateway_ips):
- # There is no IPv6 gw_ip, use RouterAdvt for default route.
- enable_ra_on_gw = True
-
- self._add_route_to_gw(ex_gw_port, device_name=interface_name,
- namespace=ns_name, preserve_ips=preserve_ips)
- self.ovs_driver.init_router_port(
- interface_name,
- ip_cidrs,
- namespace=ns_name,
- extra_subnets=ex_gw_port.get('extra_subnets', []),
- preserve_ips=preserve_ips,
- clean_connections=True)
-
- device = ip_lib.IPDevice(interface_name, namespace=ns_name)
- for ip in gateway_ips or []:
- device.route.add_gateway(ip)
-
- if enable_ra_on_gw:
- self.driver.configure_ipv6_ra(ns_name, interface_name)
-
- for fixed_ip in ex_gw_port['fixed_ips']:
- ip_lib.send_ip_addr_adv_notif(ns_name,
- interface_name,
- fixed_ip['ip_address'],
- self.agent_conf)
-
- def is_v6_gateway_set(self, gateway_ips):
- """Check to see if list of gateway_ips has an IPv6 gateway.
- """
- # Note - don't require a try-except here as all
- # gateway_ips elements are valid addresses, if they exist.
- return any(netaddr.IPAddress(gw_ip).version == 6
- for gw_ip in gateway_ips)
-
- def external_gateway_added(self, ex_gw_port, interface_name):
- preserve_ips = self._list_floating_ip_cidrs()
- self._external_gateway_added(
- ex_gw_port, interface_name, self.ns_name, preserve_ips)
-
- def external_gateway_updated(self, ex_gw_port, interface_name):
- preserve_ips = self._list_floating_ip_cidrs()
- self._external_gateway_added(
- ex_gw_port, interface_name, self.ns_name, preserve_ips)
-
- def external_gateway_removed(self, ex_gw_port, interface_name):
- LOG.debug("External gateway removed: port(%s), interface(%s)",
- ex_gw_port, interface_name)
- device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)
- for ip_addr in ex_gw_port['fixed_ips']:
- self.remove_external_gateway_ip(device,
- common_utils.ip_to_cidr(
- ip_addr['ip_address'],
- ip_addr['prefixlen']))
- self.ovs_driver.unplug(interface_name,
- bridge=self.agent_conf.external_network_bridge,
- namespace=self.ns_name,
- prefix=EXTERNAL_DEV_PREFIX)
-
- @staticmethod
- def _gateway_ports_equal(port1, port2):
- return port1 == port2
-
- def _process_external_gateway(self, ex_gw_port, pd):
- # TODO(Carl) Refactor to clarify roles of ex_gw_port vs self.ex_gw_port
- ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or
- self.ex_gw_port and self.ex_gw_port['id'])
-
- interface_name = None
- if ex_gw_port_id:
- interface_name = self.get_external_device_name(ex_gw_port_id)
- if ex_gw_port:
- if not self.ex_gw_port:
- self.external_gateway_added(ex_gw_port, interface_name)
- pd.add_gw_interface(self.router['id'], interface_name)
- elif not self._gateway_ports_equal(ex_gw_port, self.ex_gw_port):
- self.external_gateway_updated(ex_gw_port, interface_name)
- elif not ex_gw_port and self.ex_gw_port:
- self.external_gateway_removed(self.ex_gw_port, interface_name)
- pd.remove_gw_interface(self.router['id'])
-
- existing_devices = self._get_existing_devices()
- stale_devs = [dev for dev in existing_devices
- if dev.startswith(EXTERNAL_DEV_PREFIX)
- and dev != interface_name]
- for stale_dev in stale_devs:
- LOG.debug('Deleting stale external router device: %s', stale_dev)
- pd.remove_gw_interface(self.router['id'])
- self.ovs_driver.unplug(stale_dev,
- bridge=self.agent_conf.external_network_bridge,
- namespace=self.ns_name,
- prefix=EXTERNAL_DEV_PREFIX)
-
- # Process SNAT rules for external gateway
- gw_port = self._router.get('gw_port')
- self._handle_router_snat_rules(gw_port, interface_name)
-
- def _prevent_snat_for_internal_traffic_rule(self, interface_name):
- return (
- 'POSTROUTING', '! -i %(interface_name)s '
- '! -o %(interface_name)s -m conntrack ! '
- '--ctstate DNAT -j ACCEPT' %
- {'interface_name': interface_name})
-
- def external_gateway_nat_fip_rules(self, ex_gw_ip, interface_name):
- dont_snat_traffic_to_internal_ports_if_not_to_floating_ip = (
- self._prevent_snat_for_internal_traffic_rule(interface_name))
- # Makes replies come back through the router to reverse DNAT
- ext_in_mark = self.agent_conf.external_ingress_mark
- snat_internal_traffic_to_floating_ip = (
- 'snat', '-m mark ! --mark %s/%s '
- '-m conntrack --ctstate DNAT '
- '-j SNAT --to-source %s'
- % (ext_in_mark, l3_constants.ROUTER_MARK_MASK, ex_gw_ip))
- return [dont_snat_traffic_to_internal_ports_if_not_to_floating_ip,
- snat_internal_traffic_to_floating_ip]
-
- def external_gateway_nat_snat_rules(self, ex_gw_ip, interface_name):
- snat_normal_external_traffic = (
- 'snat', '-o %s -j SNAT --to-source %s' %
- (interface_name, ex_gw_ip))
- return [snat_normal_external_traffic]
-
- def external_gateway_mangle_rules(self, interface_name):
- mark = self.agent_conf.external_ingress_mark
- mark_packets_entering_external_gateway_port = (
- 'mark', '-i %s -j MARK --set-xmark %s/%s' %
- (interface_name, mark, l3_constants.ROUTER_MARK_MASK))
- return [mark_packets_entering_external_gateway_port]
-
- def _empty_snat_chains(self, iptables_manager):
- iptables_manager.ipv4['nat'].empty_chain('POSTROUTING')
- iptables_manager.ipv4['nat'].empty_chain('snat')
- iptables_manager.ipv4['mangle'].empty_chain('mark')
- iptables_manager.ipv4['mangle'].empty_chain('POSTROUTING')
-
- def _add_snat_rules(self, ex_gw_port, iptables_manager,
- interface_name):
- self.process_external_port_address_scope_routing(iptables_manager)
-
- if ex_gw_port:
- # ex_gw_port should not be None in this case
- # NAT rules are added only if ex_gw_port has an IPv4 address
- for ip_addr in ex_gw_port['fixed_ips']:
- ex_gw_ip = ip_addr['ip_address']
- if netaddr.IPAddress(ex_gw_ip).version == 4:
- if self._snat_enabled:
- rules = self.external_gateway_nat_snat_rules(
- ex_gw_ip, interface_name)
- for rule in rules:
- iptables_manager.ipv4['nat'].add_rule(*rule)
-
- rules = self.external_gateway_nat_fip_rules(
- ex_gw_ip, interface_name)
- for rule in rules:
- iptables_manager.ipv4['nat'].add_rule(*rule)
- rules = self.external_gateway_mangle_rules(interface_name)
- for rule in rules:
- iptables_manager.ipv4['mangle'].add_rule(*rule)
-
- break
-
- def _handle_router_snat_rules(self, ex_gw_port, interface_name):
- self._empty_snat_chains(self.iptables_manager)
-
- self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat')
-
- self._add_snat_rules(ex_gw_port,
- self.iptables_manager,
- interface_name)
-
- def _process_external_on_delete(self, agent):
- fip_statuses = {}
- try:
- ex_gw_port = self.get_ex_gw_port()
- self._process_external_gateway(ex_gw_port, agent.pd)
- if not ex_gw_port:
- return
-
- interface_name = self.get_external_device_interface_name(
- ex_gw_port)
- fip_statuses = self.configure_fip_addresses(interface_name)
-
- except (n_exc.FloatingIpSetupException):
- # All floating IPs must be put in error state
- LOG.exception(_LE("Failed to process floating IPs."))
- fip_statuses = self.put_fips_in_error_state()
- finally:
- self.update_fip_statuses(agent, fip_statuses)
-
- def process_external(self, agent):
- fip_statuses = {}
- try:
- with self.iptables_manager.defer_apply():
- ex_gw_port = self.get_ex_gw_port()
- self._process_external_gateway(ex_gw_port, agent.pd)
- if not ex_gw_port:
- return
-
- # Process SNAT/DNAT rules and addresses for floating IPs
- self.process_snat_dnat_for_fip()
-
- # Once NAT rules for floating IPs are safely in place
- # configure their addresses on the external gateway port
- interface_name = self.get_external_device_interface_name(
- ex_gw_port)
- fip_statuses = self.configure_fip_addresses(interface_name)
-
- except (n_exc.FloatingIpSetupException,
- n_exc.IpTablesApplyException):
- # All floating IPs must be put in error state
- LOG.exception(_LE("Failed to process floating IPs."))
- fip_statuses = self.put_fips_in_error_state()
- finally:
- self.update_fip_statuses(agent, fip_statuses)
-
- def update_fip_statuses(self, agent, fip_statuses):
- # Identify floating IPs which were disabled
- existing_floating_ips = self.floating_ips
- self.floating_ips = set(fip_statuses.keys())
- for fip_id in existing_floating_ips - self.floating_ips:
- fip_statuses[fip_id] = l3_constants.FLOATINGIP_STATUS_DOWN
- # filter out statuses that didn't change
- fip_statuses = {f: stat for f, stat in fip_statuses.items()
- if stat != FLOATINGIP_STATUS_NOCHANGE}
- if not fip_statuses:
- return
- LOG.debug('Sending floating ip statuses: %s', fip_statuses)
- # Update floating IP status on the neutron server
- agent.plugin_rpc.update_floatingip_statuses(
- agent.context, self.router_id, fip_statuses)
-
- def _get_port_devicename_scopemark(self, ports, name_generator):
- devicename_scopemark = {l3_constants.IP_VERSION_4: dict(),
- l3_constants.IP_VERSION_6: dict()}
- for p in ports:
- device_name = name_generator(p['id'])
- ip_cidrs = common_utils.fixed_ip_cidrs(p['fixed_ips'])
- port_as_marks = self.get_port_address_scope_mark(p)
- for ip_version in {ip_lib.get_ip_version(cidr)
- for cidr in ip_cidrs}:
- devicename_scopemark[ip_version][device_name] = (
- port_as_marks[ip_version])
-
- return devicename_scopemark
-
- def _get_address_scope_mark(self):
- # Prepare address scope iptables rule for internal ports
- internal_ports = self.router.get(l3_constants.INTERFACE_KEY, [])
- ports_scopemark = self._get_port_devicename_scopemark(
- internal_ports, self.get_internal_device_name)
-
- # Prepare address scope iptables rule for external port
- external_port = self.get_ex_gw_port()
- if external_port:
- external_port_scopemark = self._get_port_devicename_scopemark(
- [external_port], self.get_external_device_name)
- for ip_version in (l3_constants.IP_VERSION_4,
- l3_constants.IP_VERSION_6):
- ports_scopemark[ip_version].update(
- external_port_scopemark[ip_version])
- return ports_scopemark
-
- def _add_address_scope_mark(self, iptables_manager, ports_scopemark):
- external_device_name = None
- external_port = self.get_ex_gw_port()
- if external_port:
- external_device_name = self.get_external_device_name(
- external_port['id'])
-
- # Process address scope iptables rules
- for ip_version in (l3_constants.IP_VERSION_4,
- l3_constants.IP_VERSION_6):
- scopemarks = ports_scopemark[ip_version]
- iptables = iptables_manager.get_tables(ip_version)
- iptables['mangle'].empty_chain('scope')
- iptables['filter'].empty_chain('scope')
- dont_block_external = (ip_version == l3_constants.IP_VERSION_4
- and self._snat_enabled and external_port)
- for device_name, mark in scopemarks.items():
- # Add address scope iptables rule
- iptables['mangle'].add_rule(
- 'scope',
- self.address_scope_mangle_rule(device_name, mark))
- if dont_block_external and device_name == external_device_name:
- continue
- iptables['filter'].add_rule(
- 'scope',
- self.address_scope_filter_rule(device_name, mark))
-
- def process_ports_address_scope_iptables(self):
- ports_scopemark = self._get_address_scope_mark()
- self._add_address_scope_mark(self.iptables_manager, ports_scopemark)
-
- def _get_external_address_scope(self):
- external_port = self.get_ex_gw_port()
- if not external_port:
- return
-
- scopes = external_port.get('address_scopes', {})
- return scopes.get(str(l3_constants.IP_VERSION_4))
-
- def process_external_port_address_scope_routing(self, iptables_manager):
- if not self._snat_enabled:
- return
-
- external_port = self.get_ex_gw_port()
- if not external_port:
- return
-
- external_devicename = self.get_external_device_name(
- external_port['id'])
-
- # Saves the originating address scope by saving the packet MARK to
- # the CONNMARK for new connections so that returning traffic can be
- # match to it.
- rule = ('-o %s -m connmark --mark 0x0/0xffff0000 '
- '-j CONNMARK --save-mark '
- '--nfmask 0xffff0000 --ctmask 0xffff0000' %
- external_devicename)
-
- iptables_manager.ipv4['mangle'].add_rule('POSTROUTING', rule)
-
- address_scope = self._get_external_address_scope()
- if not address_scope:
- return
-
- # Prevents snat within the same address scope
- rule = '-o %s -m connmark --mark %s -j ACCEPT' % (
- external_devicename,
- self.get_address_scope_mark_mask(address_scope))
- iptables_manager.ipv4['nat'].add_rule('snat', rule)
-
- def process_address_scope(self):
- with self.iptables_manager.defer_apply():
- self.process_ports_address_scope_iptables()
- self.process_floating_ip_address_scope_rules()
-
- @common_utils.exception_logger()
- def process_delete(self, agent):
- """Process the delete of this router
-
- This method is the point where the agent requests that this router
- be deleted. This is a separate code path from process in that it
- avoids any changes to the qrouter namespace that will be removed
- at the end of the operation.
-
- :param agent: Passes the agent in order to send RPC messages.
- """
- LOG.debug("process router delete")
- if self.router_namespace.exists():
- self._process_internal_ports(agent.pd)
- agent.pd.sync_router(self.router['id'])
- self._process_external_on_delete(agent)
- else:
- LOG.warning(_LW("Can't gracefully delete the router %s: "
- "no router namespace found."), self.router['id'])
-
- @common_utils.exception_logger()
- def process(self, agent):
- """Process updates to this router
-
- This method is the point where the agent requests that updates be
- applied to this router.
-
- :param agent: Passes the agent in order to send RPC messages.
- """
- LOG.debug("process router updates")
- self._process_internal_ports(agent.pd)
- agent.pd.sync_router(self.router['id'])
- self.process_external(agent)
- self.process_address_scope()
- # Process static routes for router
- self.routes_updated(self.routes, self.router['routes'])
- self.routes = self.router['routes']
-
- # Update ex_gw_port and enable_snat on the router info cache
- self.ex_gw_port = self.get_ex_gw_port()
- self.fip_map = dict([(fip['floating_ip_address'],
- fip['fixed_ip_address'])
- for fip in self.get_floating_ips()])
- # TODO(Carl) FWaaS uses this. Why is it set after processing is done?
- self.enable_snat = self.router.get('enable_snat')
diff --git a/build/nics-template.yaml.jinja2 b/build/nics-template.yaml.jinja2
index c7d0a1b8..920e9a1b 100644
--- a/build/nics-template.yaml.jinja2
+++ b/build/nics-template.yaml.jinja2
@@ -174,8 +174,11 @@ resources:
use_dhcp: false
{%- else %}
-
- type: interface
+ type: {{ nets['tenant']['nic_mapping'][role]['phys_type'] }}
name: {{ nets['tenant']['nic_mapping'][role]['members'][0] }}
+ {%- if 'uio_driver' in nets['tenant']['nic_mapping'][role] %}
+ uio_driver: {{ nets['tenant']['nic_mapping'][role]['uio_driver'] }}
+ {%- endif %}
use_dhcp: false
addresses:
-
diff --git a/build/opnfv-environment.yaml b/build/opnfv-environment.yaml
index 7e0f8017..3781e791 100644
--- a/build/opnfv-environment.yaml
+++ b/build/opnfv-environment.yaml
@@ -2,17 +2,16 @@
#types
parameters:
-# CloudDomain:
+ #CloudDomain:
parameter_defaults:
CeilometerStoreEvents: true
NeutronEnableForceMetadata: true
NeutronEnableDHCPMetadata: true
NeutronEnableIsolatedMetadata: true
- OvercloudControlFlavor: control
- OvercloudComputeFlavor: compute
- controllerImage: overcloud-full
-
+ #NovaSchedulerDefaultFilters: "RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,NUMATopologyFilter"
+ # Kernel arguments, this value will be set to kernel arguments specified for compute nodes in deploy setting file.
+ #ComputeKernelArgs: "intel_iommu=on iommu=pt default_hugepagesz=2MB hugepagesz=2MB hugepages=2048"
ExtraConfig:
tripleo::ringbuilder::build_ring: False
nova::nova_public_key:
@@ -26,6 +25,8 @@ parameter_defaults:
key: 'os_compute_api:servers:show:host_status'
value: 'rule:admin_or_owner'
nova::api::default_floating_pool: 'external'
+ #neutron::agents::dhcp::interface_driver: "neutron.agent.linux.interface.NSDriver"
+ #neutron::agents::l3::interface_driver: "neutron.agent.linux.interface.NSDriver"
ControllerServices:
- OS::TripleO::Services::CACerts
# - OS::TripleO::Services::CephClient
@@ -106,6 +107,7 @@ parameter_defaults:
- OS::TripleO::Services::Etcd
- OS::TripleO::Services::Gluon
- OS::TripleO::Services::Tacker
+ - OS::TripleO::Services::NeutronHoneycombAgent
ComputeServices:
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CephClient
@@ -129,3 +131,4 @@ parameter_defaults:
- OS::TripleO::Services::SensuClient
- OS::TripleO::Services::FluentdClient
- OS::TripleO::Services::VipHosts
+ - OS::TripleO::Services::NeutronHoneycombAgent
diff --git a/build/overcloud-full.sh b/build/overcloud-full.sh
index db695daf..a360689b 100755
--- a/build/overcloud-full.sh
+++ b/build/overcloud-full.sh
@@ -31,8 +31,7 @@ git archive --format=tar.gz --prefix=tripleo/ HEAD > ${BUILD_DIR}/opnfv-puppet-t
popd > /dev/null
# download customized os-net-config
-rm -fr os-net-config
-git clone https://github.com/trozet/os-net-config.git -b stable/danube
+clone_fork os-net-config
pushd os-net-config/os_net_config > /dev/null
git archive --format=tar.gz --prefix=os_net_config/ HEAD > ${BUILD_DIR}/os-net-config.tar.gz
popd > /dev/null
@@ -135,16 +134,11 @@ LIBGUESTFS_BACKEND=direct virt-customize \
--upload ${BUILD_DIR}/noarch/$tackerclient_pkg:/root/ \
--install /root/$tackerclient_pkg \
--run-command "pip install python-senlinclient" \
- --upload ${BUILD_ROOT}/neutron/agent/interface/interface.py:/usr/lib/python2.7/site-packages/neutron/agent/linux/ \
- --run-command "mkdir /root/fdio_neutron_l3" \
- --upload ${BUILD_ROOT}/neutron/agent/l3/namespaces.py:/root/fdio_neutron_l3/ \
- --upload ${BUILD_ROOT}/neutron/agent/l3/router_info.py:/root/fdio_neutron_l3/ \
- --upload ${BUILD_ROOT}/puppet-neutron/manifests/agents/ml2/networking-vpp.pp:/etc/puppet/modules/neutron/manifests/agents/ml2/ \
- --upload ${BUILD_ROOT}/puppet-neutron/manifests/plugins/ml2/networking-vpp.pp:/etc/puppet/modules/neutron/manifests/plugins/ml2/ \
- --upload ${BUILD_ROOT}/puppet-neutron/lib/puppet/type/neutron_agent_vpp.rb:/etc/puppet/modules/neutron/lib/puppet/type/ \
- --mkdir /etc/puppet/modules/neutron/lib/puppet/provider/neutron_agent_vpp \
- --upload ${BUILD_ROOT}/puppet-neutron/lib/puppet/provider/neutron_agent_vpp/ini_setting.rb:/etc/puppet/modules/neutron/lib/puppet/provider/neutron_agent_vpp/ \
--run-command "sed -i -E 's/timeout=[0-9]+/timeout=60/g' /usr/share/openstack-puppet/modules/rabbitmq/lib/puppet/provider/rabbitmqctl.rb" \
+ --upload ${BUILD_ROOT}/neutron-patch-NSDriver.patch:/usr/lib/python2.7/site-packages/ \
+ --run-command "cd /usr/lib/python2.7/site-packages/ && patch -p1 < neutron-patch-NSDriver.patch" \
+ --upload ${BUILD_ROOT}/puppet-neutron-add-odl-settings.patch:/usr/share/openstack-puppet/modules/neutron/ \
+ --run-command "cd /usr/share/openstack-puppet/modules/neutron && patch -p1 < puppet-neutron-add-odl-settings.patch" \
-a overcloud-full_build.qcow2
mv -f overcloud-full_build.qcow2 overcloud-full.qcow2
diff --git a/build/overcloud-opendaylight.sh b/build/overcloud-opendaylight.sh
index af745f18..66bf53a7 100755
--- a/build/overcloud-opendaylight.sh
+++ b/build/overcloud-opendaylight.sh
@@ -71,10 +71,16 @@ pushd netready/ > /dev/null
git archive --format=tar.gz HEAD:deploy/puppet/ > ${BUILD_DIR}/puppet-gluon.tar.gz
popd > /dev/null
+# Tar up all quagga/zrpc rpms
+pushd ${QUAGGA_RPMS_DIR}/rpmbuild/RPMS > /dev/null
+tar --transform "s/^x86_64/quagga/" -czvf ${BUILD_DIR}/quagga.tar.gz x86_64/
+popd > /dev/null
+
# install ODL packages
# install Jolokia for ODL HA
# Patch in OPNFV custom puppet-tripleO
# install Honeycomb
+# install quagga/zrpc
LIBGUESTFS_BACKEND=direct virt-customize \
--upload ${BUILD_DIR}/opendaylight_boron.repo:/etc/yum.repos.d/opendaylight.repo \
--run-command "yum install --downloadonly --downloaddir=/root/boron/ opendaylight" \
@@ -96,6 +102,10 @@ LIBGUESTFS_BACKEND=direct virt-customize \
--install epel-release \
--install python-click \
--install http://artifacts.opnfv.org/netready/gluon-0.0.1-1_20170216.noarch.rpm \
+ --upload ${BUILD_DIR}/quagga.tar.gz:/root/ \
+ --run-command "cd /root/ && tar xzf quagga.tar.gz" \
+ --install zeromq-4.1.4,zeromq-devel-4.1.4 \
+ --install capnproto-devel,capnproto-libs,capnproto \
-a overcloud-full-opendaylight_build.qcow2
mv overcloud-full-opendaylight_build.qcow2 overcloud-full-opendaylight.qcow2
diff --git a/build/patches/fix_quagga_make_dist.patch b/build/patches/fix_quagga_make_dist.patch
new file mode 100644
index 00000000..8f854e50
--- /dev/null
+++ b/build/patches/fix_quagga_make_dist.patch
@@ -0,0 +1,28 @@
+From c31749157aabca758ef731ad4d15ddf4cc2efe66 Mon Sep 17 00:00:00 2001
+From: Romanos Skiadas <rski@intracom-telecom.com>
+Date: Mon, 6 Feb 2017 15:28:44 +0200
+Subject: [PATCH] lib: Include missing ccapnproto header in Makefile
+
+Without this make dist doesn't include the header and the resulting
+archive doesn't build.
+
+Signed-off-by: Romanos Skiadas <rski@intracom-telecom.com>
+---
+ lib/Makefile.am | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/lib/Makefile.am b/lib/Makefile.am
+index c286833..c7682ed 100644
+--- a/lib/Makefile.am
++++ b/lib/Makefile.am
+@@ -43,6 +43,7 @@ endif
+ if HAVE_CCAPNPROTO
+ libzebra_la_SOURCES += qzc.capnp.c
+ BUILT_SOURCES += qzc.capnp.c
++pkginclude_HEADERS += qzc.capnp.h
+ endif
+
+ EXTRA_DIST = \
+--
+1.8.3.1
+
diff --git a/build/patches/fix_zrpcd_make_dist.patch b/build/patches/fix_zrpcd_make_dist.patch
new file mode 100644
index 00000000..a0f65b7f
--- /dev/null
+++ b/build/patches/fix_zrpcd_make_dist.patch
@@ -0,0 +1,29 @@
+From ad66cdee4ffe8225d4534137734cf62944ce45c8 Mon Sep 17 00:00:00 2001
+From: Romanos Skiadas <rski@intracom-telecom.com>
+Date: Mon, 6 Feb 2017 18:43:12 +0000
+Subject: [PATCH] make dist: Include all headers required for compilation
+
+Some headers were missing from the archive resulting from 'make dist'.
+
+Signed-off-by: Romanos Skiadas <rski@intracom-telecom.com>
+---
+ zrpcd/Makefile.am | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/zrpcd/Makefile.am b/zrpcd/Makefile.am
+index 50db7fa..f4081d3 100644
+--- a/zrpcd/Makefile.am
++++ b/zrpcd/Makefile.am
+@@ -18,6 +18,9 @@ libzrpc_a_SOURCES = \
+ qzmqclient.c qzcclient.capnp.c qzcclient.c zrpc_util.c \
+ zrpc_bgp_capnp.c
+
++pkginclude_HEADERS = \
++ zrpc_os_wrapper.h zrpc_global.h
++
+ noinst_HEADERS = \
+ bgp_configurator.h bgp_updater.h vpnservice_types.h zrpc_bgp_updater.h \
+ zrpc_bgp_configurator.h zrpc_bgp_updater.h zrpc_debug.h zrpc_memory.h \
+--
+1.8.3.1
+
diff --git a/build/patches/zrpcd_hardcoded_paths.patch b/build/patches/zrpcd_hardcoded_paths.patch
new file mode 100644
index 00000000..27115ca7
--- /dev/null
+++ b/build/patches/zrpcd_hardcoded_paths.patch
@@ -0,0 +1,58 @@
+From 48125816cf99b03f20496bce06850f05cdf2914a Mon Sep 17 00:00:00 2001
+From: Romanos Skiadas <rski@intracom-telecom.com>
+Date: Fri, 10 Feb 2017 12:48:46 +0000
+Subject: [PATCH] Change hardcoded paths
+
+The path to the bgpd executable and bgpd's pid file were hardcoded
+and not correct when zrpcd is packaged for Apex.
+This patch is a temporary fix until the paths are no longer hardcoded
+in the upstream project.
+---
+ zrpcd/zrpc_vpnservice.c | 8 +-------
+ zrpcd/zrpc_vpnservice.h | 4 ++--
+ 2 files changed, 3 insertions(+), 9 deletions(-)
+
+diff --git a/zrpcd/zrpc_vpnservice.c b/zrpcd/zrpc_vpnservice.c
+index a9de91d..28c8293 100644
+--- a/zrpcd/zrpc_vpnservice.c
++++ b/zrpcd/zrpc_vpnservice.c
+@@ -217,20 +217,14 @@ static void zrpc_vpnservice_callback (void *arg, void *zmqsock, struct zmq_msg_t
+ return;
+ }
+
+-#define SBIN_DIR "/sbin"
+
+ void zrpc_vpnservice_setup(struct zrpc_vpnservice *setup)
+ {
+- char bgpd_location_path[128];
+- char *ptr = bgpd_location_path;
+-
+ setup->zrpc_listen_port = ZRPC_LISTEN_PORT;
+ setup->zrpc_notification_port = ZRPC_NOTIFICATION_PORT;
+ setup->zmq_sock = ZRPC_STRDUP(ZMQ_SOCK);
+ setup->zmq_subscribe_sock = ZRPC_STRDUP(ZMQ_NOTIFY);
+- ptr+=sprintf(ptr, "%s", BGPD_PATH_QUAGGA);
+- ptr+=sprintf(ptr, "%s/bgpd",SBIN_DIR);
+- setup->bgpd_execution_path = ZRPC_STRDUP(bgpd_location_path);
++ setup->bgpd_execution_path = ZRPC_STRDUP(BGPD_EXECUTION_PATH);
+ }
+
+ void zrpc_vpnservice_terminate(struct zrpc_vpnservice *setup)
+diff --git a/zrpcd/zrpc_vpnservice.h b/zrpcd/zrpc_vpnservice.h
+index 12863a4..96331e2 100644
+--- a/zrpcd/zrpc_vpnservice.h
++++ b/zrpcd/zrpc_vpnservice.h
+@@ -21,8 +21,8 @@
+ #define BGPD_ARGS_STRING_1 "-p"
+ #define BGPD_ARGS_STRING_3 "-Z"
+
+-#define BGPD_PATH_BGPD_PID "/opt/quagga/var/run/quagga/bgpd.pid"
+-#define BGPD_PATH_QUAGGA "/opt/quagga"
++#define BGPD_PATH_BGPD_PID "/var/run/quagga/bgpd.pid"
++#define BGPD_EXECUTION_PATH "/usr/sbin/bgpd"
+
+ #define ZRPC_CONFIG_FILE "zrpcd.conf"
+
+--
+1.8.3.1
+
diff --git a/build/puppet-neutron-add-odl-settings.patch b/build/puppet-neutron-add-odl-settings.patch
new file mode 100644
index 00000000..aa0b35a1
--- /dev/null
+++ b/build/puppet-neutron-add-odl-settings.patch
@@ -0,0 +1,47 @@
+diff --git a/manifests/plugins/ml2/opendaylight.pp b/manifests/plugins/ml2/opendaylight.pp
+index a27c4d6..13b56c4 100644
+--- a/manifests/plugins/ml2/opendaylight.pp
++++ b/manifests/plugins/ml2/opendaylight.pp
+@@ -29,12 +29,22 @@
+ # (optional) The URI used to connect to the local OVSDB server
+ # Defaults to 'tcp:127.0.0.1:6639'
+ #
++# [*port_binding_controller*]
++# (optional) Name of the controller to be used for port binding.
++# Defaults to $::os_service_default
++#
++# [*odl_hostconf_uri*]
++# (optional) Path for ODL host configuration REST interface.
++# Defaults to $::os_service_default
++#
+ class neutron::plugins::ml2::opendaylight (
+- $package_ensure = 'present',
+- $odl_username = $::os_service_default,
+- $odl_password = $::os_service_default,
+- $odl_url = $::os_service_default,
+- $ovsdb_connection = 'tcp:127.0.0.1:6639',
++ $package_ensure = 'present',
++ $odl_username = $::os_service_default,
++ $odl_password = $::os_service_default,
++ $odl_url = $::os_service_default,
++ $ovsdb_connection = 'tcp:127.0.0.1:6639',
++ $port_binding_controller = $::os_service_default,
++ $odl_hostconf_uri = $::os_service_default,
+ ) {
+
+ include ::neutron::deps
+@@ -48,9 +58,11 @@ class neutron::plugins::ml2::opendaylight (
+ )
+
+ neutron_plugin_ml2 {
+- 'ml2_odl/username': value => $odl_username;
+- 'ml2_odl/password': value => $odl_password;
+- 'ml2_odl/url': value => $odl_url;
++ 'ml2_odl/username': value => $odl_username;
++ 'ml2_odl/password': value => $odl_password;
++ 'ml2_odl/url': value => $odl_url;
++ 'ml2_odl/port_binding_controller': value => $port_binding_controller;
++ 'ml2_odl/odl_hostconf_uri': value => $odl_hostconf_uri;
+ }
+
+ neutron_config {
diff --git a/build/puppet-neutron/lib/puppet/provider/neutron_agent_vpp/ini_setting.rb b/build/puppet-neutron/lib/puppet/provider/neutron_agent_vpp/ini_setting.rb
deleted file mode 100644
index 595904ce..00000000
--- a/build/puppet-neutron/lib/puppet/provider/neutron_agent_vpp/ini_setting.rb
+++ /dev/null
@@ -1,15 +0,0 @@
-Puppet::Type.type(:neutron_agent_vpp).provide(
- :ini_setting,
- :parent => Puppet::Type.type(:openstack_config).provider(:ini_setting)
-) do
-
- def self.file_path
- '/etc/neutron/plugins/ml2/vpp_agent.ini'
- end
-
- # added for backwards compatibility with older versions of inifile
- def file_path
- self.class.file_path
- end
-
-end \ No newline at end of file
diff --git a/build/puppet-neutron/lib/puppet/type/neutron_agent_vpp.rb b/build/puppet-neutron/lib/puppet/type/neutron_agent_vpp.rb
deleted file mode 100644
index f43a8b41..00000000
--- a/build/puppet-neutron/lib/puppet/type/neutron_agent_vpp.rb
+++ /dev/null
@@ -1,28 +0,0 @@
-Puppet::Type.newtype(:neutron_agent_vpp) do
-
- ensurable
-
- newparam(:name, :namevar => true) do
- desc 'Section/setting name to manage from vpp agent config.'
- newvalues(/\S+\/\S+/)
- end
-
- newproperty(:value) do
- desc 'The value of the setting to be defined.'
- munge do |value|
- value = value.to_s.strip
- value.capitalize! if value =~ /^(true|false)$/i
- value
- end
- end
-
- newparam(:ensure_absent_val) do
- desc 'A value that is specified as the value property will behave as if ensure => absent was specified'
- defaultto('<SERVICE DEFAULT>')
- end
-
- autorequire(:package) do
- 'networking-vpp'
- end
-
-end \ No newline at end of file
diff --git a/build/puppet-neutron/manifests/agents/ml2/networking-vpp.pp b/build/puppet-neutron/manifests/agents/ml2/networking-vpp.pp
deleted file mode 100644
index 6184e006..00000000
--- a/build/puppet-neutron/manifests/agents/ml2/networking-vpp.pp
+++ /dev/null
@@ -1,65 +0,0 @@
-# == Class: neutron::agents::ml2::networking-vpp
-#
-# Setups networking-vpp Neutron agent for ML2 plugin.
-#
-# === Parameters
-#
-# [*package_ensure*]
-# (optional) Package ensure state.
-# Defaults to 'present'.
-#
-# [*enabled*]
-# (required) Whether or not to enable the agent.
-# Defaults to true.
-#
-# [*manage_service*]
-# (optional) Whether to start/stop the service
-# Defaults to true
-#
-# [*physnets*]
-# List of <physical_network>:<physical_interface>
-# tuples mapping physical network names to agent's node-specific physical
-# network interfaces. Defaults to empty list.
-#
-# [*etcd_host*]
-# etcd server host name/ip
-# Defaults to 127.0.0.1.
-#
-# [*etcd_port*]
-# etcd server listening port.
-# Defaults to 4001.
-#
-class neutron::agents::ml2::networking-vpp (
- $package_ensure = 'present',
- $enabled = true,
- $manage_service = true,
- $physnets = '',
- $etcd_host = '127.0.0.1',
- $etcd_port = 4001,
-) {
-
- include ::neutron::params
-
- Neutron_agent_vpp<||> ~> Service['networking-vpp-agent']
-
- neutron_agent_vpp {
- 'ml2_vpp/physnets': value => $physnets;
- 'ml2_vpp/etcd_host': value => $etcd_host;
- 'ml2_vpp/etcd_port': value => $etcd_port;
- 'DEFAULT/host': value => $::fqdn;
- }
-
- if $manage_service {
- if $enabled {
- $service_ensure = 'running'
- } else {
- $service_ensure = 'stopped'
- }
- }
-
- service { 'networking-vpp-agent':
- ensure => $service_ensure,
- name => 'networking-vpp-agent',
- enable => $enabled,
- }
-} \ No newline at end of file
diff --git a/build/puppet-neutron/manifests/plugins/ml2/networking-vpp.pp b/build/puppet-neutron/manifests/plugins/ml2/networking-vpp.pp
deleted file mode 100644
index cf8fe178..00000000
--- a/build/puppet-neutron/manifests/plugins/ml2/networking-vpp.pp
+++ /dev/null
@@ -1,51 +0,0 @@
-#
-# Install the networking-vpp ML2 mechanism driver and generate config file
-# from parameters in the other classes.
-#
-# === Parameters
-#
-# [*package_ensure*]
-# (optional) The intended state of the networking-vpp
-# package, i.e. any of the possible values of the 'ensure'
-# property for a package resource type.
-# Defaults to 'present'
-#
-# [*etcd_host*]
-# (required) etcd server host name or IP.
-# Defaults to '127.0.0.1'
-#
-# [*etcd_port*]
-# (optional) etcd server listening port.
-# Defaults to 4001.
-#
-# [*etcd_user*]
-# (optional) User name for etcd authentication
-# Defaults to ''.
-#
-# [*etcd_pass*]
-# (optional) Password for etcd authentication
-# Defaults to ''.
-#
-class neutron::plugins::ml2::networking-vpp (
- $package_ensure = 'present',
- $etcd_host = '127.0.0.1',
- $etcd_port = 4001,
- $etcd_user = '',
- $etcd_pass = '',
-) {
- require ::neutron::plugins::ml2
-
- ensure_resource('package', 'networking-vpp',
- {
- ensure => $package_ensure,
- tag => 'openstack',
- }
- )
-
- neutron_plugin_ml2 {
- 'ml2_vpp/etcd_host': value => $etcd_host;
- 'ml2_vpp/etcd_port': value => $etcd_port;
- 'ml2_vpp/etcd_user': value => $etcd_user;
- 'ml2_vpp/etcd_pass': value => $etcd_pass;
- }
-}
diff --git a/build/rpm_specs/c_capnproto.spec b/build/rpm_specs/c_capnproto.spec
new file mode 100644
index 00000000..2d95494c
--- /dev/null
+++ b/build/rpm_specs/c_capnproto.spec
@@ -0,0 +1,45 @@
+Name: c-capnproto
+Version: 0.1
+Release: 0
+Summary: C library/compiler for the Cap'n Proto serialization/RPC protocol
+
+Group: System Environment
+License: Apache 2.0
+URL: https://gerrit.opnfv.org/gerrit/apex.git
+Source0: %{name}-%{version}.tar.gz
+
+Provides: c_capnproto
+
+%description
+C library/compiler for the Cap'n Proto serialization/RPC protocol
+
+%prep
+%setup -q
+
+%build
+%configure --without-gtest
+
+%install
+rm -rf $RPM_BUILD_ROOT
+%make_install
+find %{buildroot} -name '*.la' -exec rm -f {} ';'
+find %{buildroot} -name '*.a' -exec rm -f {} ';'
+mkdir -p $RPM_BUILD_ROOT/%{_includedir}/c-capnproto/
+# These are the headers/libs quagga/zrpcd link against
+install -m 700 $RPM_BUILD_ROOT/%{_includedir}/capn.h $RPM_BUILD_ROOT/%{_includedir}/c-capnproto/
+install -m 700 $RPM_BUILD_ROOT/%{_libdir}/libcapn.so $RPM_BUILD_ROOT/%{_libdir}/libcapn_c.so
+
+%files
+%defattr(644,root,root)
+%{_bindir}/capnpc-c
+%{_includedir}/capn.h
+%{_includedir}/c-capnproto/capn.h
+%{_libdir}/libcapn.so*
+%{_libdir}/libcapn_c.so
+
+%post -p /sbin/ldconfig
+%postun -p /sbin/ldconfig
+
+%changelog
+* Mon Jan 23 2017 Tim Rozet <trozet@redhat.com> - 1.0-1
+- Initial version
diff --git a/build/rpm_specs/opnfv-apex-common.spec b/build/rpm_specs/opnfv-apex-common.spec
index db271848..ec0735fc 100644
--- a/build/rpm_specs/opnfv-apex-common.spec
+++ b/build/rpm_specs/opnfv-apex-common.spec
@@ -22,8 +22,8 @@ https://wiki.opnfv.org/apex
%setup -q
%build
-rst2html docs/installationprocedure/index.rst docs/installation-instructions.html
-rst2html docs/releasenotes/release-notes.rst docs/release-notes.html
+rst2html docs/release/installation/index.rst docs/release/installation/installation-instructions.html
+rst2html docs/release/release-notes/release-notes.rst docs/release/release-notes/release-notes.html
%global __python %{__python3}
@@ -56,6 +56,7 @@ install config/deploy/os-onos-sfc-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/
install config/deploy/os-ocl-nofeature-ha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-ocl-nofeature-ha.yaml
install config/network/network_settings.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/network_settings.yaml
install config/network/network_settings_v6.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/network_settings_v6.yaml
+install config/network/network_settings_vpp.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/network_settings_vpp.yaml
mkdir -p %{buildroot}%{_var}/opt/opnfv/lib/python/apex
@@ -86,12 +87,13 @@ install lib/installer/domain.xml %{buildroot}%{_var}/opt/opnfv/lib/installer/
mkdir -p %{buildroot}%{_docdir}/opnfv/
install LICENSE.rst %{buildroot}%{_docdir}/opnfv/
-install docs/installation-instructions.html %{buildroot}%{_docdir}/opnfv/
-install docs/releasenotes/index.rst %{buildroot}%{_docdir}/opnfv/release-notes.rst
-install docs/release-notes.html %{buildroot}%{_docdir}/opnfv/
+install docs/release/installation/installation-instructions.html %{buildroot}%{_docdir}/opnfv/
+install docs/release/release-notes/index.rst %{buildroot}%{_docdir}/opnfv/release-notes.rst
+install docs/release/release-notes/release-notes.html %{buildroot}%{_docdir}/opnfv/
install config/deploy/deploy_settings.yaml %{buildroot}%{_docdir}/opnfv/deploy_settings.yaml.example
install config/network/network_settings.yaml %{buildroot}%{_docdir}/opnfv/network_settings.yaml.example
install config/network/network_settings_v6.yaml %{buildroot}%{_docdir}/opnfv/network_settings_v6.yaml.example
+install config/network/network_settings_vpp.yaml %{buildroot}%{_docdir}/opnfv/network_settings_vpp.yaml.example
install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/inventory.yaml.example
%files
@@ -131,6 +133,7 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
%{_sysconfdir}/opnfv-apex/os-ocl-nofeature-ha.yaml
%{_sysconfdir}/opnfv-apex/network_settings.yaml
%{_sysconfdir}/opnfv-apex/network_settings_v6.yaml
+%{_sysconfdir}/opnfv-apex/network_settings_vpp.yaml
%doc %{_docdir}/opnfv/LICENSE.rst
%doc %{_docdir}/opnfv/installation-instructions.html
%doc %{_docdir}/opnfv/release-notes.rst
@@ -138,9 +141,12 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
%doc %{_docdir}/opnfv/deploy_settings.yaml.example
%doc %{_docdir}/opnfv/network_settings.yaml.example
%doc %{_docdir}/opnfv/network_settings_v6.yaml.example
+%doc %{_docdir}/opnfv/network_settings_vpp.yaml.example
%doc %{_docdir}/opnfv/inventory.yaml.example
%changelog
+* Tue Feb 14 2017 Feng Pan <fpan@redhat.com> - 4.0-4
+- Add network_settings_vpp.yaml
* Fri Feb 3 2017 Nikolas Hermanns <nikolas.hermanns@ericsson.com> - 4.0-3
- change odl_l3-gluon-noha to odl-gluon-noha
* Thu Feb 2 2017 Feng Pan <fpan@redhat.com> - 4.0-2
diff --git a/build/rpm_specs/quagga.spec b/build/rpm_specs/quagga.spec
new file mode 100644
index 00000000..c129e0a2
--- /dev/null
+++ b/build/rpm_specs/quagga.spec
@@ -0,0 +1,748 @@
+# configure options
+#
+# Some can be overridden on rpmbuild commandline with:
+# rpmbuild --define 'variable value'
+# (use any value, ie 1 for flag "with_XXXX" definitions)
+#
+# E.g. rpmbuild --define 'release_rev 02' may be useful if building
+# rpms again and again on the same day, so the newer rpms can be installed.
+# bumping the number each time.
+
+####################### Quagga configure options #########################
+# with-feature options
+%{!?with_snmp: %global with_snmp 1 }
+%{!?with_vtysh: %global with_vtysh 1 }
+%{!?with_tcp_zebra: %global with_tcp_zebra 0 }
+%{!?with_vtysh: %global with_vtysh 1 }
+%{!?with_pam: %global with_pam 1 }
+%{!?with_ospfclient: %global with_ospfclient 1 }
+%{!?with_ospfapi: %global with_ospfapi 1 }
+%{!?with_irdp: %global with_irdp 1 }
+%{!?with_rtadv: %global with_rtadv 1 }
+%{!?with_isisd: %global with_isisd 1 }
+%{!?with_pimd: %global with_pimd 1 }
+%{!?with_shared: %global with_shared 1 }
+%{!?with_multipath: %global with_multipath 64 }
+%{!?quagga_user: %global quagga_user quagga }
+%{!?vty_group: %global vty_group quaggavt }
+%{!?with_fpm: %global with_fpm 0 }
+%{!?with_watchquagga: %global with_watchquagga 1 }
+
+# path defines
+%define _sysconfdir /etc/quagga
+%define zeb_src %{_builddir}/%{name}-%{quaggaversion}
+%define zeb_rh_src %{zeb_src}/redhat
+%define zeb_docs %{zeb_src}/doc
+
+# defines for configure
+%define _localstatedir /var/run/quagga
+############################################################################
+
+#### Version String tweak
+# Remove invalid characters from version string and replace with _
+%{expand: %%define rpmversion %(echo '1.1.0-dev' | tr [:blank:]- _ )}
+%define quaggaversion 1.1.0-dev
+
+#### Check version of texi2html
+# Old versions don't support "--number-footnotes" option.
+%{expand: %%global texi2htmlversion %(rpm -q --qf '%%{VERSION}' texi2html | cut -d. -f1 )}
+
+#### Check for systemd or init.d (upstart)
+# Check for init.d (upstart) as used in CentOS 6 or systemd (ie CentOS 7)
+%{expand: %%global initsystem %(if [[ `/sbin/init --version 2> /dev/null` =~ upstart ]]; then echo upstart; elif [[ `systemctl` =~ -\.mount ]]; then echo systemd; fi)}
+#
+# If init system is systemd, then always disable watchquagga
+#
+%if "%{initsystem}" == "systemd"
+ # Note: For systems with systemd, watchquagga will NOT be built. Systemd
+ # takes over the role of restarting crashed processes. Value will
+ # be overwritten with 0 below for systemd independent on the setting here
+ %global with_watchquagga 0
+%endif
+
+# if FPM is enabled, then enable tcp_zebra as well
+#
+%if %{with_fpm}
+ %global with_tcp_zebra 1
+%endif
+
+# misc internal defines
+%{!?quagga_uid: %define quagga_uid 92 }
+%{!?quagga_gid: %define quagga_gid 92 }
+%{!?vty_gid: %define vty_gid 85 }
+
+%define daemon_list zebra ripd ospfd bgpd
+
+%define daemonv6_list ripngd ospf6d
+
+%if %{with_isisd}
+%define daemon_isisd isisd
+%else
+%define daemon_isisd ""
+%endif
+
+%if %{with_pimd}
+%define daemon_pimd pimd
+%else
+%define daemon_pimd ""
+%endif
+
+%if %{with_watchquagga}
+%define daemon_watchquagga watchquagga
+%else
+%define daemon_watchquagga ""
+%endif
+
+%define all_daemons %{daemon_list} %{daemonv6_list} %{daemon_isisd} %{daemon_pimd} %{daemon_watchquagga}
+
+# allow build dir to be kept
+%{!?keep_build: %global keep_build 0 }
+
+#release sub-revision (the two digits after the CONFDATE)
+%{!?release_rev: %define release_rev 01 }
+
+Summary: Routing daemon
+Name: quagga
+Version: %{rpmversion}
+Release: 20170120%{release_rev}%{?dist}
+License: GPLv2+
+Group: System Environment/Daemons
+Source0: quagga-1.1.0-dev.tar.gz
+Source1: bgpd.conf
+URL: http://www.quagga.net
+Requires: ncurses
+Requires(pre): /sbin/install-info
+Requires(preun): /sbin/install-info
+Requires(post): /sbin/install-info
+BuildRequires: texi2html texinfo autoconf patch libcap-devel groff
+%if %{with_snmp}
+BuildRequires: net-snmp-devel
+Requires: net-snmp
+%endif
+%if %{with_vtysh}
+BuildRequires: readline readline-devel ncurses ncurses-devel
+Requires: ncurses
+%endif
+%if %{with_pam}
+BuildRequires: pam-devel
+Requires: pam
+%endif
+%if "%{initsystem}" == "systemd"
+BuildRequires: systemd
+Requires(post): systemd
+Requires(preun): systemd
+Requires(postun): systemd
+%else
+# Initscripts > 5.60 is required for IPv6 support
+Requires(pre): initscripts >= 5.60
+%endif
+Provides: routingdaemon = %{version}-%{release}
+BuildRoot: %{_tmppath}/%{name}-%{version}-root
+Obsoletes: bird gated mrt zebra quagga-sysvinit
+
+%description
+Quagga is a free software that manages TCP/IP based routing
+protocol. It takes multi-server and multi-thread approach to resolve
+the current complexity of the Internet.
+
+Quagga supports BGP4, OSPFv2, OSPFv3, ISIS, RIP, RIPng and PIM.
+
+Quagga is intended to be used as a Route Server and a Route Reflector. It is
+not a toolkit, it provides full routing power under a new architecture.
+Quagga by design has a process for each protocol.
+
+Quagga is a fork of GNU Zebra.
+
+%package contrib
+Summary: contrib tools for quagga
+Group: System Environment/Daemons
+
+%description contrib
+Contributed/3rd party tools which may be of use with quagga.
+
+%package devel
+Summary: Header and object files for quagga development
+Group: System Environment/Daemons
+Requires: %{name} = %{version}-%{release}
+
+%description devel
+The quagga-devel package contains the header and object files necessary for
+developing OSPF-API and quagga applications.
+
+%prep
+%setup -q -n quagga-%{quaggaversion}
+
+%build
+
+# For standard gcc verbosity, uncomment these lines:
+#CFLAGS="%{optflags} -Wall -Wsign-compare -Wpointer-arith"
+#CFLAGS="${CFLAGS} -Wbad-function-cast -Wwrite-strings"
+
+# For ultra gcc verbosity, uncomment these lines also:
+#CFLAGS="${CFLAGS} -W -Wcast-qual -Wstrict-prototypes"
+#CFLAGS="${CFLAGS} -Wmissing-declarations -Wmissing-noreturn"
+#CFLAGS="${CFLAGS} -Wmissing-format-attribute -Wunreachable-code"
+#CFLAGS="${CFLAGS} -Wpacked -Wpadded"
+
+%configure \
+ --sysconfdir=%{_sysconfdir} \
+ --libdir=%{_libdir} \
+ --libexecdir=%{_libexecdir} \
+ --localstatedir=%{_localstatedir} \
+ --disable-werror \
+%if !%{with_shared}
+ --disable-shared \
+%endif
+%if %{with_snmp}
+ --enable-snmp \
+%endif
+%if %{with_multipath}
+ --enable-multipath=%{with_multipath} \
+%endif
+%if %{with_tcp_zebra}
+ --enable-tcp-zebra \
+%endif
+%if %{with_vtysh}
+ --enable-vtysh \
+%endif
+%if %{with_ospfclient}
+ --enable-ospfclient=yes \
+%else
+ --enable-ospfclient=no\
+%endif
+%if %{with_ospfapi}
+ --enable-ospfapi=yes \
+%else
+ --enable-ospfapi=no \
+%endif
+%if %{with_irdp}
+ --enable-irdp=yes \
+%else
+ --enable-irdp=no \
+%endif
+%if %{with_rtadv}
+ --enable-rtadv=yes \
+%else
+ --enable-rtadv=no \
+%endif
+%if %{with_isisd}
+ --enable-isisd \
+%else
+ --disable-isisd \
+%endif
+%if %{with_pam}
+ --with-libpam \
+%endif
+%if 0%{?quagga_user:1}
+ --enable-user=%quagga_user \
+ --enable-group=%quagga_user \
+%endif
+%if 0%{?vty_group:1}
+ --enable-vty-group=%vty_group \
+%endif
+%if %{with_fpm}
+ --enable-fpm \
+%else
+ --disable-fpm \
+%endif
+%if %{with_watchquagga}
+ --enable-watchquagga \
+%else
+ --disable-watchquagga \
+%endif
+ --enable-gcc-rdynamic \
+ --with-ccapnproto \
+ --with-zeromq
+
+make %{?_smp_mflags} MAKEINFO="makeinfo --no-split"
+
+pushd doc
+%if %{texi2htmlversion} < 5
+texi2html --number-sections quagga.texi
+%else
+texi2html --number-footnotes --number-sections quagga.texi
+%endif
+popd
+
+%install
+mkdir -p %{buildroot}/etc/{quagga,sysconfig,logrotate.d,pam.d} \
+ %{buildroot}/var/log/quagga %{buildroot}%{_infodir}
+make DESTDIR=%{buildroot} INSTALL="install -p" CP="cp -p" install
+install %{SOURCE1} %{buildroot}/etc/quagga/bgpd.conf
+
+# Remove this file, as it is uninstalled and causes errors when building on RH9
+rm -rf %{buildroot}/usr/share/info/dir
+
+# install /etc sources
+%if "%{initsystem}" == "systemd"
+mkdir -p %{buildroot}%{_unitdir}
+for daemon in %{all_daemons} ; do
+ if [ x"${daemon}" != x"" ] ; then
+ install %{zeb_rh_src}/${daemon}.service \
+ %{buildroot}%{_unitdir}/${daemon}.service
+ fi
+done
+%else
+mkdir -p %{buildroot}/etc/rc.d/init.d
+for daemon in %{all_daemons} ; do
+ if [ x"${daemon}" != x"" ] ; then
+ install %{zeb_rh_src}/${daemon}.init \
+ %{buildroot}/etc/rc.d/init.d/${daemon}
+ fi
+done
+%endif
+
+install -m644 %{zeb_rh_src}/quagga.pam \
+ %{buildroot}/etc/pam.d/quagga
+install -m644 %{zeb_rh_src}/quagga.logrotate \
+ %{buildroot}/etc/logrotate.d/quagga
+install -m644 %{zeb_rh_src}/quagga.sysconfig \
+ %{buildroot}/etc/sysconfig/quagga
+install -d -m750 %{buildroot}/var/run/quagga
+
+%pre
+# add vty_group
+%if 0%{?vty_group:1}
+if getent group %vty_group > /dev/null ; then : ; else \
+ /usr/sbin/groupadd -r -g %vty_gid %vty_group > /dev/null || : ; fi
+%endif
+
+# add quagga user and group
+%if 0%{?quagga_user:1}
+# Ensure that quagga_gid gets correctly allocated
+if getent group %quagga_user >/dev/null; then : ; else \
+ /usr/sbin/groupadd -g %quagga_gid %quagga_user > /dev/null || : ; \
+fi
+if getent passwd %quagga_user >/dev/null ; then : ; else \
+ /usr/sbin/useradd -u %quagga_uid -g %quagga_gid \
+ -M -r -s /sbin/nologin -c "Quagga routing suite" \
+ -d %_localstatedir %quagga_user 2> /dev/null || : ; \
+fi
+%endif
+
+%post
+# zebra_spec_add_service <service name> <port/proto> <comment>
+# e.g. zebra_spec_add_service zebrasrv 2600/tcp "zebra service"
+
+zebra_spec_add_service ()
+{
+ # Add port /etc/services entry if it isn't already there
+ if [ -f /etc/services ] && \
+ ! %__sed -e 's/#.*$//' /etc/services | %__grep -wq $1 ; then
+ echo "$1 $2 # $3" >> /etc/services
+ fi
+}
+
+zebra_spec_add_service zebrasrv 2600/tcp "zebra service"
+zebra_spec_add_service zebra 2601/tcp "zebra vty"
+zebra_spec_add_service ripd 2602/tcp "RIPd vty"
+zebra_spec_add_service ripngd 2603/tcp "RIPngd vty"
+zebra_spec_add_service ospfd 2604/tcp "OSPFd vty"
+zebra_spec_add_service bgpd 2605/tcp "BGPd vty"
+zebra_spec_add_service ospf6d 2606/tcp "OSPF6d vty"
+%if %{with_ospfapi}
+zebra_spec_add_service ospfapi 2607/tcp "OSPF-API"
+%endif
+%if %{with_isisd}
+zebra_spec_add_service isisd 2608/tcp "ISISd vty"
+%endif
+%if %{with_pimd}
+zebra_spec_add_service pimd 2611/tcp "PIMd vty"
+%endif
+
+%if "%{initsystem}" == "systemd"
+for daemon in %all_daemons ; do
+ %systemd_post ${daemon}.service
+done
+%else
+for daemon in %all_daemons ; do
+ /sbin/chkconfig --add ${daemon}
+done
+%endif
+
+/sbin/install-info %{_infodir}/quagga.info.gz %{_infodir}/dir
+
+# Create dummy files if they don't exist so basic functions can be used.
+if [ ! -e %{_sysconfdir}/zebra.conf ]; then
+ echo "hostname `hostname`" > %{_sysconfdir}/zebra.conf
+%if 0%{?quagga_user:1}
+ chown %quagga_user:%quagga_user %{_sysconfdir}/zebra.conf*
+%endif
+ chmod 640 %{_sysconfdir}/zebra.conf
+fi
+for daemon in %{all_daemons} ; do
+ if [ ! -e %{_sysconfdir}/${daemon}.conf ]; then
+ touch %{_sysconfdir}/${daemon}.conf
+ %if 0%{?quagga_user:1}
+ chown %quagga_user:%quagga_user %{_sysconfdir}/${daemon}.conf*
+ %endif
+ fi
+done
+%if %{with_watchquagga}
+ # No config for watchquagga - this is part of /etc/sysconfig/quagga
+ rm -f %{_sysconfdir}/watchquagga.*
+%endif
+
+if [ ! -e %{_sysconfdir}/vtysh.conf ]; then
+ touch %{_sysconfdir}/vtysh.conf
+ chmod 640 %{_sysconfdir}/vtysh.conf
+%if 0%{?vty_group:1}
+ chown quagga:%{vty_group} %{_sysconfdir}/vtysh.conf*
+%endif
+fi
+
+%postun
+if [ "$1" -ge 1 ]; then
+ # Find out which daemons need to be restarted.
+ for daemon in %all_daemons ; do
+ if [ -f /var/lock/subsys/${daemon} ]; then
+ eval restart_${daemon}=yes
+ else
+ eval restart_${daemon}=no
+ fi
+ done
+ # Rename restart flags for daemons handled specially.
+ running_zebra="$restart_zebra"
+ restart_zebra=no
+ %if %{with_watchquagga}
+ running_watchquagga="$restart_watchquagga"
+ restart_watchquagga=no
+ %endif
+
+ %if "%{initsystem}" == "systemd"
+ ##
+ ## Systemd Version
+ ##
+ # No watchquagga for systemd version
+ #
+ # Stop all daemons other than zebra.
+ for daemon in %all_daemons ; do
+ eval restart=\$restart_${daemon}
+ [ "$restart" = yes ] && \
+ %systemd_postun ${daemon}.service
+ done
+ # Restart zebra.
+ [ "$running_zebra" = yes ] && \
+ %systemd_postun_with_restart $daemon.service
+ # Start all daemons other than zebra.
+ for daemon in %all_daemons ; do
+ eval restart=\$restart_${daemon}
+ [ "$restart" = yes ] && \
+ %systemd_post ${daemon}.service
+ done
+ %else
+ ##
+ ## init.d Version
+ ##
+ %if %{with_watchquagga}
+ # Stop watchquagga first.
+ [ "$running_watchquagga" = yes ] && \
+ /etc/rc.d/init.d/watchquagga stop >/dev/null 2>&1
+ %endif
+ # Stop all daemons other than zebra and watchquagga.
+ for daemon in %all_daemons ; do
+ eval restart=\$restart_${daemon}
+ [ "$restart" = yes ] && \
+ /etc/rc.d/init.d/${daemon} stop >/dev/null 2>&1
+ done
+ # Restart zebra.
+ [ "$running_zebra" = yes ] && \
+ /etc/rc.d/init.d/zebra restart >/dev/null 2>&1
+ # Start all daemons other than zebra and watchquagga.
+ for daemon in %all_daemons ; do
+ eval restart=\$restart_${daemon}
+ [ "$restart" = yes ] && \
+ /etc/rc.d/init.d/${daemon} start >/dev/null 2>&1
+ done
+ %if %{with_watchquagga}
+ # Start watchquagga last.
+ # Avoid postun scriptlet error if watchquagga is not running.
+ [ "$running_watchquagga" = yes ] && \
+ /etc/rc.d/init.d/watchquagga start >/dev/null 2>&1 || :
+ %endif
+ %endif
+fi
+
+%preun
+%if "%{initsystem}" == "systemd"
+ ##
+ ## Systemd Version
+ ##
+ if [ "$1" = "0" ]; then
+ for daemon in %all_daemons ; do
+ %systemd_preun ${daemon}.service
+ done
+ fi
+%else
+ ##
+ ## init.d Version
+ ##
+ if [ "$1" = "0" ]; then
+ for daemon in %all_daemons ; do
+ /etc/rc.d/init.d/${daemon} stop >/dev/null 2>&1
+ /sbin/chkconfig --del ${daemon}
+ done
+ fi
+%endif
+/sbin/install-info --delete %{_infodir}/quagga.info.gz %{_infodir}/dir
+
+%clean
+%if !0%{?keep_build:1}
+rm -rf %{buildroot}
+%endif
+
+%files
+%defattr(-,root,root)
+%doc */*.sample* AUTHORS COPYING
+%doc doc/quagga.html
+%doc doc/mpls
+%doc ChangeLog INSTALL NEWS README REPORTING-BUGS SERVICES TODO
+%if 0%{?quagga_user:1}
+%dir %attr(751,%quagga_user,%quagga_user) %{_sysconfdir}
+%dir %attr(750,%quagga_user,%quagga_user) /var/log/quagga
+%dir %attr(751,%quagga_user,%quagga_user) /var/run/quagga
+%attr(750,%quagga_user,%quagga_user) %{_sysconfdir}/bgpd.conf
+%else
+%dir %attr(750,root,root) %{_sysconfdir}
+%dir %attr(750,root,root) /var/log/quagga
+%dir %attr(750,root,root) /var/run/quagga
+%endif
+%if 0%{?vty_group:1}
+%attr(750,%quagga_user,%vty_group) %{_sysconfdir}/vtysh.conf.sample
+%endif
+%{_infodir}/quagga.info.gz
+%{_mandir}/man*/*
+%{_sbindir}/zebra
+%{_sbindir}/ospfd
+%{_sbindir}/ripd
+%{_sbindir}/bgpd
+%if %{with_watchquagga}
+ %{_sbindir}/watchquagga
+%endif
+%{_sbindir}/ripngd
+%{_sbindir}/ospf6d
+%if %{with_pimd}
+%{_sbindir}/pimd
+%endif
+%if %{with_isisd}
+%{_sbindir}/isisd
+%endif
+%if %{with_shared}
+%attr(755,root,root) %{_libdir}/lib*.so
+%attr(755,root,root) %{_libdir}/lib*.so.*
+%endif
+%if %{with_vtysh}
+%{_bindir}/*
+%endif
+%config /etc/quagga/[!v]*
+%if "%{initsystem}" == "systemd"
+ %config %{_unitdir}/*.service
+%else
+ %config /etc/rc.d/init.d/zebra
+ %if %{with_watchquagga}
+ %config /etc/rc.d/init.d/watchquagga
+ %endif
+ %config /etc/rc.d/init.d/ripd
+ %config /etc/rc.d/init.d/ospfd
+ %config /etc/rc.d/init.d/bgpd
+ %config /etc/rc.d/init.d/ripngd
+ %config /etc/rc.d/init.d/ospf6d
+ %if %{with_isisd}
+ %config /etc/rc.d/init.d/isisd
+ %endif
+ %if %{with_pimd}
+ %config /etc/rc.d/init.d/pimd
+ %endif
+%endif
+%config(noreplace) /etc/sysconfig/quagga
+%config(noreplace) /etc/pam.d/quagga
+%config(noreplace) %attr(640,root,root) /etc/logrotate.d/*
+
+%files contrib
+%defattr(-,root,root)
+%doc tools
+
+%files devel
+%defattr(-,root,root)
+%if %{with_ospfclient}
+%{_sbindir}/ospfclient
+%endif
+%{_libdir}/*.a
+%{_libdir}/*.la
+%dir %attr(755,root,root) %{_includedir}/%{name}
+%{_includedir}/%name/*.h
+%dir %attr(755,root,root) %{_includedir}/%{name}/ospfd
+%{_includedir}/%name/ospfd/*.h
+%if %{with_ospfapi}
+%dir %attr(755,root,root) %{_includedir}/%{name}/ospfapi
+%{_includedir}/%name/ospfapi/*.h
+%endif
+
+%changelog
+* Thu Feb 11 2016 Paul Jakma <paul@jakma.org> - %{version}
+- remove with_ipv6 conditionals, always build v6
+- Fix UTF-8 char in spec changelog
+- remove quagga.pam.stack, long deprecated.
+
+* Thu Oct 22 2015 Martin Winter <mwinter@opensourcerouting.org>
+- Cleanup configure: remove --enable-ipv6 (default now), --enable-nssa,
+ --enable-netlink
+- Remove support for old fedora 4/5
+- Fix for package nameing
+- Fix Weekdays of previous changelogs (bogus dates)
+- Add conditional logic to only build tex footnotes with supported texi2html
+- Added pimd to files section and fix double listing of /var/lib*/quagga
+- Numerous fixes to unify upstart/systemd startup into same spec file
+- Only allow use of watchquagga for non-systemd systems. no need with systemd
+
+* Fri Sep 4 2015 Paul Jakma <paul@jakma.org>
+- buildreq updates
+- add a default define for with_pimd
+
+* Mon Sep 12 2005 Paul Jakma <paul@dishone.st>
+- Steal some changes from Fedora spec file:
+- Add with_rtadv variable
+- Test for groups/users with getent before group/user adding
+- Readline need not be an explicit prerequisite
+- install-info delete should be postun, not preun
+
+* Wed Jan 12 2005 Andrew J. Schorr <ajschorr@alumni.princeton.edu>
+- on package upgrade, implement careful, phased restart logic
+- use gcc -rdynamic flag when linking for better backtraces
+
+* Wed Dec 22 2004 Andrew J. Schorr <ajschorr@alumni.princeton.edu>
+- daemonv6_list should contain only IPv6 daemons
+
+* Wed Dec 22 2004 Andrew J. Schorr <ajschorr@alumni.princeton.edu>
+- watchquagga added
+- on upgrade, all daemons should be condrestart'ed
+- on removal, all daemons should be stopped
+
+* Mon Nov 08 2004 Paul Jakma <paul@dishone.st>
+- Use makeinfo --html to generate quagga.html
+
+* Sun Nov 07 2004 Paul Jakma <paul@dishone.st>
+- Fix with_ipv6 set to 0 build
+
+* Sat Oct 23 2004 Paul Jakma <paul@dishone.st>
+- Update to 0.97.2
+
+* Sat Oct 23 2004 Andrew J. Schorr <aschorr@telemetry-investments.com>
+- Make directories be owned by the packages concerned
+- Update logrotate scripts to use correct path to killall and use pid files
+
+* Fri Oct 08 2004 Paul Jakma <paul@dishone.st>
+- Update to 0.97.0
+
+* Wed Sep 15 2004 Paul Jakma <paul@dishone.st>
+- build snmp support by default
+- build irdp support
+- build with shared libs
+- devel subpackage for archives and headers
+
+* Thu Jan 08 2004 Paul Jakma <paul@dishone.st>
+- updated sysconfig files to specify local dir
+- added ospf_dump.c crash quick fix patch
+- added ospfd persistent interface configuration patch
+
+* Tue Dec 30 2003 Paul Jakma <paul@dishone.st>
+- sync to CVS
+- integrate RH sysconfig patch to specify daemon options (RH)
+- default to have vty listen only to 127.1 (RH)
+- add user with fixed UID/GID (RH)
+- create user with shell /sbin/nologin rather than /bin/false (RH)
+- stop daemons on uninstall (RH)
+- delete info file on preun, not postun to avoid deletion on upgrade. (RH)
+- isisd added
+- cleanup tasks carried out for every daemon
+
+* Sun Nov 2 2003 Paul Jakma <paul@dishone.st>
+- Fix -devel package to include all files
+- Sync to 0.96.4
+
+* Tue Aug 12 2003 Paul Jakma <paul@dishone.st>
+- Renamed to Quagga
+- Sync to Quagga release 0.96
+
+* Thu Mar 20 2003 Paul Jakma <paul@dishone.st>
+- zebra privileges support
+
+* Tue Mar 18 2003 Paul Jakma <paul@dishone.st>
+- Fix mem leak in 'show thread cpu'
+- Ralph Keller's OSPF-API
+- Amir: Fix configure.ac for net-snmp
+
+* Sat Mar 1 2003 Paul Jakma <paul@dishone.st>
+- ospfd IOS prefix to interface matching for 'network' statement
+- temporary fix for PtP and IPv6
+- sync to zebra.org CVS
+
+* Mon Jan 20 2003 Paul Jakma <paul@dishone.st>
+- update to latest cvs
+- Yon's "show thread cpu" patch - 17217
+- walk up tree - 17218
+- ospfd NSSA fixes - 16681
+- ospfd nsm fixes - 16824
+- ospfd OLSA fixes and new feature - 16823
+- KAME and ifindex fixes - 16525
+- spec file changes to allow redhat files to be in tree
+
+* Sat Dec 28 2002 Alexander Hoogerhuis <alexh@ihatent.com>
+- Added conditionals for building with(out) IPv6, vtysh, RIP, BGP
+- Fixed up some build requirements (patch)
+- Added conditional build requirements for vtysh / snmp
+- Added conditional to files for _bindir depending on vtysh
+
+* Mon Nov 11 2002 Paul Jakma <paulj@alphyra.ie>
+- update to latest CVS
+- add Greg Troxel's md5 buffer copy/dup fix
+- add RIPv1 fix
+- add Frank's multicast flag fix
+
+* Wed Oct 09 2002 Paul Jakma <paulj@alphyra.ie>
+- update to latest CVS
+- timestamped crypt_seqnum patch
+- oi->on_write_q fix
+
+* Mon Sep 30 2002 Paul Jakma <paulj@alphyra.ie>
+- update to latest CVS
+- add vtysh 'write-config (integrated|daemon)' patch
+- always 'make rebuild' in vtysh/ to catch new commands
+
+* Fri Sep 13 2002 Paul Jakma <paulj@alphyra.ie>
+- update to 0.93b
+
+* Wed Sep 11 2002 Paul Jakma <paulj@alphyra.ie>
+- update to latest CVS
+- add "/sbin/ip route flush proto zebra" to zebra RH init on startup
+
+* Sat Aug 24 2002 Paul Jakma <paulj@alphyra.ie>
+- update to current CVS
+- add OSPF point to multipoint patch
+- add OSPF bugfixes
+- add BGP hash optimisation patch
+
+* Fri Jun 14 2002 Paul Jakma <paulj@alphyra.ie>
+- update to 0.93-pre1 / CVS
+- add link state detection support
+- add generic PtP and RFC3021 support
+- various bug fixes
+
+* Thu Aug 09 2001 Elliot Lee <sopwith@redhat.com> 0.91a-6
+- Fix bug #51336
+
+* Wed Aug 1 2001 Trond Eivind Glomsrød <teg@redhat.com> 0.91a-5
+- Use generic initscript strings instead of initscript specific
+ ( "Starting foo: " -> "Starting $prog:" )
+
+* Fri Jul 27 2001 Elliot Lee <sopwith@redhat.com> 0.91a-4
+- Bump the release when rebuilding into the dist.
+
+* Tue Feb 6 2001 Tim Powers <timp@redhat.com>
+- built for Powertools
+
+* Sun Feb 4 2001 Pekka Savola <pekkas@netcore.fi>
+- Hacked up from PLD Linux 0.90-1, Mandrake 0.90-1mdk and one from zebra.org.
+- Update to 0.91a
+- Very heavy modifications to init.d/*, .spec, pam, i18n, logrotate, etc.
+- Should be quite Red Hat'isque now.
diff --git a/build/rpm_specs/zrpc.spec b/build/rpm_specs/zrpc.spec
new file mode 100644
index 00000000..a8dd3b75
--- /dev/null
+++ b/build/rpm_specs/zrpc.spec
@@ -0,0 +1,46 @@
+Name: zrpcd
+Version: 0.2
+Release: 0
+
+Summary: Zebra Remote Procedure Call
+Group: Applications/Internet
+License: GPL
+Source0: %{name}-%{version}.tar.gz
+Source1: zrpcd.service
+
+BuildRequires: systemd-units
+
+Requires: thrift zeromq glib2 c-capnproto capnproto quagga
+Requires(post): systemd
+Requires(preun): systemd
+Requires(postun): systemd
+%description
+ZRPC provides a Thrift API and handles RPC to configure Quagga framework.
+
+%prep
+%setup -q
+
+%build
+
+%configure
+
+%install
+mkdir -p %{buildroot}%{_unitdir}
+install -p -D -m 644 %{SOURCE1} %{buildroot}%{_unitdir}/zrpcd.service
+%make_install
+
+%post
+%systemd_post zrpcd.service
+
+%preun
+%systemd_preun zrpcd.service
+
+%postun
+%systemd_postun_with_restart zrpcd.service
+
+%files
+%defattr(-,root,root)
+%{_sbindir}/zrpcd
+%{_includedir}/%name/zrpc_global.h
+%{_includedir}/%name/zrpc_os_wrapper.h
+%{_unitdir}/zrpcd.service
diff --git a/build/set_perf_images.sh b/build/set_perf_images.sh
deleted file mode 100644
index d91c20ec..00000000
--- a/build/set_perf_images.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/bash
-
-##############################################################################
-# Copyright (c) 2016 Red Hat Inc.
-# Michael Chapman <michapma@redhat.com>, Tim Rozet <trozet@redhat.com>
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-for ROLE in $@; do
- RAMDISK=${ROLE}-bm-deploy-ramdisk
-
- if [ -f $ROLE-overcloud-full.qcow2 ]; then
- echo "Uploading ${RAMDISK}"
- glance image-create --name ${RAMDISK} --disk-format ari --container-format ari --file ${ROLE}-ironic-python-agent.initramfs --is-public True
- echo "Uploading $ROLE-overcloud-full.qcow2 "
- KERNEL=$(glance image-show overcloud-full | grep 'kernel_id' | cut -d '|' -f 3 | xargs)
- RAMDISK_ID=$(glance image-show ${RAMDISK} | grep id | awk {'print $4'})
- glance image-create --name $ROLE-overcloud-full --disk-format qcow2 --file $ROLE-overcloud-full.qcow2 --container-format bare --property ramdisk_id=$RAMDISK_ID --property kernel_id=$KERNEL --is-public True
- rm -f $ROLE-overcloud-full.qcow2
- fi
-
- if [ "$ROLE" == "Controller" ]; then
- sed -i "s/overcloud-full/Controller-overcloud-full/" opnfv-environment.yaml
- sed -i '/OvercloudControlFlavor:/c\ OvercloudControlFlavor: control' opnfv-environment.yaml
- fi
-
- if [ "$ROLE" == "Compute" ]; then
- sudo sed -i "s/NovaImage: .*/NovaImage: Compute-overcloud-full/" /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
- sudo sed -i '/OvercloudComputeFlavor:/c\ OvercloudComputeFlavor: compute' /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
- fi
-
- if [ "$ROLE" == "BlockStorage" ]; then
- sudo sed -i "s/BlockStorageImage: .*/BlockStorageImage: BlockStorage-overcloud-full/" /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
- fi
-
- RAMDISK_ID=$(glance image-show ${RAMDISK} | grep id | awk {'print $4'})
- nodes=$(ironic node-list | awk {'print $2'} | grep -Eo [0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})
- role=$(echo $ROLE | awk '{print tolower($0)}')
- if [ "$role" == "controller" ]; then
- role="control"
- fi
- for node in $nodes; do
- if ironic node-show $node | grep profile:${role}; then
- ironic node-update $node replace driver_info/deploy_ramdisk=${RAMDISK_ID}
- fi
- done
-done
diff --git a/build/undercloud.sh b/build/undercloud.sh
index b27b9108..dbe7d2f6 100755
--- a/build/undercloud.sh
+++ b/build/undercloud.sh
@@ -24,10 +24,6 @@ pushd opnfv-tht > /dev/null
git archive --format=tar.gz --prefix=openstack-tripleo-heat-templates/ HEAD > ${BUILD_DIR}/opnfv-tht.tar.gz
popd > /dev/null
-# Add custom IPA to allow kernel params
-curl -fO https://raw.githubusercontent.com/trozet/ironic-python-agent/opnfv_kernel/ironic_python_agent/extensions/image.py
-python3 -c 'import py_compile; py_compile.compile("image.py", cfile="image.pyc")'
-
# installing forked opnfv-tht
# enabling ceph OSDs to live on the controller
# OpenWSMan package update supports the AMT Ironic driver for the TealBox
@@ -51,6 +47,7 @@ LIBGUESTFS_BACKEND=direct virt-customize \
--run-command "yum update -y openwsman*" \
--run-command "cp /usr/share/instack-undercloud/undercloud.conf.sample /home/stack/undercloud.conf && chown stack:stack /home/stack/undercloud.conf" \
--upload ${BUILD_ROOT}/opnfv-environment.yaml:/home/stack/ \
+ --upload ${BUILD_ROOT}/first-boot.yaml:/home/stack/ \
--upload ${BUILD_ROOT}/csit-environment.yaml:/home/stack/ \
--upload ${BUILD_ROOT}/virtual-environment.yaml:/home/stack/ \
--install "python2-congressclient" \
@@ -65,10 +62,6 @@ LIBGUESTFS_BACKEND=direct virt-customize \
--install "openstack-heat-engine" \
--install "openstack-heat-api-cfn" \
--install "openstack-heat-api" \
- --upload ${BUILD_ROOT}/build_perf_image.sh:/home/stack \
- --upload ${BUILD_ROOT}/set_perf_images.sh:/home/stack \
- --upload ${BUILD_DIR}/image.py:/root \
- --upload ${BUILD_DIR}/image.pyc:/root \
--upload ${BUILD_ROOT}/0001-Removes-doing-yum-update.patch:/usr/lib/python2.7/site-packages/ \
--run-command "cd /usr/lib/python2.7/site-packages/ && patch -p1 < 0001-Removes-doing-yum-update.patch" \
--root-password password:stack \
diff --git a/build/variables.sh b/build/variables.sh
index a40eb234..c30aa2a7 100644
--- a/build/variables.sh
+++ b/build/variables.sh
@@ -10,12 +10,14 @@
BUILD_ROOT=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
BUILD_DIR="$(dirname ${BUILD_ROOT})/.build"
+QUAGGA_RPMS_DIR=${BUILD_DIR}/quagga_build_dir
CACHE_DIR="$(dirname ${BUILD_ROOT})/.cache"
CACHE_HISTORY=".cache_history"
+PATCHES_DIR="${BUILD_ROOT}/patches"
rdo_images_uri=http://buildlogs.centos.org/centos/7/cloud/x86_64/tripleo_images/newton/delorean
onos_release_uri=https://downloads.onosproject.org/nightly/
-onos_release_file=onos-1.6.0-rc2.tar.gz
+onos_release_file=onos-1.8.0-rc6.tar.gz
onos_jdk_uri=http://artifacts.opnfv.org/apex/colorado
onos_ovs_uri=http://artifacts.opnfv.org/apex/colorado
onos_ovs_pkg=package_ovs_rpm3.tar.gz