author     Dan Radez <dradez@redhat.com>    2015-12-09 15:52:38 -0500
committer  Dan Radez <dradez@redhat.com>    2015-12-14 15:54:34 -0500
commit     592edf4aac41985d4f802f8ee607af9171c516ed
tree       d838f99fe8cdc678d1c6ad1b0c6e2d971cb55af6
parent     c59d834f3d2f57942f2b8d3bf86d71e53401d24f
updating the OpenDaylight patch
- using the proposed patches for upstream
- ensuring l3_ha is disabled
- exposing support for an ODL-only node
Change-Id: I6f4a50300ea08322eea4ba466fc1a3b0fbcb5743
Signed-off-by: Dan Radez <dradez@redhat.com>
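
Usage note: the two new environment files select between the two deployment modes
described above. A rough sketch, assuming the stock TripleO "openstack overcloud deploy"
CLI and the default templates path (in Apex the call is wrapped by ci/deploy.sh, so treat
this as illustration only):

  # OpenDaylight co-located on the controllers (environments/opendaylight.yaml
  # sets OpenDaylightCount to 0 and opendaylight_install to true)
  openstack overcloud deploy --templates /usr/share/openstack-tripleo-heat-templates \
    -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight.yaml

  # OpenDaylight on a dedicated node (environments/opendaylight-external.yaml,
  # OpenDaylightCount left at its default of 1)
  openstack overcloud deploy --templates /usr/share/openstack-tripleo-heat-templates \
    -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight-external.yaml

Both environments set neutron_mechanism_drivers to ['opendaylight'], use vxlan tenant
networks, and force neutron_l3_ha: false, matching the bullet points above.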
-rw-r--r--  build/Makefile                                    |   4
-rwxr-xr-x  build/instack.sh                                  |  33
-rw-r--r--  build/opendaylight-puppet-neutron.patch           | 290
-rw-r--r--  build/opendaylight-tripleo-heat-templates.patch   | 913
-rw-r--r--  build/opendaylight.patch                          | 446
-rw-r--r--  build/opnfv-apex.spec                             |  12
-rwxr-xr-x  ci/deploy.sh                                      |  12
7 files changed, 1237 insertions(+), 473 deletions(-)
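
For orientation before the raw diff: the main build change is that build/instack.sh no
longer yumdownloads individual RPMs into the image but customizes the images directly
and patches in the not-yet-upstream OpenDaylight support. A simplified sketch of that
flow (paths and flags as in the diff below, not the verbatim script):

  export LIBGUESTFS_BACKEND=direct

  # Install OpenDaylight and networking-odl into a copy of the cached overcloud image
  cp overcloud-full.qcow2 overcloud-full-odl.qcow2
  virt-customize --upload /etc/yum.repos.d/opendaylight.repo:/etc/yum.repos.d/opendaylight.repo \
    --install opendaylight,python-networking-odl -a overcloud-full-odl.qcow2

  # Work around missing upstream support: patch tripleo-heat-templates in the
  # undercloud image and the neutron puppet module in the overcloud image
  virt-customize --upload opendaylight-tripleo-heat-templates.patch:/tmp \
    --run-command "cd /usr/share/openstack-tripleo-heat-templates/ && patch -Np1 < /tmp/opendaylight-tripleo-heat-templates.patch" \
    -a instack.qcow2
  virt-customize --upload opendaylight-puppet-neutron.patch:/tmp \
    --run-command "cd /etc/puppet/modules/neutron && patch -Np1 < /tmp/opendaylight-puppet-neutron.patch" \
    -a overcloud-full-odl.qcow2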
diff --git a/build/Makefile b/build/Makefile index 8079932b..7fd32281 100644 --- a/build/Makefile +++ b/build/Makefile @@ -113,8 +113,8 @@ rpm: tar -u --xform="s:instackenv.json.example:opnfv-apex-$(RPMVERS)/build/instackenv.json.example:" --file=opnfv-apex.tar instackenv.json.example tar -u --xform="s:stack/overcloud-full-odl.qcow2:opnfv-apex-$(RPMVERS)/build/stack/overcloud-full.qcow2:" --file=opnfv-apex.tar stack/overcloud-full-odl.qcow2 tar -u --xform="s:network-environment.yaml:opnfv-apex-$(RPMVERS)/build/network-environment.yaml:" --file=opnfv-apex.tar network-environment.yaml - tar -u --xform="s:opendaylight.yaml:opnfv-apex-$(RPMVERS)/build/opendaylight.yaml:" --file=opnfv-apex.tar opendaylight.yaml - tar -u --xform="s:opendaylight.patch:opnfv-apex-$(RPMVERS)/build/opendaylight.patch:" --file=opnfv-apex.tar opendaylight.patch + tar -u --xform="s:opendaylight-puppet-neutron.patch:opnfv-apex-$(RPMVERS)/build/opendaylight-puppet-neutron.patch:" --file=opnfv-apex.tar opendaylight-puppet-neutron.patch + tar -u --xform="s:opendaylight-tripleo-heat-templates.patch:opnfv-apex-$(RPMVERS)/build/opendaylight-tripleo-heat-templates.patch:" --file=opnfv-apex.tar opendaylight-tripleo-heat-templates.patch gzip -f opnfv-apex.tar rpmbuild -ba opnfv-apex.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)' -D "release $(shell echo $(REVSTATE) | tr -d '_-')" diff --git a/build/instack.sh b/build/instack.sh index 182d2367..a6e459f1 100755 --- a/build/instack.sh +++ b/build/instack.sh @@ -195,12 +195,12 @@ PACKAGES="qemu-kvm-common,qemu-kvm,libvirt-daemon-kvm,libguestfs,python-libguest PACKAGES+=",openstack-swift,openstack-ceilometer-api,openstack-neutron-ml2,openstack-ceilometer-alarm" PACKAGES+=",openstack-nova-conductor,openstack-ironic-inspector,openstack-ironic-api,python-openvswitch" PACKAGES+=",openstack-glance,python-glance,python-troveclient,openstack-puppet-modules" -PACKAGES+=",python-troveclient,openstack-neutron-openvswitch,openstack-nova-scheduler,openstack-keystone,openstack-swift-account" +PACKAGES+=",openstack-neutron,openstack-neutron-openvswitch,openstack-nova-scheduler,openstack-keystone,openstack-swift-account" PACKAGES+=",openstack-swift-container,openstack-swift-object,openstack-swift-plugin-swift3,openstack-swift-proxy" PACKAGES+=",openstack-nova-api,openstack-nova-cert,openstack-heat-api-cfn,openstack-heat-api," PACKAGES+=",openstack-ceilometer-central,openstack-ceilometer-polling,openstack-ceilometer-collector," PACKAGES+=",openstack-heat-api-cloudwatch,openstack-heat-engine,openstack-heat-common,openstack-ceilometer-notification" -PACKAGES+=",hiera,puppet,memcached,keepalived,mariadb,mariadb-server,rabbitmq-server" +PACKAGES+=",hiera,puppet,memcached,keepalived,mariadb,mariadb-server,rabbitmq-server,python-pbr" LIBGUESTFS_BACKEND=direct virt-customize --install $PACKAGES -a instack.qcow2 popd @@ -208,17 +208,17 @@ popd #Adding OpenDaylight to overcloud pushd stack +# make a copy of the cached overcloud-full image cp overcloud-full.qcow2 overcloud-full-odl.qcow2 -for i in opendaylight python-networking-odl; do - yumdownloader $i - if rpmfile=$(ls -r $i*); then - rpmfile=$(echo $rpmfile | head -n1) - LIBGUESTFS_BACKEND=direct virt-customize --upload $rpmfile:/tmp --install /tmp/$rpmfile -a overcloud-full-odl.qcow2 - else - echo "Cannot install $i into overcloud-full image." 
- exit 1 - fi -done + +# install nessesary packages +LIBGUESTFS_BACKEND=direct virt-customize --upload /etc/yum.repos.d/opendaylight.repo:/etc/yum.repos.d/opendaylight.repo \ + --install opendaylight,python-networking-odl -a overcloud-full-odl.qcow2 + +## WORK AROUND +## when OpenDaylight lands in upstream RDO manager this can be removed + +# upload the opendaylight puppet module rm -rf puppet-opendaylight git clone https://github.com/dfarrell07/puppet-opendaylight pushd puppet-opendaylight @@ -226,6 +226,15 @@ git archive --format=tar.gz --prefix=opendaylight/ HEAD > ../puppet-opendaylight popd LIBGUESTFS_BACKEND=direct virt-customize --upload puppet-opendaylight.tar.gz:/etc/puppet/modules/ \ --run-command "cd /etc/puppet/modules/ && tar xzf puppet-opendaylight.tar.gz" -a overcloud-full-odl.qcow2 + +# Patch in OpenDaylight installation and configuration +LIBGUESTFS_BACKEND=direct virt-customize --upload ../opendaylight-tripleo-heat-templates.patch:/tmp \ + --run-command "cd /usr/share/openstack-tripleo-heat-templates/ && patch -Np1 < /tmp/opendaylight-tripleo-heat-templates.patch" \ + -a instack.qcow2 +LIBGUESTFS_BACKEND=direct virt-customize --upload ../opendaylight-puppet-neutron.patch:/tmp \ + --run-command "cd /etc/puppet/modules/neutron && patch -Np1 < /tmp/opendaylight-puppet-neutron.patch" \ + -a overcloud-full-odl.qcow2 +## END WORK AROUND popd # move and Sanitize private keys from instack.json file diff --git a/build/opendaylight-puppet-neutron.patch b/build/opendaylight-puppet-neutron.patch new file mode 100644 index 00000000..9e0d713e --- /dev/null +++ b/build/opendaylight-puppet-neutron.patch @@ -0,0 +1,290 @@ +From 8f1ca7078619b8ab67de2580522f7174bed40774 Mon Sep 17 00:00:00 2001 +From: Tim Rozet <trozet@redhat.com> +Date: Tue, 24 Nov 2015 14:39:12 -0500 +Subject: [PATCH] Adds configuration support for OpenDaylight SDN Controller + +In order to use OpenDaylight with Neutron, ML2 must be configured to +point to the OpenDaylight controller instance. It also requires the +networking-odl python library to drive communication with ODL. +Additionally each Open vSwitch instance must be configured to set the ODL +Controller as it's manager. + +Change-Id: If067e1057bec2d48f700838d86077a550bd27bd2 +Signed-off-by: Tim Rozet <trozet@redhat.com> +--- + manifests/plugins/ml2/opendaylight.pp | 51 +++++++++++++++++ + manifests/plugins/ovs/opendaylight.pp | 63 +++++++++++++++++++++ + .../neutron_plugins_ml2_opendaylight_spec.rb | 65 ++++++++++++++++++++++ + .../neutron_plugins_ovs_opendaylight_spec.rb | 60 ++++++++++++++++++++ + 4 files changed, 239 insertions(+) + create mode 100644 manifests/plugins/ml2/opendaylight.pp + create mode 100644 manifests/plugins/ovs/opendaylight.pp + create mode 100644 spec/classes/neutron_plugins_ml2_opendaylight_spec.rb + create mode 100644 spec/classes/neutron_plugins_ovs_opendaylight_spec.rb + +diff --git a/manifests/plugins/ml2/opendaylight.pp b/manifests/plugins/ml2/opendaylight.pp +new file mode 100644 +index 0000000..7dc7937 +--- /dev/null ++++ b/manifests/plugins/ml2/opendaylight.pp +@@ -0,0 +1,51 @@ ++# ++# Install the OpenDaylight and generate config file ++# from parameters in the other classes. ++# ++# === Parameters ++# ++# [*odl_controller_ip*] ++# (required) The OpenDaylight controller IP ++# ++# [*package_ensure*] ++# (optional) The intended state of the python-networking-odl ++# package, i.e. any of the possible values of the 'ensure' ++# property for a package resource type. 
++# Defaults to 'present' ++# ++# [*odl_username*] ++# (optional) The opendaylight controller username ++# Defaults to 'admin' ++# ++# [*odl_password*] ++# (optional) The opendaylight controller password ++# Defaults to 'admin' ++# ++# [*odl_port*] ++# (optional) The opendaylight controller port ++# Defaults to '8080' ++# ++class neutron::plugins::ml2::opendaylight ( ++ $odl_controller_ip, ++ $package_ensure = 'present', ++ $odl_username = 'admin', ++ $odl_password = 'admin', ++ $odl_port = '8080', ++) { ++ include ::neutron::params ++ require ::neutron::plugins::ml2 ++ ++ ensure_resource('package', 'python-networking-odl', ++ { ++ ensure => $package_ensure, ++ tag => 'openstack', ++ } ++ ) ++ ++ neutron_plugin_ml2 { ++ 'ml2_odl/username': value => $odl_username; ++ 'ml2_odl/password': value => $odl_password; ++ 'ml2_odl/url': value => "http://${odl_controller_ip}:${odl_port}/controller/nb/v2/neutron"; ++ } ++ ++} +diff --git a/manifests/plugins/ovs/opendaylight.pp b/manifests/plugins/ovs/opendaylight.pp +new file mode 100644 +index 0000000..3ebdb0e +--- /dev/null ++++ b/manifests/plugins/ovs/opendaylight.pp +@@ -0,0 +1,63 @@ ++# ++# Configure OVS to use OpenDaylight ++# ++# === Parameters ++# ++# [*odl_controller_ip*] ++# (required) The OpenDaylight controller IP ++# ++# [*tunnel_ip*] ++# (required) The IP of the host to use for tunneling ++# tenant VXLAN/GRE over ++# ++# [*odl_port*] ++# (optional) The opendaylight controller port ++# Defaults to '8080' ++# ++# [*provider_mappings*] ++# (optional) bridge mappings required if using VLAN ++# tenant type. Example: provider_mappings=br-ex:eth0 ++# Defaults to false ++# ++# [*odl_username*] ++# (optional) The opendaylight controller username ++# Defaults to 'admin' ++# ++# [*odl_password*] ++# (optional) The opendaylight controller password ++# Defaults to 'admin' ++# ++class neutron::plugins::ovs::opendaylight ( ++ $odl_controller_ip, ++ $tunnel_ip, ++ $odl_port = '8080', ++ $provider_mappings = false, ++ $odl_username = 'admin', ++ $odl_password = 'admin', ++) { ++ ++ exec { 'Wait for NetVirt OVSDB to come up': ++ command => "/bin/curl -o /dev/null --fail --silent --head -u ${odl_username}:${odl_password} \ ++ http://${odl_controller_ip}:${odl_port}/restconf/operational/network-topology:network-topology/topology/netvirt:1", ++ tries => 20, ++ try_sleep => 60, ++ } -> ++ # OVS manager ++ exec { 'Set OVS Manager to OpenDaylight': ++ command => "/usr/bin/ovs-vsctl set-manager tcp:${odl_controller_ip}:6640", ++ unless => "/usr/bin/ovs-vsctl show | /usr/bin/grep 'Manager \"tcp:${odl_controller_ip}:6640\"'", ++ } -> ++ # local ip ++ exec { 'Set local_ip Other Option': ++ command => "/usr/bin/ovs-vsctl set Open_vSwitch $(ovs-vsctl get Open_vSwitch . _uuid) other_config:local_ip=${tunnel_ip}", ++ unless => "/usr/bin/ovs-vsctl list Open_vSwitch | /usr/bin/grep 'local_ip=\"${tunnel_ip}\"'", ++ } ++ ++ # set mappings for VLAN ++ if $provider_mappings { ++ exec { 'Set provider_mappings Other Option': ++ command => "/usr/bin/ovs-vsctl set Open_vSwitch $(ovs-vsctl get Open_vSwitch . 
_uuid) other_config:provider_mappings=${provider_mappings}", ++ unless => "/usr/bin/ovs-vsctl list Open_vSwitch | /usr/bin/grep 'provider_mappings' | /usr/bin/grep ${provider_mappings}", ++ } ++ } ++} +diff --git a/spec/classes/neutron_plugins_ml2_opendaylight_spec.rb b/spec/classes/neutron_plugins_ml2_opendaylight_spec.rb +new file mode 100644 +index 0000000..5772b3b +--- /dev/null ++++ b/spec/classes/neutron_plugins_ml2_opendaylight_spec.rb +@@ -0,0 +1,65 @@ ++require 'spec_helper' ++ ++describe 'neutron::plugins::ml2::opendaylight' do ++ ++ let :pre_condition do ++ "class { 'neutron::server': auth_password => 'password'} ++ class { 'neutron': ++ rabbit_password => 'passw0rd', ++ core_plugin => 'neutron.plugins.ml2.plugin.Ml2Plugin' }" ++ end ++ ++ let :default_params do ++ { ++ :package_ensure => 'present', ++ :odl_username => 'admin', ++ :odl_password => 'admin', ++ :odl_port => '8080', ++ } ++ end ++ ++ let :params do ++ { ++ :odl_controller_ip => '127.0.0.1', ++ } ++ end ++ ++ let :test_facts do ++ { ++ :operatingsystem => 'default', ++ :operatingsystemrelease => 'default', ++ } ++ end ++ ++ ++ shared_examples_for 'neutron plugin opendaylight ml2' do ++ before do ++ params.merge!(default_params) ++ end ++ ++ it { is_expected.to contain_class('neutron::params') } ++ ++ it 'should have' do ++ is_expected.to contain_package('python-networking-odl').with( ++ :ensure => params[:package_ensure], ++ :tag => 'openstack' ++ ) ++ end ++ end ++ ++ context 'on RedHat platforms' do ++ let :facts do ++ test_facts.merge({:osfamily => 'RedHat'}) ++ end ++ ++ it_configures 'neutron plugin opendaylight ml2' ++ end ++ ++ context 'on Debian platforms' do ++ let :facts do ++ test_facts.merge({:osfamily => 'Debian'}) ++ end ++ ++ it_configures 'neutron plugin opendaylight ml2' ++ end ++end +diff --git a/spec/classes/neutron_plugins_ovs_opendaylight_spec.rb b/spec/classes/neutron_plugins_ovs_opendaylight_spec.rb +new file mode 100644 +index 0000000..d6b93df +--- /dev/null ++++ b/spec/classes/neutron_plugins_ovs_opendaylight_spec.rb +@@ -0,0 +1,60 @@ ++require 'spec_helper' ++ ++describe 'neutron::plugins::ovs::opendaylight' do ++ ++ let :pre_condition do ++ "class { 'neutron::server': auth_password => 'password'} ++ class { 'neutron': ++ rabbit_password => 'passw0rd', ++ core_plugin => 'neutron.plugins.ml2.plugin.Ml2Plugin' }" ++ end ++ ++ let :default_params do ++ { ++ :provider_mappings => false, ++ :odl_username => 'admin', ++ :odl_password => 'admin', ++ :odl_port => '8080', ++ } ++ end ++ ++ let :params do ++ { ++ :odl_controller_ip => '127.0.0.1', ++ :tunnel_ip => '127.0.0.1', ++ } ++ end ++ ++ let :test_facts do ++ { ++ :operatingsystem => 'default', ++ :operatingsystemrelease => 'default', ++ } ++ end ++ ++ ++ shared_examples_for 'neutron plugin opendaylight ovs' do ++ before do ++ params.merge!(default_params) ++ end ++ ++ it { is_expected.to contain_class('neutron::params') } ++ ++ end ++ ++ context 'on RedHat platforms' do ++ let :facts do ++ test_facts.merge({:osfamily => 'RedHat'}) ++ end ++ ++ it_configures 'neutron plugin opendaylight ovs' ++ end ++ ++ context 'on Debian platforms' do ++ let :facts do ++ test_facts.merge({:osfamily => 'Debian'}) ++ end ++ ++ it_configures 'neutron plugin opendaylight ovs' ++ end ++end +-- +2.5.0 + diff --git a/build/opendaylight-tripleo-heat-templates.patch b/build/opendaylight-tripleo-heat-templates.patch new file mode 100644 index 00000000..2a7176e4 --- /dev/null +++ b/build/opendaylight-tripleo-heat-templates.patch @@ -0,0 +1,913 @@ +From 
bdda149ddf16b5818882993d9df1e66a2d61a383 Mon Sep 17 00:00:00 2001 +From: Dan Radez <dradez@redhat.com> +Date: Sun, 13 Dec 2015 21:20:40 -0500 +Subject: [PATCH] Adds OpenDaylight support + +To enable OpenDaylight on controllers use environments/opendaylight.yaml +To enable OpenDaylight on external node use +environments/opendaylight-external.yaml +--- + environments/opendaylight-external.yaml | 21 +++ + environments/opendaylight.yaml | 18 ++ + overcloud-resource-registry-puppet.yaml | 3 + + overcloud-without-mergepy.yaml | 62 ++++++ + puppet/all-nodes-config.yaml | 6 + + puppet/compute.yaml | 19 ++ + puppet/controller.yaml | 27 +++ + puppet/manifests/overcloud_compute.pp | 27 ++- + puppet/manifests/overcloud_controller.pp | 47 ++++- + puppet/manifests/overcloud_controller_pacemaker.pp | 184 +++++++++++------- + puppet/manifests/overcloud_opendaylight.pp | 26 +++ + puppet/opendaylight-puppet.yaml | 209 +++++++++++++++++++++ + 12 files changed, 569 insertions(+), 80 deletions(-) + create mode 100644 environments/opendaylight-external.yaml + create mode 100644 environments/opendaylight.yaml + create mode 100644 puppet/manifests/overcloud_opendaylight.pp + create mode 100644 puppet/opendaylight-puppet.yaml + +diff --git a/environments/opendaylight-external.yaml b/environments/opendaylight-external.yaml +new file mode 100644 +index 0000000..46505b4 +--- /dev/null ++++ b/environments/opendaylight-external.yaml +@@ -0,0 +1,21 @@ ++# Environment file used to enable OpenDaylight ++# Currently uses overcloud image that is assumed ++# to be virt-customized with ODL RPM already on it ++ ++# These parameters customize the OpenDaylight Node ++# The user name and password are for the ODL service ++# Defaults are included here for reference ++#parameter_defaults: ++# OpenDaylightFlavor: baremetal ++# OpenDaylightHostname: opendaylight-server ++# OpenDaylightImage: overcloud-full ++# OpenDaylightUsername: admin ++# OpenDaylightPassword: admin ++ ++parameters: ++ # increase this if you need more ODL nodes ++ # OpenDaylightCount: 1 ++ ExtraConfig: ++ neutron_mechanism_drivers: ['opendaylight'] ++ neutron_tenant_network_type: vxlan ++ neutron_l3_ha: false +diff --git a/environments/opendaylight.yaml b/environments/opendaylight.yaml +new file mode 100644 +index 0000000..07e80ab +--- /dev/null ++++ b/environments/opendaylight.yaml +@@ -0,0 +1,18 @@ ++# Environment file used to enable OpenDaylight ++# Currently uses overcloud image that is assumed ++# to be virt-customized with ODL RPM already on it ++ ++# These parameters customize the OpenDaylight Service ++# Defaults are included here for reference ++#parameter_defaults: ++# OpenDaylightUsername: admin ++# OpenDaylightPassword: admin ++ ++parameters: ++ # instructs there not to be any ODL only nodes ++ OpenDaylightCount: 0 ++ ExtraConfig: ++ neutron_mechanism_drivers: ['opendaylight'] ++ neutron_tenant_network_type: vxlan ++ opendaylight_install: true ++ neutron_l3_ha: false +diff --git a/overcloud-resource-registry-puppet.yaml b/overcloud-resource-registry-puppet.yaml +index 4cfed6b..adecc79 100644 +--- a/overcloud-resource-registry-puppet.yaml ++++ b/overcloud-resource-registry-puppet.yaml +@@ -27,6 +27,9 @@ resource_registry: + # To disable, replace with firstboot/userdata_default.yaml + OS::TripleO::NodeAdminUserData: firstboot/userdata_heat_admin.yaml + ++ # This configures OpenDaylight to drive the network ++ OS::TripleO::OpenDaylightNode: puppet/opendaylight-puppet.yaml ++ + # Hooks for operator extra config + # NodeUserData == Cloud-init additional 
user-data, e.g cloud-config + # ControllerExtraConfigPre == Controller configuration pre service deployment +diff --git a/overcloud-without-mergepy.yaml b/overcloud-without-mergepy.yaml +index a532c2f..733eb7f 100644 +--- a/overcloud-without-mergepy.yaml ++++ b/overcloud-without-mergepy.yaml +@@ -227,6 +227,23 @@ parameters: + default: false + description: Should MongoDb journaling be disabled + type: boolean ++ OpenDaylightPort: ++ default: 8081 ++ description: Set opendaylight service port ++ type: number ++ OpenDaylightInstall: ++ default: false ++ description: Whether to install OpenDaylight on the control nodes. ++ type: boolean ++ OpenDaylightUsername: ++ default: 'admin' ++ description: The username for the opendaylight server. ++ type: string ++ OpenDaylightPassword: ++ default: 'admin' ++ type: string ++ description: The password for the opendaylight server. ++ hidden: true + PublicVirtualFixedIPs: + default: [] + description: > +@@ -664,6 +681,18 @@ parameters: + structure as ExtraConfig. + type: json + ++# OpenDaylight specific parameters ++ OpenDaylightCount: ++ type: number ++ default: 1 ++ OpenDaylightImage: ++ default: overcloud-full ++ type: string ++ OpenDaylightFlavor: ++ default: baremetal ++ description: Flavor for OpenDaylight node ++ type: string ++ + # Hostname format for each role + # Note %index% is translated into the index of the node, e.g 0/1/2 etc + # and %stackname% is replaced with OS::stack_name in the template below. +@@ -688,6 +717,10 @@ parameters: + type: string + description: Format for CephStorage node hostnames + default: '%stackname%-cephstorage-%index%' ++ OpenDaylightHostnameFormat: ++ type: string ++ description: Format for OpenDaylight node hostnames ++ default: '%stackname%-opendaylight-%index%' + + # Identifiers to trigger tasks on nodes + UpdateIdentifier: +@@ -770,6 +803,27 @@ resources: + SwiftProxyVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]} + PublicVirtualIP: {get_attr: [VipMap, net_ip_map, external]} + ++ OpenDaylightNode: ++ type: OS::Heat::ResourceGroup ++ depends_on: Networks ++ properties: ++ count: {get_param: OpenDaylightCount} ++ removal_policies: {get_param: ComputeRemovalPolicies} ++ resource_def: ++ type: OS::TripleO::OpenDaylightNode ++ properties: ++ UpdateIdentifier: {get_param: UpdateIdentifier} ++ OpenDaylightFlavor: {get_param: OpenDaylightFlavor} ++ OpenDaylightImage: {get_param: OpenDaylightImage} ++ OpenDaylightPort: {get_param: OpenDaylightPort} ++ OpenDaylightUsername: {get_param: OpenDaylightUsername} ++ OpenDaylightPassword: {get_param: OpenDaylightPassword} ++ OpenDaylightHostname: ++ str_replace: ++ template: {get_param: OpenDaylightHostnameFormat} ++ params: ++ '%stackname%': {get_param: 'OS::stack_name'} ++ + Controller: + type: OS::Heat::ResourceGroup + depends_on: Networks +@@ -853,6 +907,10 @@ resources: + NovaPassword: {get_param: NovaPassword} + NtpServer: {get_param: NtpServer} + MongoDbNoJournal: {get_param: MongoDbNoJournal} ++ OpenDaylightPort: {get_param: OpenDaylightPort} ++ OpenDaylightInstall: {get_param: OpenDaylightInstall} ++ OpenDaylightUsername: {get_param: OpenDaylightUsername} ++ OpenDaylightPassword: {get_param: OpenDaylightPassword} + PcsdPassword: {get_resource: PcsdPassword} + PublicVirtualInterface: {get_param: PublicVirtualInterface} + RabbitPassword: {get_param: RabbitPassword} +@@ -948,6 +1006,9 @@ resources: + NovaPublicIP: {get_attr: [PublicVirtualIP, ip_address]} + NovaPassword: {get_param: NovaPassword} + NtpServer: {get_param: 
NtpServer} ++ OpenDaylightPort: {get_param: OpenDaylightPort} ++ OpenDaylightUsername: {get_param: OpenDaylightUsername} ++ OpenDaylightPassword: {get_param: OpenDaylightPassword} + RabbitHost: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, RabbitMqNetwork]}]} + RabbitPassword: {get_param: RabbitPassword} + RabbitUserName: {get_param: RabbitUserName} +@@ -1068,6 +1129,7 @@ resources: + compute_hosts: {get_attr: [Compute, hosts_entry]} + controller_hosts: {get_attr: [Controller, hosts_entry]} + controller_ips: {get_attr: [Controller, ip_address]} ++ opendaylight_ip: {get_attr: [OpenDaylightNode, ip_address]} + block_storage_hosts: {get_attr: [BlockStorage, hosts_entry]} + object_storage_hosts: {get_attr: [ObjectStorage, hosts_entry]} + ceph_storage_hosts: {get_attr: [CephStorage, hosts_entry]} +diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml +index 2bc519b..98283c2 100644 +--- a/puppet/all-nodes-config.yaml ++++ b/puppet/all-nodes-config.yaml +@@ -8,6 +8,8 @@ parameters: + type: comma_delimited_list + controller_ips: + type: comma_delimited_list ++ opendaylight_ip: ++ type: comma_delimited_list + block_storage_hosts: + type: comma_delimited_list + object_storage_hosts: +@@ -82,6 +84,10 @@ resources: + raw_data: {get_file: hieradata/RedHat.yaml} + all_nodes: + mapped_data: ++ opendaylight_controller_ip: ++ list_join: ++ - ',' ++ - {get_param: opendaylight_ip} + controller_node_ips: + list_join: + - ',' +diff --git a/puppet/compute.yaml b/puppet/compute.yaml +index 70c7403..df6c16f 100644 +--- a/puppet/compute.yaml ++++ b/puppet/compute.yaml +@@ -213,6 +213,19 @@ parameters: + NtpServer: + type: string + default: '' ++ OpenDaylightPort: ++ default: 8081 ++ description: Set opendaylight service port ++ type: number ++ OpenDaylightUsername: ++ default: 'admin' ++ description: The username for the opendaylight server. ++ type: string ++ OpenDaylightPassword: ++ default: 'admin' ++ type: string ++ description: The password for the opendaylight server. 
++ hidden: true + RabbitHost: + type: string + default: '' # Has to be here because of the ignored empty value bug +@@ -406,6 +419,9 @@ resources: + neutron::rabbit_user: {get_input: rabbit_user} + neutron::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} + neutron::rabbit_port: {get_input: rabbit_client_port} ++ opendaylight_port: {get_input: opendaylight_port} ++ opendaylight_username: {get_input: opendaylight_username} ++ opendaylight_password: {get_input: opendaylight_password} + neutron_flat_networks: {get_input: neutron_flat_networks} + neutron_host: {get_input: neutron_host} + neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip} +@@ -459,6 +475,9 @@ resources: + snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName} + snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword} + glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]} ++ opendaylight_port: {get_param: OpenDaylightPort} ++ opendaylight_username: {get_param: OpenDaylightUsername} ++ opendaylight_password: {get_param: OpenDaylightPassword} + neutron_flat_networks: {get_param: NeutronFlatNetworks} + neutron_host: {get_param: NeutronHost} + neutron_local_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronTenantNetwork]}]} +diff --git a/puppet/controller.yaml b/puppet/controller.yaml +index ea0b3af..a339eb2 100644 +--- a/puppet/controller.yaml ++++ b/puppet/controller.yaml +@@ -443,6 +443,23 @@ parameters: + NtpServer: + type: string + default: '' ++ OpenDaylightPort: ++ default: 8081 ++ description: Set opendaylight service port ++ type: number ++ OpenDaylightInstall: ++ default: false ++ description: Whether to install OpenDaylight on the control nodes. ++ type: boolean ++ OpenDaylightUsername: ++ default: 'admin' ++ description: The username for the opendaylight server. ++ type: string ++ OpenDaylightPassword: ++ default: 'admin' ++ type: string ++ description: The password for the opendaylight server. ++ hidden: true + PcsdPassword: + type: string + description: The password for the 'pcsd' user. 
+@@ -805,6 +822,10 @@ resources: + template: tripleo-CLUSTER + params: + CLUSTER: {get_param: MysqlClusterUniquePart} ++ opendaylight_port: {get_param: OpenDaylightPort} ++ opendaylight_install: {get_param: OpenDaylightInstall} ++ opendaylight_username: {get_param: OpenDaylightUsername} ++ opendaylight_password: {get_param: OpenDaylightPassword} + neutron_flat_networks: {get_param: NeutronFlatNetworks} + neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret} + neutron_agent_mode: {get_param: NeutronAgentMode} +@@ -1136,6 +1157,12 @@ resources: + mysql_bind_host: {get_input: mysql_network} + mysql_virtual_ip: {get_input: mysql_virtual_ip} + ++ # OpenDaylight ++ opendaylight_port: {get_input: opendaylight_port} ++ opendaylight_install: {get_input: opendaylight_install} ++ opendaylight_username: {get_input: opendaylight_username} ++ opendaylight_password: {get_input: opendaylight_password} ++ + # Neutron + neutron::bind_host: {get_input: neutron_api_network} + neutron::rabbit_password: {get_input: rabbit_password} +diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp +index cd41cc7..a81f88d 100644 +--- a/puppet/manifests/overcloud_compute.pp ++++ b/puppet/manifests/overcloud_compute.pp +@@ -75,9 +75,30 @@ class { '::neutron::plugins::ml2': + tenant_network_types => [hiera('neutron_tenant_network_type')], + } + +-class { '::neutron::agents::ml2::ovs': +- bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), +- tunnel_types => split(hiera('neutron_tunnel_types'), ','), ++if 'opendaylight' in hiera('neutron_mechanism_drivers') { ++ ++ if str2bool(hiera('opendaylight_install', 'false')) { ++ $controller_ips = split(hiera('controller_node_ips'), ',') ++ $opendaylight_controller_ip = $controller_ips[0] ++ } else { ++ $opendaylight_controller_ip = hiera('opendaylight_controller_ip') ++ } ++ ++ if str2bool(hiera('opendaylight_install', 'false')) { ++ class { 'neutron::plugins::ovs::opendaylight': ++ odl_controller_ip => $opendaylight_controller_ip, ++ tunnel_ip => hiera('neutron::agents::ml2::ovs::local_ip'), ++ odl_port => hiera('opendaylight_port'), ++ odl_username => hiera('opendaylight_username'), ++ odl_password => hiera('opendaylight_password'), ++ } ++ } ++ ++} else { ++ class { 'neutron::agents::ml2::ovs': ++ bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), ++ tunnel_types => split(hiera('neutron_tunnel_types'), ','), ++ } + } + + if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { +diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp +index 1f6c2be..75bbee7 100644 +--- a/puppet/manifests/overcloud_controller.pp ++++ b/puppet/manifests/overcloud_controller.pp +@@ -30,6 +30,13 @@ if hiera('step') >= 1 { + + if hiera('step') >= 2 { + ++ if str2bool(hiera('opendaylight_install', 'false')) { ++ class {"opendaylight": ++ extra_features => ['odl-ovsdb-openstack'], ++ odl_rest_port => hiera('opendaylight_port'), ++ } ++ } ++ + if count(hiera('ntp::servers')) > 0 { + include ::ntp + } +@@ -242,10 +249,43 @@ if hiera('step') >= 3 { + tenant_network_types => [hiera('neutron_tenant_network_type')], + mechanism_drivers => [hiera('neutron_mechanism_drivers')], + } +- class { '::neutron::agents::ml2::ovs': +- bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), +- tunnel_types => split(hiera('neutron_tunnel_types'), ','), ++ ++ if 'opendaylight' in hiera('neutron_mechanism_drivers') { ++ ++ if str2bool(hiera('opendaylight_install', 'false')) { ++ 
$controller_ips = split(hiera('controller_node_ips'), ',') ++ $opendaylight_controller_ip = $controller_ips[0] ++ } else { ++ $opendaylight_controller_ip = hiera('opendaylight_controller_ip') ++ } ++ ++ class { 'neutron::plugins::ml2::opendaylight': ++ odl_controller_ip => $opendaylight_controller_ip, ++ odl_username => hiera('opendaylight_username'), ++ odl_password => hiera('opendaylight_password'), ++ odl_port => hiera('opendaylight_port'), ++ } ++ ++ if str2bool(hiera('opendaylight_install', 'false')) { ++ class { 'neutron::plugins::ovs::opendaylight': ++ odl_controller_ip => $opendaylight_controller_ip, ++ tunnel_ip => hiera('neutron::agents::ml2::ovs::local_ip'), ++ odl_port => hiera('opendaylight_port'), ++ odl_username => hiera('opendaylight_username'), ++ odl_password => hiera('opendaylight_password'), ++ } ++ } ++ ++ } else { ++ ++ class { 'neutron::agents::ml2::ovs': ++ bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), ++ tunnel_types => split(hiera('neutron_tunnel_types'), ','), ++ } ++ ++ Service['neutron-server'] -> Service['neutron-ovs-agent-service'] + } ++ + if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { + include ::neutron::plugins::ml2::cisco::nexus1000v + +@@ -281,7 +321,6 @@ if hiera('step') >= 3 { + + Service['neutron-server'] -> Service['neutron-dhcp-service'] + Service['neutron-server'] -> Service['neutron-l3'] +- Service['neutron-server'] -> Service['neutron-ovs-agent-service'] + Service['neutron-server'] -> Service['neutron-metadata'] + + include ::cinder +diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp +index 3fb92f3..31c5332 100644 +--- a/puppet/manifests/overcloud_controller_pacemaker.pp ++++ b/puppet/manifests/overcloud_controller_pacemaker.pp +@@ -380,6 +380,13 @@ if hiera('step') >= 2 { + + } + ++ if str2bool(hiera('opendaylight_install', 'false')) { ++ class {"opendaylight": ++ extra_features => ['odl-ovsdb-openstack'], ++ odl_rest_port => hiera('opendaylight_port'), ++ } ++ } ++ + exec { 'galera-ready' : + command => '/usr/bin/clustercheck >/dev/null', + timeout => 30, +@@ -614,13 +621,43 @@ if hiera('step') >= 3 { + tenant_network_types => [hiera('neutron_tenant_network_type')], + mechanism_drivers => [hiera('neutron_mechanism_drivers')], + } +- class { '::neutron::agents::ml2::ovs': +- manage_service => false, +- enabled => false, +- bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), +- tunnel_types => split(hiera('neutron_tunnel_types'), ','), +- } ++ if 'opendaylight' in hiera('neutron_mechanism_drivers') { ++ if str2bool(hiera('opendaylight_install', 'false')) { ++ $controller_ips = split(hiera('controller_node_ips'), ',') ++ $opendaylight_controller_ip = $controller_ips[0] ++ } else { ++ $opendaylight_controller_ip = hiera('opendaylight_controller_ip') ++ } ++ ++ $opendaylight_port = hiera('opendaylight_port') ++ $private_ip = hiera('neutron::agents::ml2::ovs::local_ip') ++ ++ class { 'neutron::plugins::ml2::opendaylight': ++ odl_controller_ip => $opendaylight_controller_ip, ++ odl_username => hiera('opendaylight_username'), ++ odl_password => hiera('opendaylight_password'), ++ odl_port => hiera('opendaylight_port'), ++ } ++ ++ if str2bool(hiera('opendaylight_install', 'false')) { ++ class { 'neutron::plugins::ovs::opendaylight': ++ odl_controller_ip => $opendaylight_controller_ip, ++ tunnel_ip => hiera('neutron::agents::ml2::ovs::local_ip'), ++ odl_port => hiera('opendaylight_port'), ++ odl_username => hiera('opendaylight_username'), ++ 
odl_password => hiera('opendaylight_password'), ++ } ++ } ++ ++ } else { + ++ class { 'neutron::agents::ml2::ovs': ++ manage_service => false, ++ enabled => false, ++ bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), ++ tunnel_types => split(hiera('neutron_tunnel_types'), ','), ++ } ++ } + if 'cisco_ucsm' in hiera('neutron_mechanism_drivers') { + include ::neutron::plugins::ml2::cisco::ucsm + } +@@ -1061,56 +1098,13 @@ if hiera('step') >= 4 { + pacemaker::resource::service { $::neutron::params::dhcp_agent_service: + clone_params => 'interleave=true', + } +- pacemaker::resource::service { $::neutron::params::ovs_agent_service: +- clone_params => 'interleave=true', +- } + pacemaker::resource::service { $::neutron::params::metadata_agent_service: + clone_params => 'interleave=true', + } +- pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service: +- ocf_agent_name => 'neutron:OVSCleanup', +- clone_params => 'interleave=true', +- } + pacemaker::resource::ocf { 'neutron-netns-cleanup': + ocf_agent_name => 'neutron:NetnsCleanup', + clone_params => 'interleave=true', + } +- +- # neutron - one chain ovs-cleanup-->netns-cleanup-->ovs-agent +- pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint': +- constraint_type => 'order', +- first_resource => "${::neutron::params::ovs_cleanup_service}-clone", +- second_resource => 'neutron-netns-cleanup-clone', +- first_action => 'start', +- second_action => 'start', +- require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service], +- Pacemaker::Resource::Ocf['neutron-netns-cleanup']], +- } +- pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation': +- source => 'neutron-netns-cleanup-clone', +- target => "${::neutron::params::ovs_cleanup_service}-clone", +- score => 'INFINITY', +- require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service], +- Pacemaker::Resource::Ocf['neutron-netns-cleanup']], +- } +- pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint': +- constraint_type => 'order', +- first_resource => 'neutron-netns-cleanup-clone', +- second_resource => "${::neutron::params::ovs_agent_service}-clone", +- first_action => 'start', +- second_action => 'start', +- require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'], +- Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], +- } +- pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation': +- source => "${::neutron::params::ovs_agent_service}-clone", +- target => 'neutron-netns-cleanup-clone', +- score => 'INFINITY', +- require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'], +- Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], +- } +- +- #another chain keystone-->neutron-server-->ovs-agent-->dhcp-->l3 + pacemaker::constraint::base { 'keystone-to-neutron-server-constraint': + constraint_type => 'order', + first_resource => "${::keystone::params::service_name}-clone", +@@ -1120,31 +1114,75 @@ if hiera('step') >= 4 { + require => [Pacemaker::Resource::Service[$::keystone::params::service_name], + Pacemaker::Resource::Service[$::neutron::params::server_service]], + } +- pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint': +- constraint_type => 'order', +- first_resource => "${::neutron::params::server_service}-clone", +- second_resource => "${::neutron::params::ovs_agent_service}-clone", +- first_action => 'start', +- second_action => 'start', +- require => 
[Pacemaker::Resource::Service[$::neutron::params::server_service], +- Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], +- } +- pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint': +- constraint_type => 'order', +- first_resource => "${::neutron::params::ovs_agent_service}-clone", +- second_resource => "${::neutron::params::dhcp_agent_service}-clone", +- first_action => 'start', +- second_action => 'start', +- require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service], +- Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]], ++ if 'openvswitch' in hiera('neutron_mechanism_drivers') { ++ pacemaker::resource::service { $::neutron::params::ovs_agent_service: ++ clone_params => "interleave=true", ++ } ++ pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service: ++ ocf_agent_name => "neutron:OVSCleanup", ++ clone_params => "interleave=true", ++ } ++ # neutron - one chain ovs-cleanup-->netns-cleanup-->ovs-agent ++ pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint': ++ constraint_type => "order", ++ first_resource => "${::neutron::params::ovs_cleanup_service}-clone", ++ second_resource => "neutron-netns-cleanup-clone", ++ first_action => "start", ++ second_action => "start", ++ require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"], ++ Pacemaker::Resource::Ocf['neutron-netns-cleanup']], ++ } ++ pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation': ++ source => "neutron-netns-cleanup-clone", ++ target => "${::neutron::params::ovs_cleanup_service}-clone", ++ score => "INFINITY", ++ require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"], ++ Pacemaker::Resource::Ocf['neutron-netns-cleanup']], ++ } ++ pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint': ++ constraint_type => "order", ++ first_resource => "neutron-netns-cleanup-clone", ++ second_resource => "${::neutron::params::ovs_agent_service}-clone", ++ first_action => "start", ++ second_action => "start", ++ require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"], ++ Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]], ++ } ++ pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation': ++ source => "${::neutron::params::ovs_agent_service}-clone", ++ target => "neutron-netns-cleanup-clone", ++ score => "INFINITY", ++ require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"], ++ Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]], ++ } + +- } +- pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation': +- source => "${::neutron::params::dhcp_agent_service}-clone", +- target => "${::neutron::params::ovs_agent_service}-clone", +- score => 'INFINITY', +- require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service], +- Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]], ++ #another chain keystone-->neutron-server-->ovs-agent-->dhcp-->l3 ++ pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint': ++ constraint_type => "order", ++ first_resource => "${::neutron::params::server_service}-clone", ++ second_resource => "${::neutron::params::ovs_agent_service}-clone", ++ first_action => "start", ++ second_action => "start", ++ require => [Pacemaker::Resource::Service[$::neutron::params::server_service], ++ 
Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], ++ } ++ pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint': ++ constraint_type => "order", ++ first_resource => "${::neutron::params::ovs_agent_service}-clone", ++ second_resource => "${::neutron::params::dhcp_agent_service}-clone", ++ first_action => "start", ++ second_action => "start", ++ require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"], ++ Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]], ++ ++ } ++ pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation': ++ source => "${::neutron::params::dhcp_agent_service}-clone", ++ target => "${::neutron::params::ovs_agent_service}-clone", ++ score => "INFINITY", ++ require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"], ++ Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]], ++ } + } + pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint': + constraint_type => 'order', +diff --git a/puppet/manifests/overcloud_opendaylight.pp b/puppet/manifests/overcloud_opendaylight.pp +new file mode 100644 +index 0000000..aea6568 +--- /dev/null ++++ b/puppet/manifests/overcloud_opendaylight.pp +@@ -0,0 +1,26 @@ ++# Copyright 2015 Red Hat, Inc. ++# All Rights Reserved. ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); you may ++# not use this file except in compliance with the License. You may obtain ++# a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT ++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the ++# License for the specific language governing permissions and limitations ++# under the License. ++ ++include ::tripleo::packages ++ ++if count(hiera('ntp::servers')) > 0 { ++ include ::ntp ++} ++ ++class {"opendaylight": ++ extra_features => ['odl-ovsdb-openstack'], ++ odl_rest_port => hiera('opendaylight_port'), ++} ++ +diff --git a/puppet/opendaylight-puppet.yaml b/puppet/opendaylight-puppet.yaml +new file mode 100644 +index 0000000..70f2543 +--- /dev/null ++++ b/puppet/opendaylight-puppet.yaml +@@ -0,0 +1,209 @@ ++heat_template_version: 2015-04-30 ++ ++description: > ++ OpenDaylight node configured by Puppet. ++ ++parameters: ++ OpenDaylightFlavor: ++ default: baremetal ++ description: The flavor to use for the OpenDaylight node ++ type: string ++ OpenDaylightImage: ++ default: overcloud-full ++ description: The image to use for the OpenDaylight node ++ type: string ++ OpenDaylightHostname: ++ default: opendaylight-server ++ description: The hostname to use for the OpenDaylight node ++ type: string ++ OpenDaylightUsername: ++ default: admin ++ description: The admin user for the OpenDaylight node ++ type: string ++ OpenDaylightPassword: ++ default: '' ++ description: The admin password for the OpenDaylight node ++ type: string ++ hidden: true ++ OpenDaylightPort: ++ default: 8081 ++ description: Set OpenDaylight service port ++ type: number ++ KeyName: ++ description: The keypair to use for SSH access to the node (via heat-admin user) ++ type: string ++ default: default ++ constraints: ++ - custom_constraint: nova.keypair ++ ImageUpdatePolicy: ++ default: 'REBUILD_PRESERVE_EPHEMERAL' ++ description: What policy to use when reconstructing instances. 
REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt. ++ type: string ++ UpdateIdentifier: ++ default: '' ++ type: string ++ description: > ++ Setting to a previously unused value during stack-update will trigger ++ package update on all nodes ++ NtpServer: ++ type: string ++ default: '' ++ PublicInterface: ++ default: nic1 ++ description: What interface to bridge onto br-ex for network nodes. ++ type: string ++ ++resources: ++ OpenDaylightNode: ++ type: OS::Nova::Server ++ properties: ++ image: {get_param: OpenDaylightImage} ++ image_update_policy: {get_param: ImageUpdatePolicy} ++ flavor: {get_param: OpenDaylightFlavor} ++ key_name: {get_param: KeyName} ++ networks: ++ - network: ctlplane ++ user_data_format: SOFTWARE_CONFIG ++ user_data: {get_resource: NodeUserData} ++ name: {get_param: OpenDaylightHostname} ++ ++ NodeUserData: ++ type: OS::TripleO::NodeUserData ++ ++ ExternalPort: ++ type: OS::TripleO::Controller::Ports::ExternalPort ++ properties: ++ ControlPlaneIP: {get_attr: [OpenDaylightNode, networks, ctlplane, 0]} ++ ++ InternalApiPort: ++ type: OS::TripleO::Controller::Ports::InternalApiPort ++ properties: ++ ControlPlaneIP: {get_attr: [OpenDaylightNode, networks, ctlplane, 0]} ++ ++ NetIpMap: ++ type: OS::TripleO::Network::Ports::NetIpMap ++ properties: ++ ControlPlaneIp: {get_attr: [OpenDaylightNode, networks, ctlplane, 0]} ++ ExternalIp: {get_attr: [ExternalPort, ip_address]} ++ InternalApiIp: {get_attr: [InternalApiPort, ip_address]} ++ ++ NetIpSubnetMap: ++ type: OS::TripleO::Network::Ports::NetIpSubnetMap ++ properties: ++ ControlPlaneIp: {get_attr: [OpenDaylightNode, networks, ctlplane, 0]} ++ ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} ++ InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} ++ ++ NetworkConfig: ++ type: OS::TripleO::Controller::Net::SoftwareConfig ++ properties: ++ ControlPlaneIp: {get_attr: [OpenDaylightNode, networks, ctlplane, 0]} ++ ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]} ++ InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]} ++ ++ NetworkDeployment: ++ type: OS::TripleO::SoftwareDeployment ++ properties: ++ config: {get_resource: NetworkConfig} ++ server: {get_resource: OpenDaylightNode} ++ input_values: ++ bridge_name: br-ex ++ interface_name: {get_param: PublicInterface} ++ ++ OpenDaylightDeployment: ++ type: OS::TripleO::SoftwareDeployment ++ depends_on: NetworkDeployment ++ properties: ++ config: {get_resource: OpenDaylightConfig} ++ server: {get_resource: OpenDaylightNode} ++ input_values: ++ ntp_servers: ++ str_replace: ++ template: '["server"]' ++ params: ++ server: {get_param: NtpServer} ++ opendaylight_port: {get_param: OpenDaylightPort} ++ ++ OpenDaylightConfig: ++ type: OS::Heat::StructuredConfig ++ properties: ++ group: os-apply-config ++ config: ++ hiera: ++ hierarchy: ++ - '"%{::uuid}"' ++ - heat_config_%{::deploy_config_name} ++ - extraconfig ++ - bootstrap_node # provided by BootstrapNodeConfig ++ - all_nodes # provided by allNodesConfig ++ - vip_data # provided by vip-config ++ - RedHat # Workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1236143 ++ - common ++ datafiles: ++ common: ++ raw_data: {get_file: hieradata/common.yaml} ++ mapped_data: ++ ntp::servers: {get_input: ntp_servers} ++ opendaylight::admin_username: {get_param: OpenDaylightUsername} ++ opendaylight::admin_password: {get_param: OpenDaylightPassword} ++ opendaylight_port: {get_input: opendaylight_port} ++ ceph: ++ raw_data: {get_file: hieradata/ceph.yaml} ++ ++ UpdateConfig: ++ type: 
OS::TripleO::Tasks::PackageUpdate ++ ++ UpdateDeployment: ++ type: OS::Heat::SoftwareDeployment ++ properties: ++ config: {get_resource: UpdateConfig} ++ server: {get_resource: OpenDaylightNode} ++ input_values: ++ update_identifier: ++ get_param: UpdateIdentifier ++ ++ OpenDaylightHostsConfig: ++ type: OS::Heat::SoftwareConfig ++ properties: ++ group: script ++ config: | ++ #!/usr/bin/env bash ++ echo -e "$(facter ipaddress)\t\t$(hostname -f)\t$(hostname -s)" >> /etc/hosts ++ ++ OpenDaylightHostsDeployment: ++ type: OS::Heat::StructuredDeployment ++ depends_on: OpenDaylightDeployment ++ properties: ++ server: {get_resource: OpenDaylightNode} ++ config: {get_resource: OpenDaylightHostsConfig} ++ ++ OpenDaylightPuppetConfig: ++ type: OS::Heat::SoftwareConfig ++ properties: ++ group: puppet ++ config: ++ get_file: manifests/overcloud_opendaylight.pp ++ ++ OpenDaylightPuppetDeployment: ++ depends_on: OpenDaylightHostsDeployment ++ type: OS::Heat::StructuredDeployment ++ properties: ++ server: {get_resource: OpenDaylightNode} ++ config: {get_resource: OpenDaylightPuppetConfig} ++ input_values: ++ update_identifier: {get_param: UpdateIdentifier} ++ ++outputs: ++ ip_address: ++ description: IP address of the server in the ctlplane network ++ value: {get_attr: [OpenDaylightNode, networks, ctlplane, 0]} ++ opendaylight_controller_ip: ++ description: IP address of the server on the internal network ++ value: {get_attr: [InternalApiPort, ip_address]} ++ config_identifier: ++ description: identifier which changes if the node configuration may need re-applying ++ value: ++ list_join: ++ - ',' ++ - - {get_attr: [OpenDaylightDeployment, deploy_stdout]} ++ - {get_param: UpdateIdentifier} +-- +2.5.0 + diff --git a/build/opendaylight.patch b/build/opendaylight.patch deleted file mode 100644 index 5376665c..00000000 --- a/build/opendaylight.patch +++ /dev/null @@ -1,446 +0,0 @@ -From bdaa77b2b92f470fe0bc6b18bff5f2af1f7b65cf Mon Sep 17 00:00:00 2001 -From: Tim Rozet <tdrozet@gmail.com> -Date: Tue, 23 Jun 2015 17:46:00 -0400 -Subject: [PATCH] Adds OpenDaylight support - -To enable set neturon Mechanism Drivers to opendaylight via ExtraConfig: - - EnableOpenDaylight (used to enable ODL, defaults to false) - - OpenDaylightPort (used to define ODL REST Port, default 8081) - -Change-Id: I2a4c5b69ee0ad70d2372cad23b9af0890715c85f -Signed-off-by: Dan Radez <dradez@redhat.com> ---- - environments/opendaylight.yaml | 4 + - puppet/compute.yaml | 6 + - puppet/controller.yaml | 8 + - puppet/manifests/overcloud_compute.pp | 31 +++- - puppet/manifests/overcloud_controller.pp | 49 +++++- - puppet/manifests/overcloud_controller_pacemaker.pp | 183 +++++++++++++-------- - 6 files changed, 201 insertions(+), 80 deletions(-) - create mode 100644 environments/opendaylight.yaml - -diff --git a/environments/opendaylight.yaml b/environments/opendaylight.yaml -new file mode 100644 -index 0000000..39e4aa3 ---- /dev/null -+++ b/environments/opendaylight.yaml -@@ -0,0 +1,4 @@ -+parameters: -+ ExtraConfig: -+ neutron_mechanism_drivers: ['opendaylight'] -+ neutron_tenant_network_type: vxlan -diff --git a/puppet/compute.yaml b/puppet/compute.yaml -index 2b63535..3f20d48 100644 ---- a/puppet/compute.yaml -+++ b/puppet/compute.yaml -@@ -221,6 +221,10 @@ parameters: - NtpServer: - type: string - default: '' -+ OpenDaylightPort: -+ default: 8081 -+ description: Set opendaylight service port -+ type: number - RabbitHost: - type: string - default: '' # Has to be here because of the ignored empty value bug -@@ -409,6 +413,7 @@ resources: - 
neutron::rabbit_user: {get_input: rabbit_user} - neutron::rabbit_use_ssl: {get_input: rabbit_client_use_ssl} - neutron::rabbit_port: {get_input: rabbit_client_port} -+ opendaylight_port: {get_input: opendaylight_port} - neutron_flat_networks: {get_input: neutron_flat_networks} - neutron_host: {get_input: neutron_host} - neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip} -@@ -474,6 +479,7 @@ resources: - - {get_param: GlanceHost} - - ':' - - {get_param: GlancePort} -+ opendaylight_port: {get_param: OpenDaylightPort} - neutron_flat_networks: {get_param: NeutronFlatNetworks} - neutron_host: {get_param: NeutronHost} - neutron_local_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronTenantNetwork]}]} -diff --git a/puppet/controller.yaml b/puppet/controller.yaml -index 0bb8035..fa0dc3e 100644 ---- a/puppet/controller.yaml -+++ b/puppet/controller.yaml -@@ -427,6 +427,10 @@ parameters: - NtpServer: - type: string - default: '' -+ OpenDaylightPort: -+ default: 8081 -+ description: Set opendaylight service port -+ type: number - PcsdPassword: - type: string - description: The password for the 'pcsd' user. -@@ -794,6 +798,7 @@ resources: - template: tripleo-CLUSTER - params: - CLUSTER: {get_param: MysqlClusterUniquePart} -+ opendaylight_port: {get_param: OpenDaylightPort} - neutron_flat_networks: {get_param: NeutronFlatNetworks} - neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret} - neutron_agent_mode: {get_param: NeutronAgentMode} -@@ -1136,6 +1141,9 @@ resources: - mysql_bind_host: {get_input: mysql_network} - mysql_virtual_ip: {get_input: mysql_virtual_ip} - -+ # OpenDaylight -+ opendaylight_port: {get_input: opendaylight_port} -+ - # Neutron - neutron::bind_host: {get_input: neutron_api_network} - neutron::rabbit_password: {get_input: rabbit_password} -diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp -index 2150bab..9846636 100644 ---- a/puppet/manifests/overcloud_compute.pp -+++ b/puppet/manifests/overcloud_compute.pp -@@ -21,6 +21,8 @@ if count(hiera('ntp::servers')) > 0 { - include ::ntp - } - -+$controller_node_ips = split(hiera('controller_node_ips'), ',') -+ - file { ['/etc/libvirt/qemu/networks/autostart/default.xml', - '/etc/libvirt/qemu/networks/default.xml']: - ensure => absent, -@@ -74,9 +76,32 @@ class { 'neutron::plugins::ml2': - tenant_network_types => [hiera('neutron_tenant_network_type')], - } - --class { 'neutron::agents::ml2::ovs': -- bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), -- tunnel_types => split(hiera('neutron_tunnel_types'), ','), -+if 'opendaylight' in hiera('neutron_mechanism_drivers') { -+ $opendaylight_port = hiera('opendaylight_port') -+ $private_ip = hiera('neutron::agents::ml2::ovs::local_ip') -+ -+ exec { 'Wait for NetVirt OVS to come up': -+ command => "/bin/curl -o /dev/null --fail --silent --head -u admin:admin \ -+ http://${controller_node_ips[0]}:${opendaylight_port}/restconf/operational/network-topology:network-topology", -+ tries => 20, -+ try_sleep => 60, -+ } -> -+ # OVS manager -+ exec { 'Set OVS Manager to OpenDaylight': -+ command => "/usr/bin/ovs-vsctl set-manager tcp:${controller_node_ips[0]}:6640", -+ unless => "/usr/bin/ovs-vsctl show | /usr/bin/grep 'Manager \"tcp:${controller_node_ips[0]}:6640\"'", -+ } -> -+ # local ip -+ exec { 'Set local_ip Other Option': -+ command => "/usr/bin/ovs-vsctl set Open_vSwitch $(ovs-vsctl get Open_vSwitch . 
_uuid) other_config:local_ip=$private_ip", -+ unless => "/usr/bin/ovs-vsctl list Open_vSwitch | /usr/bin/grep 'local_ip=\"$private_ip\"'", -+ } -+ -+} else { -+ class { 'neutron::agents::ml2::ovs': -+ bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), -+ tunnel_types => split(hiera('neutron_tunnel_types'), ','), -+ } - } - - if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { -diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp -index c330236..68739a6 100644 ---- a/puppet/manifests/overcloud_controller.pp -+++ b/puppet/manifests/overcloud_controller.pp -@@ -30,6 +30,13 @@ if hiera('step') >= 1 { - - if hiera('step') >= 2 { - -+ if 'opendaylight' in hiera('neutron_mechanism_drivers') { -+ class {"opendaylight": -+ extra_features => ['odl-ovsdb-openstack', 'odl-dlux-core', 'odl-sfc-core', 'odl-sfc-ui', 'odl-sfc-sb-rest', 'odl-sfc-ovs', 'odl-sfc-netconf', 'odl-sfclisp', 'odl-sfcofl2'], -+ odl_rest_port => hiera('opendaylight_port'), -+ } -+ } -+ - if count(hiera('ntp::servers')) > 0 { - include ::ntp - } -@@ -242,10 +249,45 @@ if hiera('step') >= 3 { - tenant_network_types => [hiera('neutron_tenant_network_type')], - mechanism_drivers => [hiera('neutron_mechanism_drivers')], - } -- class { 'neutron::agents::ml2::ovs': -- bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), -- tunnel_types => split(hiera('neutron_tunnel_types'), ','), -+ -+ if 'opendaylight' in hiera('neutron_mechanism_drivers') { -+ -+ $opendaylight_port = hiera('opendaylight_port') -+ $private_ip = hiera('neutron::agents::ml2::ovs::local_ip') -+ -+ neutron_plugin_ml2 { -+ 'ml2_odl/username': value => 'admin'; -+ 'ml2_odl/password': value => 'admin'; -+ 'ml2_odl/url': value => "http://${controller_node_ips[0]}:${opendaylight_port}/controller/nb/v2/neutron"; -+ } -+ -+ exec { 'Wait for NetVirt OVSDB to come up': -+ command => "/bin/curl -o /dev/null --fail --silent --head -u admin:admin \ -+ http://${controller_node_ips[0]}:${opendaylight_port}/restconf/operational/network-topology:network-topology", -+ tries => 20, -+ try_sleep => 60, -+ } -> -+ # OVS manager -+ exec { 'Set OVS Manager to OpenDaylight': -+ command => "/usr/bin/ovs-vsctl set-manager tcp:${controller_node_ips[0]}:6640", -+ unless => "/usr/bin/ovs-vsctl show | /usr/bin/grep 'Manager \"tcp:${controller_node_ips[0]}:6640\"'", -+ } -> -+ # local ip -+ exec { 'Set local_ip Other Option': -+ command => "/usr/bin/ovs-vsctl set Open_vSwitch $(ovs-vsctl get Open_vSwitch . 
_uuid) other_config:local_ip=$private_ip", -+ unless => "/usr/bin/ovs-vsctl list Open_vSwitch | /usr/bin/grep 'local_ip=\"$private_ip\"'", -+ } -+ -+ } else { -+ -+ class { 'neutron::agents::ml2::ovs': -+ bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), -+ tunnel_types => split(hiera('neutron_tunnel_types'), ','), -+ } -+ -+ Service['neutron-server'] -> Service['neutron-ovs-agent-service'] - } -+ - if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') { - include neutron::plugins::ml2::cisco::nexus1000v - -@@ -281,7 +323,6 @@ if hiera('step') >= 3 { - - Service['neutron-server'] -> Service['neutron-dhcp-service'] - Service['neutron-server'] -> Service['neutron-l3'] -- Service['neutron-server'] -> Service['neutron-ovs-agent-service'] - Service['neutron-server'] -> Service['neutron-metadata'] - - include ::cinder -diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp -index b8fa89f..9911285 100644 ---- a/puppet/manifests/overcloud_controller_pacemaker.pp -+++ b/puppet/manifests/overcloud_controller_pacemaker.pp -@@ -380,6 +380,13 @@ if hiera('step') >= 2 { - - } - -+ if 'opendaylight' in hiera('neutron_mechanism_drivers') { -+ class {"opendaylight": -+ extra_features => ['odl-ovsdb-openstack', 'odl-dlux-core', 'odl-sfc-core', 'odl-sfc-ui', 'odl-sfc-sb-rest', 'odl-sfc-ovs', 'odl-sfc-netconf', 'odl-sfclisp', 'odl-sfcofl2'], -+ odl_rest_port => hiera('opendaylight_port'), -+ } -+ } -+ - exec { 'galera-ready' : - command => '/usr/bin/clustercheck >/dev/null', - timeout => 30, -@@ -604,13 +611,42 @@ if hiera('step') >= 3 { - tenant_network_types => [hiera('neutron_tenant_network_type')], - mechanism_drivers => [hiera('neutron_mechanism_drivers')], - } -- class { 'neutron::agents::ml2::ovs': -- manage_service => false, -- enabled => false, -- bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), -- tunnel_types => split(hiera('neutron_tunnel_types'), ','), -- } -+ if 'opendaylight' in hiera('neutron_mechanism_drivers') { -+ -+ $opendaylight_port = hiera('opendaylight_port') -+ $private_ip = hiera('neutron::agents::ml2::ovs::local_ip') -+ -+ neutron_plugin_ml2 { -+ 'ml2_odl/username': value => 'admin'; -+ 'ml2_odl/password': value => 'admin'; -+ 'ml2_odl/url': value => "http://${controller_node_ips[0]}:${opendaylight_port}/controller/nb/v2/neutron"; -+ } - -+ exec { 'Wait for NetVirt OVSDB to come up': -+ command => "/bin/curl -o /dev/null --fail --silent --head -u admin:admin \ -+ http://${controller_node_ips[0]}:${opendaylight_port}/restconf/operational/network-topology:network-topology", -+ tries => 20, -+ try_sleep => 60, -+ } -> -+ # OVS manager -+ exec { 'Set OVS Manager to OpenDaylight': -+ command => "/usr/bin/ovs-vsctl set-manager tcp:${controller_node_ips[0]}:6640", -+ unless => "/usr/bin/ovs-vsctl show | /usr/bin/grep 'Manager \"tcp:${controller_node_ips[0]}:6640\"'", -+ } -> -+ # local ip -+ exec { 'Set local_ip Other Option': -+ command => "/usr/bin/ovs-vsctl set Open_vSwitch $(ovs-vsctl get Open_vSwitch . 
_uuid) other_config:local_ip=$private_ip", -+ unless => "/usr/bin/ovs-vsctl list Open_vSwitch | /usr/bin/grep 'local_ip=\"$private_ip\"'", -+ } -+ -+ } else { -+ class { 'neutron::agents::ml2::ovs': -+ manage_service => false, -+ enabled => false, -+ bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), -+ tunnel_types => split(hiera('neutron_tunnel_types'), ','), -+ } -+ } - if 'cisco_ucsm' in hiera('neutron_mechanism_drivers') { - include ::neutron::plugins::ml2::cisco::ucsm - } -@@ -1059,56 +1095,13 @@ if hiera('step') >= 4 { - pacemaker::resource::service { $::neutron::params::dhcp_agent_service: - clone_params => "interleave=true", - } -- pacemaker::resource::service { $::neutron::params::ovs_agent_service: -- clone_params => "interleave=true", -- } - pacemaker::resource::service { $::neutron::params::metadata_agent_service: - clone_params => "interleave=true", - } -- pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service: -- ocf_agent_name => "neutron:OVSCleanup", -- clone_params => "interleave=true", -- } - pacemaker::resource::ocf { 'neutron-netns-cleanup': - ocf_agent_name => "neutron:NetnsCleanup", - clone_params => "interleave=true", - } -- -- # neutron - one chain ovs-cleanup-->netns-cleanup-->ovs-agent -- pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint': -- constraint_type => "order", -- first_resource => "${::neutron::params::ovs_cleanup_service}-clone", -- second_resource => "neutron-netns-cleanup-clone", -- first_action => "start", -- second_action => "start", -- require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"], -- Pacemaker::Resource::Ocf['neutron-netns-cleanup']], -- } -- pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation': -- source => "neutron-netns-cleanup-clone", -- target => "${::neutron::params::ovs_cleanup_service}-clone", -- score => "INFINITY", -- require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"], -- Pacemaker::Resource::Ocf['neutron-netns-cleanup']], -- } -- pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint': -- constraint_type => "order", -- first_resource => "neutron-netns-cleanup-clone", -- second_resource => "${::neutron::params::ovs_agent_service}-clone", -- first_action => "start", -- second_action => "start", -- require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"], -- Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]], -- } -- pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation': -- source => "${::neutron::params::ovs_agent_service}-clone", -- target => "neutron-netns-cleanup-clone", -- score => "INFINITY", -- require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"], -- Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]], -- } -- -- #another chain keystone-->neutron-server-->ovs-agent-->dhcp-->l3 - pacemaker::constraint::base { 'keystone-to-neutron-server-constraint': - constraint_type => "order", - first_resource => "${::keystone::params::service_name}-clone", -@@ -1118,31 +1111,75 @@ if hiera('step') >= 4 { - require => [Pacemaker::Resource::Service[$::keystone::params::service_name], - Pacemaker::Resource::Service[$::neutron::params::server_service]], - } -- pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint': -- constraint_type => "order", -- first_resource => "${::neutron::params::server_service}-clone", -- second_resource => 
"${::neutron::params::ovs_agent_service}-clone", -- first_action => "start", -- second_action => "start", -- require => [Pacemaker::Resource::Service[$::neutron::params::server_service], -- Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], -- } -- pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint': -- constraint_type => "order", -- first_resource => "${::neutron::params::ovs_agent_service}-clone", -- second_resource => "${::neutron::params::dhcp_agent_service}-clone", -- first_action => "start", -- second_action => "start", -- require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"], -- Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]], -+ if 'openvswitch' in hiera('neutron_mechanism_drivers') { -+ pacemaker::resource::service { $::neutron::params::ovs_agent_service: -+ clone_params => "interleave=true", -+ } -+ pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service: -+ ocf_agent_name => "neutron:OVSCleanup", -+ clone_params => "interleave=true", -+ } -+ # neutron - one chain ovs-cleanup-->netns-cleanup-->ovs-agent -+ pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint': -+ constraint_type => "order", -+ first_resource => "${::neutron::params::ovs_cleanup_service}-clone", -+ second_resource => "neutron-netns-cleanup-clone", -+ first_action => "start", -+ second_action => "start", -+ require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"], -+ Pacemaker::Resource::Ocf['neutron-netns-cleanup']], -+ } -+ pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation': -+ source => "neutron-netns-cleanup-clone", -+ target => "${::neutron::params::ovs_cleanup_service}-clone", -+ score => "INFINITY", -+ require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"], -+ Pacemaker::Resource::Ocf['neutron-netns-cleanup']], -+ } -+ pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint': -+ constraint_type => "order", -+ first_resource => "neutron-netns-cleanup-clone", -+ second_resource => "${::neutron::params::ovs_agent_service}-clone", -+ first_action => "start", -+ second_action => "start", -+ require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"], -+ Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]], -+ } -+ pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation': -+ source => "${::neutron::params::ovs_agent_service}-clone", -+ target => "neutron-netns-cleanup-clone", -+ score => "INFINITY", -+ require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"], -+ Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]], -+ } - -- } -- pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation': -- source => "${::neutron::params::dhcp_agent_service}-clone", -- target => "${::neutron::params::ovs_agent_service}-clone", -- score => "INFINITY", -- require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"], -- Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]], -+ #another chain keystone-->neutron-server-->ovs-agent-->dhcp-->l3 -+ pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint': -+ constraint_type => "order", -+ first_resource => "${::neutron::params::server_service}-clone", -+ second_resource => "${::neutron::params::ovs_agent_service}-clone", -+ first_action => "start", -+ 
second_action => "start", -+ require => [Pacemaker::Resource::Service[$::neutron::params::server_service], -+ Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]], -+ } -+ pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint': -+ constraint_type => "order", -+ first_resource => "${::neutron::params::ovs_agent_service}-clone", -+ second_resource => "${::neutron::params::dhcp_agent_service}-clone", -+ first_action => "start", -+ second_action => "start", -+ require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"], -+ Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]], -+ -+ } -+ pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation': -+ source => "${::neutron::params::dhcp_agent_service}-clone", -+ target => "${::neutron::params::ovs_agent_service}-clone", -+ score => "INFINITY", -+ require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"], -+ Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]], -+ } - } - pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint': - constraint_type => "order", --- -2.5.0 - diff --git a/build/opnfv-apex.spec b/build/opnfv-apex.spec index c1e3504c..88ec6425 100644 --- a/build/opnfv-apex.spec +++ b/build/opnfv-apex.spec @@ -1,5 +1,5 @@ Name: opnfv-apex -Version: 2.4 +Version: 2.5 Release: %{release} Summary: RDO Manager disk images for deployment @@ -42,8 +42,8 @@ install build/default-pool.xml %{buildroot}%{_var}/opt/opnfv/ install build/network-environment.yaml %{buildroot}%{_var}/opt/opnfv/ install build/nics/controller.yaml %{buildroot}%{_var}/opt/opnfv/nics/ install build/nics/compute.yaml %{buildroot}%{_var}/opt/opnfv/nics/ -install build/opendaylight.yaml %{buildroot}%{_var}/opt/opnfv/ -install build/opendaylight.patch %{buildroot}%{_var}/opt/opnfv/ +install build/opendaylight-puppet-neutron.patch %{buildroot}%{_var}/opt/opnfv/ +install build/opendaylight-tripleo-heat-templates.patch %{buildroot}%{_var}/opt/opnfv/ install build/instackenv-virt.json %{buildroot}%{_var}/opt/opnfv/ install build/instackenv.json.example %{buildroot}%{_var}/opt/opnfv/ @@ -68,8 +68,8 @@ install docs/release-notes.html %{buildroot}%{_docdir}/opnfv/ %{_var}/opt/opnfv/network-environment.yaml %{_var}/opt/opnfv/nics/controller.yaml %{_var}/opt/opnfv/nics/compute.yaml -%{_var}/opt/opnfv/opendaylight.yaml -%{_var}/opt/opnfv/opendaylight.patch +%{_var}/opt/opnfv/opendaylight-puppet-neutron.patch +%{_var}/opt/opnfv/opendaylight-tripleo-heat-templates.patch %{_var}/opt/opnfv/instackenv-virt.json %{_var}/opt/opnfv/instackenv.json.example %{_var}/opt/opnfv/stack/overcloud-full.qcow2 @@ -81,6 +81,8 @@ install docs/release-notes.html %{buildroot}%{_docdir}/opnfv/ %changelog +* Wed Dec 09 2015 Dan Radez <dradez@redhat.com> - 2.5-1 +- Updating the OpenDaylight Patch * Fri Dec 05 2015 Dan Radez <dradez@redhat.com> - 2.4-1 - Removing glance images * Fri Nov 20 2015 Dan Radez <dradez@redhat.com> - 2.3-1 diff --git a/ci/deploy.sh b/ci/deploy.sh index db802d2e..1c0d012c 100755 --- a/ci/deploy.sh +++ b/ci/deploy.sh @@ -400,17 +400,12 @@ function copy_materials_to_instack { echo "Copying configuration file and disk images to instack" scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full.qcow2 "stack@$UNDERCLOUD": scp ${SSH_OPTIONS[@]} $NETENV "stack@$UNDERCLOUD": - scp ${SSH_OPTIONS[@]} $CONFIG/opendaylight.yaml "stack@$UNDERCLOUD": scp ${SSH_OPTIONS[@]} -r $CONFIG/nics/ "stack@$UNDERCLOUD": if [[ 
${#deploy_options_array[@]} -eq 0 || ${deploy_options_array['sdn_controller']} == 'opendaylight' ]]; then - DEPLOY_OPTIONS+=" -e opendaylight.yaml" - ## WORK AROUND - # when OpenDaylight lands in upstream RDO manager this can be removed - # apply the opendaylight patch - scp ${SSH_OPTIONS[@]} $CONFIG/opendaylight.patch "root@$UNDERCLOUD": - ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "cd /usr/share/openstack-tripleo-heat-templates/; patch -Np1 < /root/opendaylight.patch" - ## END WORK AROUND + DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight.yaml" + elif [ ${deploy_options_array['sdn_controller']} == 'opendaylight-external' ]; then + DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight-external.yaml" elif [ ${deploy_options_array['sdn_controller']} == 'onos' ]; then echo -e "${red}ERROR: ONOS is currently unsupported...exiting${reset}" exit 1 @@ -484,6 +479,7 @@ ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "openstack undercloud install > ape if [ $net_isolation_enabled == "TRUE" ]; then DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml" DEPLOY_OPTIONS+=" -e network-environment.yaml" + DEPLOY_OPTIONS+=" --ntp-server $ntp_server" fi ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI |