Diffstat (limited to 'build')
-rw-r--r--  build/opnfv-tripleo-heat-templates.patch  2062
-rwxr-xr-x  build/undercloud.sh                          33
2 files changed, 17 insertions(+), 2078 deletions(-)
diff --git a/build/opnfv-tripleo-heat-templates.patch b/build/opnfv-tripleo-heat-templates.patch
deleted file mode 100644
index 648dba32..00000000
--- a/build/opnfv-tripleo-heat-templates.patch
+++ /dev/null
@@ -1,2062 +0,0 @@
-From ca87d09638f46ba49a866832030970bf43ade74e Mon Sep 17 00:00:00 2001
-From: Tim Rozet <tdrozet@gmail.com>
-Date: Tue, 12 Jan 2016 16:49:57 -0500
-Subject: [PATCH] Adds current opnfv patch with ODL and ONOS support
-
----
- environments/onos.yaml | 8 +
- environments/opendaylight-external.yaml | 25 ++
- environments/opendaylight.yaml | 26 ++
- environments/opendaylight_l3.yaml | 9 +
- environments/opendaylight_sdnvpn.yaml | 29 ++
- environments/opendaylight_sfc.yaml | 28 ++
- network/endpoints/endpoint_map.yaml | 31 ++
- overcloud-resource-registry-puppet.yaml | 3 +
- overcloud-without-mergepy.yaml | 103 +++++
- puppet/all-nodes-config.yaml | 27 ++
- puppet/compute.yaml | 41 ++
- puppet/controller.yaml | 104 ++++-
- puppet/hieradata/common.yaml | 1 +
- puppet/hieradata/controller.yaml | 5 +-
- puppet/manifests/overcloud_compute.pp | 49 ++-
- puppet/manifests/overcloud_controller.pp | 128 +++++-
- puppet/manifests/overcloud_controller_pacemaker.pp | 482 ++++++++++++++-------
- puppet/manifests/overcloud_opendaylight.pp | 27 ++
- puppet/opendaylight-puppet.yaml | 223 ++++++++++
- puppet/vip-config.yaml | 1 +
- 20 files changed, 1188 insertions(+), 162 deletions(-)
- create mode 100644 environments/onos.yaml
- create mode 100644 environments/opendaylight-external.yaml
- create mode 100644 environments/opendaylight.yaml
- create mode 100644 environments/opendaylight_l3.yaml
- create mode 100644 environments/opendaylight_sdnvpn.yaml
- create mode 100644 environments/opendaylight_sfc.yaml
- create mode 100644 puppet/manifests/overcloud_opendaylight.pp
- create mode 100644 puppet/opendaylight-puppet.yaml
-
-diff --git a/environments/onos.yaml b/environments/onos.yaml
-new file mode 100644
-index 0000000..510aca9
---- /dev/null
-+++ b/environments/onos.yaml
-@@ -0,0 +1,8 @@
-+parameters:
-+ # This is a workaround for an ODL deployment bug. Once the bug is fixed, OpenDaylightCount can be removed.
-+ OpenDaylightCount: 0
-+ NeutronL3HA: false
-+ ExtraConfig:
-+ neutron_service_plugins: ['onos_router']
-+ neutron_mechanism_drivers: ['onos_ml2']
-+ neutron_tenant_network_type: vxlan
-diff --git a/environments/opendaylight-external.yaml b/environments/opendaylight-external.yaml
-new file mode 100644
-index 0000000..411df21
---- /dev/null
-+++ b/environments/opendaylight-external.yaml
-@@ -0,0 +1,25 @@
-+# Environment file used to enable OpenDaylight
-+ # Currently uses an overcloud image that is assumed
-+ # to have been virt-customized with the ODL RPM already on it
-+
-+# These parameters customize the OpenDaylight Node
-+# The user name and password are for the ODL service
-+# Defaults are included here for reference
-+#parameter_defaults:
-+# OpenDaylightFlavor: baremetal
-+# OpenDaylightHostname: opendaylight-server
-+# OpenDaylightImage: overcloud-full
-+# OpenDaylightUsername: admin
-+# OpenDaylightPassword: admin
-+
-+parameters:
-+ # increase this if you need more ODL nodes
-+ OpenDaylightCount: 1
-+ NeutronL3HA: false
-+ ExtraConfig:
-+ neutron_mechanism_drivers: ['opendaylight']
-+ neutron_tenant_network_type: vxlan
-+ # Enable this if you want OpenDaylight on the controllers;
-+ # reduce OpenDaylightCount to 0 if you don't want any
-+ # OpenDaylight-only nodes
-+ #opendaylight_install: true
-diff --git a/environments/opendaylight.yaml b/environments/opendaylight.yaml
-new file mode 100644
-index 0000000..cfa4ad3
---- /dev/null
-+++ b/environments/opendaylight.yaml
-@@ -0,0 +1,26 @@
-+# Environment file used to enable OpenDaylight
-+ # Currently uses an overcloud image that is assumed
-+ # to have been virt-customized with the ODL RPM already on it
-+
-+# These parameters customize the OpenDaylight Node
-+# The user name and password are for the ODL service
-+# Defaults are included here for reference
-+#parameter_defaults:
-+# OpenDaylightFlavor: baremetal
-+# OpenDaylightHostname: opendaylight-server
-+# OpenDaylightImage: overcloud-full
-+# OpenDaylightUsername: admin
-+# OpenDaylightPassword: admin
-+
-+parameters:
-+ # increase this if you need more ODL nodes
-+ # OpenDaylightCount: 1
-+ NeutronL3HA: false
-+ OpenDaylightEnableHA: true
-+ ExtraConfig:
-+ neutron_mechanism_drivers: ['opendaylight']
-+ neutron_tenant_network_type: vxlan
-+ # Enable this if you want OpenDaylight on the controllers;
-+ # reduce OpenDaylightCount to 0 if you don't want any
-+ # OpenDaylight-only nodes
-+ opendaylight_install: true
-diff --git a/environments/opendaylight_l3.yaml b/environments/opendaylight_l3.yaml
-new file mode 100644
-index 0000000..05c0aff
---- /dev/null
-+++ b/environments/opendaylight_l3.yaml
-@@ -0,0 +1,9 @@
-+parameters:
-+ #NeutronEnableL3Agent: false
-+ NeutronEnableForceMetadata: true
-+ OpenDaylightEnableL3: "'yes'"
-+ NeutronServicePlugins: "networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin"
-+ ExtraConfig:
-+ neutron_mechanism_drivers: ['opendaylight']
-+ neutron_tenant_network_type: vxlan
-+ opendaylight_install: true
-diff --git a/environments/opendaylight_sdnvpn.yaml b/environments/opendaylight_sdnvpn.yaml
-new file mode 100644
-index 0000000..3a14975
---- /dev/null
-+++ b/environments/opendaylight_sdnvpn.yaml
-@@ -0,0 +1,29 @@
-+# Environment file used to enable OpenDaylight
-+ # Currently uses an overcloud image that is assumed
-+ # to have been virt-customized with the ODL RPM already on it
-+
-+# These parameters customize the OpenDaylight Node
-+# The user name and password are for the ODL service
-+# Defaults are included here for reference
-+#parameter_defaults:
-+# OpenDaylightFlavor: baremetal
-+# OpenDaylightHostname: opendaylight-server
-+# OpenDaylightImage: overcloud-full
-+# OpenDaylightUsername: admin
-+# OpenDaylightPassword: admin
-+
-+parameters:
-+ # increase this if you need more ODL nodes
-+ # OpenDaylightCount: 1
-+ ControllerEnableSwiftStorage: false
-+ OpenDaylightFeatures: "odl-ovsdb-openstack,odl-vpnservice-api,odl-vpnservice-impl,odl-vpnservice-impl-rest,odl-vpnservice-impl-ui,odl-vpnservice-core"
-+ NeutronL3HA: false
-+ NeutronServicePlugins: "router,qos,networking_bgpvpn.neutron.services.plugin.BGPVPNPlugin"
-+ ExtraConfig:
-+ tripleo::ringbuilder::build_ring: False
-+ neutron_mechanism_drivers: ['opendaylight']
-+ neutron_tenant_network_type: vxlan
-+ # Enable this if you want OpenDaylight on the controllers;
-+ # reduce OpenDaylightCount to 0 if you don't want any
-+ # OpenDaylight-only nodes
-+ opendaylight_install: true
-diff --git a/environments/opendaylight_sfc.yaml b/environments/opendaylight_sfc.yaml
-new file mode 100644
-index 0000000..3dd1e13
---- /dev/null
-+++ b/environments/opendaylight_sfc.yaml
-@@ -0,0 +1,28 @@
-+# Environment file used to enable OpenDaylight
-+ # Currently uses an overcloud image that is assumed
-+ # to have been virt-customized with the ODL RPM already on it
-+
-+# These parameters customize the OpenDaylight Node
-+# The user name and password are for the ODL service
-+# Defaults are included here for reference
-+#parameter_defaults:
-+# OpenDaylightFlavor: baremetal
-+# OpenDaylightHostname: opendaylight-server
-+# OpenDaylightImage: overcloud-full
-+# OpenDaylightUsername: admin
-+# OpenDaylightPassword: admin
-+
-+parameters:
-+ # increase this if you need more ODL nodes
-+ # OpenDaylightCount: 1
-+ ControllerEnableSwiftStorage: false
-+ OpenDaylightFeatures: "odl-ovsdb-sfc-rest"
-+ NeutronL3HA: false
-+ ExtraConfig:
-+ tripleo::ringbuilder::build_ring: False
-+ neutron_mechanism_drivers: ['opendaylight']
-+ neutron_tenant_network_type: vxlan
-+ # Enable this if you want OpenDaylight on the controllers;
-+ # reduce OpenDaylightCount to 0 if you don't want any
-+ # OpenDaylight-only nodes
-+ opendaylight_install: true
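Taken together, the environment files above all drive the same small set of knobs: the OpenDaylight node count, the L3/HA toggles, and the ExtraConfig hieradata (neutron_mechanism_drivers, neutron_tenant_network_type, opendaylight_install) that the Puppet manifests later read back with hiera(). A minimal sketch, assuming PyYAML is installed and the SFC file created above is present, of inspecting those overrides:

# Minimal sketch (assumes PyYAML): inspect the parameter overrides that
# environments/opendaylight_sfc.yaml would feed into the overcloud deploy.
import yaml

with open("environments/opendaylight_sfc.yaml") as f:
    env = yaml.safe_load(f)

params = env.get("parameters", {})
extra = params.get("ExtraConfig", {})

print(params.get("OpenDaylightFeatures"))        # odl-ovsdb-sfc-rest
print(extra.get("neutron_mechanism_drivers"))    # ['opendaylight']
print(extra.get("neutron_tenant_network_type"))  # vxlan
print(extra.get("opendaylight_install"))         # True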
-diff --git a/network/endpoints/endpoint_map.yaml b/network/endpoints/endpoint_map.yaml
-index 0521401..7caa91b 100644
---- a/network/endpoints/endpoint_map.yaml
-+++ b/network/endpoints/endpoint_map.yaml
-@@ -4,6 +4,9 @@ description: >
- A Map of OpenStack Endpoints
-
- parameters:
-+ AodhApiVirtualIP:
-+ type: string
-+ default: ''
- CeilometerApiVirtualIP:
- type: string
- default: ''
-@@ -43,6 +46,9 @@ parameters:
- EndpointMap:
- type: json
- default:
-+ AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
-+ AodhInternal: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
-+ AodhPublic: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
- CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
- CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
- CeilometerPublic: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
-@@ -83,6 +89,28 @@ parameters:
-
- resources:
-
-+ AodhInternal:
-+ type: OS::TripleO::Endpoint
-+ properties:
-+ EndpointName: AodhInternal
-+ EndpointMap: { get_param: EndpointMap }
-+ CloudName: {get_param: CloudName}
-+ IP: {get_param: AodhApiVirtualIP}
-+ AodhPublic:
-+ type: OS::TripleO::Endpoint
-+ properties:
-+ EndpointName: AodhPublic
-+ EndpointMap: { get_param: EndpointMap }
-+ CloudName: {get_param: CloudName}
-+ IP: {get_param: PublicVirtualIP}
-+ AodhAdmin:
-+ type: OS::TripleO::Endpoint
-+ properties:
-+ EndpointName: AodhAdmin
-+ EndpointMap: { get_param: EndpointMap }
-+ CloudName: {get_param: CloudName}
-+ IP: {get_param: AodhApiVirtualIP}
-+
- CeilometerInternal:
- type: OS::TripleO::Endpoint
- properties:
-@@ -407,6 +435,9 @@ resources:
- outputs:
- endpoint_map:
- value:
-+ AodhInternal: {get_attr: [ AodhInternal, endpoint] }
-+ AodhPublic: {get_attr: [ AodhPublic, endpoint] }
-+ AodhAdmin: {get_attr: [ AodhAdmin, endpoint] }
- CeilometerInternal: {get_attr: [ CeilometerInternal, endpoint] }
- CeilometerPublic: {get_attr: [ CeilometerPublic, endpoint] }
- CeilometerAdmin: {get_attr: [ CeilometerAdmin, endpoint] }
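The Aodh entries added above follow the existing endpoint pattern: a map entry of {protocol, port, host} whose IP_ADDRESS placeholder is filled from AodhApiVirtualIP (or PublicVirtualIP for the public endpoint). A rough illustration of that substitution as a plain helper, not the OS::TripleO::Endpoint resource the template actually instantiates; the VIP below is made up:

# Illustrative only: mimic how an entry such as
#   AodhInternal: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
# becomes a URL once the placeholder host is swapped for the Aodh API VIP.
def resolve_endpoint(entry, vip):
    host = vip if entry["host"] == "IP_ADDRESS" else entry["host"]
    return "{}://{}:{}".format(entry["protocol"], host, entry["port"])

aodh_internal = {"protocol": "http", "port": "8042", "host": "IP_ADDRESS"}
print(resolve_endpoint(aodh_internal, "172.16.2.10"))  # http://172.16.2.10:8042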
-diff --git a/overcloud-resource-registry-puppet.yaml b/overcloud-resource-registry-puppet.yaml
-index 4cfed6b..adecc79 100644
---- a/overcloud-resource-registry-puppet.yaml
-+++ b/overcloud-resource-registry-puppet.yaml
-@@ -27,6 +27,9 @@ resource_registry:
- # To disable, replace with firstboot/userdata_default.yaml
- OS::TripleO::NodeAdminUserData: firstboot/userdata_heat_admin.yaml
-
-+ # This configures OpenDaylight to drive the network
-+ OS::TripleO::OpenDaylightNode: puppet/opendaylight-puppet.yaml
-+
- # Hooks for operator extra config
- # NodeUserData == Cloud-init additional user-data, e.g cloud-config
- # ControllerExtraConfigPre == Controller configuration pre service deployment
-diff --git a/overcloud-without-mergepy.yaml b/overcloud-without-mergepy.yaml
-index a532c2f..9c6e3cd 100644
---- a/overcloud-without-mergepy.yaml
-+++ b/overcloud-without-mergepy.yaml
-@@ -15,6 +15,11 @@ parameters:
- description: The password for the keystone admin account, used for monitoring, querying neutron etc.
- type: string
- hidden: true
-+ AodhPassword:
-+ default: unset
-+ description: The password for the aodh services
-+ type: string
-+ hidden: true
- CeilometerBackend:
- default: 'mongodb'
- description: The ceilometer backend type.
-@@ -113,6 +118,10 @@ parameters:
- default: ''
- type: string
- description: Neutron ID for ctlplane network.
-+ NeutronEnableForceMetadata:
-+ default: 'False'
-+ description: If True, DHCP always provides a metadata route to the VM.
-+ type: string
- NeutronEnableTunnelling:
- type: string
- default: "True"
-@@ -227,6 +236,35 @@ parameters:
- default: false
- description: Should MongoDb journaling be disabled
- type: boolean
-+ OpenDaylightPort:
-+ default: 8081
-+ description: Set opendaylight service port
-+ type: number
-+ OpenDaylightEnableL3:
-+ description: Knob to enable/disable ODL L3
-+ type: string
-+ default: 'no'
-+ OpenDaylightEnableHA:
-+ description: Knob to enable/disable ODL HA
-+ type: boolean
-+ default: false
-+ OpenDaylightFeatures:
-+ description: List of features to install with ODL
-+ type: comma_delimited_list
-+ default: "odl-ovsdb-openstack"
-+ OpenDaylightInstall:
-+ default: false
-+ description: Whether to install OpenDaylight on the control nodes.
-+ type: boolean
-+ OpenDaylightUsername:
-+ default: 'admin'
-+ description: The username for the opendaylight server.
-+ type: string
-+ OpenDaylightPassword:
-+ default: 'admin'
-+ type: string
-+ description: The password for the opendaylight server.
-+ hidden: true
- PublicVirtualFixedIPs:
- default: []
- description: >
-@@ -575,6 +613,8 @@ parameters:
- default:
- NeutronTenantNetwork: tenant
- CeilometerApiNetwork: internal_api
-+ AodhApiNetwork: internal_api
-+ OpenDaylightApiNetwork: internal_api
- MongoDbNetwork: internal_api
- CinderApiNetwork: internal_api
- CinderIscsiNetwork: storage
-@@ -664,6 +704,18 @@ parameters:
- structure as ExtraConfig.
- type: json
-
-+# OpenDaylight specific parameters
-+ OpenDaylightCount:
-+ type: number
-+ default: 0
-+ OpenDaylightImage:
-+ default: overcloud-full
-+ type: string
-+ OpenDaylightFlavor:
-+ default: baremetal
-+ description: Flavor for OpenDaylight node
-+ type: string
-+
- # Hostname format for each role
- # Note %index% is translated into the index of the node, e.g 0/1/2 etc
- # and %stackname% is replaced with OS::stack_name in the template below.
-@@ -688,6 +740,10 @@ parameters:
- type: string
- description: Format for CephStorage node hostnames
- default: '%stackname%-cephstorage-%index%'
-+ OpenDaylightHostnameFormat:
-+ type: string
-+ description: Format for OpenDaylight node hostnames
-+ default: '%stackname%-opendaylight-%index%'
-
- # Identifiers to trigger tasks on nodes
- UpdateIdentifier:
-@@ -758,6 +814,7 @@ resources:
- properties:
- CloudName: {get_param: CloudName}
- CeilometerApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
-+ AodhApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, AodhApiNetwork]}]}
- CinderApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]}
- GlanceApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
- GlanceRegistryVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceRegistryNetwork]}]}
-@@ -770,6 +827,29 @@ resources:
- SwiftProxyVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]}
- PublicVirtualIP: {get_attr: [VipMap, net_ip_map, external]}
-
-+ OpenDaylightNode:
-+ type: OS::Heat::ResourceGroup
-+ depends_on: Networks
-+ properties:
-+ count: {get_param: OpenDaylightCount}
-+ removal_policies: {get_param: ComputeRemovalPolicies}
-+ resource_def:
-+ type: OS::TripleO::OpenDaylightNode
-+ properties:
-+ UpdateIdentifier: {get_param: UpdateIdentifier}
-+ OpenDaylightFlavor: {get_param: OpenDaylightFlavor}
-+ OpenDaylightImage: {get_param: OpenDaylightImage}
-+ OpenDaylightPort: {get_param: OpenDaylightPort}
-+ OpenDaylightUsername: {get_param: OpenDaylightUsername}
-+ OpenDaylightFeatures: {get_param: OpenDaylightFeatures}
-+ OpenDaylightPassword: {get_param: OpenDaylightPassword}
-+ OpenDaylightEnableL3: {get_param: OpenDaylightEnableL3}
-+ OpenDaylightHostname:
-+ str_replace:
-+ template: {get_param: OpenDaylightHostnameFormat}
-+ params:
-+ '%stackname%': {get_param: 'OS::stack_name'}
-+
- Controller:
- type: OS::Heat::ResourceGroup
- depends_on: Networks
-@@ -781,6 +861,7 @@ resources:
- properties:
- AdminPassword: {get_param: AdminPassword}
- AdminToken: {get_param: AdminToken}
-+ AodhPassword: {get_param: AodhPassword}
- CeilometerBackend: {get_param: CeilometerBackend}
- CeilometerMeteringSecret: {get_param: CeilometerMeteringSecret}
- CeilometerPassword: {get_param: CeilometerPassword}
-@@ -832,6 +913,7 @@ resources:
- NeutronBridgeMappings: {get_param: NeutronBridgeMappings}
- NeutronExternalNetworkBridge: {get_param: NeutronExternalNetworkBridge}
- NeutronEnableTunnelling: {get_param: NeutronEnableTunnelling}
-+ NeutronEnableForceMetadata: {get_param: NeutronEnableForceMetadata}
- NeutronNetworkVLANRanges: {get_param: NeutronNetworkVLANRanges}
- NeutronPublicInterface: {get_param: NeutronPublicInterface}
- NeutronPublicInterfaceDefaultRoute: {get_param: NeutronPublicInterfaceDefaultRoute}
-@@ -853,6 +935,13 @@ resources:
- NovaPassword: {get_param: NovaPassword}
- NtpServer: {get_param: NtpServer}
- MongoDbNoJournal: {get_param: MongoDbNoJournal}
-+ OpenDaylightPort: {get_param: OpenDaylightPort}
-+ OpenDaylightInstall: {get_param: OpenDaylightInstall}
-+ OpenDaylightUsername: {get_param: OpenDaylightUsername}
-+ OpenDaylightFeatures: {get_param: OpenDaylightFeatures}
-+ OpenDaylightPassword: {get_param: OpenDaylightPassword}
-+ OpenDaylightEnableL3: {get_param: OpenDaylightEnableL3}
-+ OpenDaylightEnableHA: {get_param: OpenDaylightEnableHA}
- PcsdPassword: {get_resource: PcsdPassword}
- PublicVirtualInterface: {get_param: PublicVirtualInterface}
- RabbitPassword: {get_param: RabbitPassword}
-@@ -878,6 +967,8 @@ resources:
- ServiceNetMap: {get_param: ServiceNetMap}
- EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
- CeilometerApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
-+ AodhApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, AodhApiNetwork]}]}
-+ OpenDaylightApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, OpenDaylightApiNetwork]}]}
- CinderApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]}
- HeatApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]}
- GlanceApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
-@@ -948,6 +1039,10 @@ resources:
- NovaPublicIP: {get_attr: [PublicVirtualIP, ip_address]}
- NovaPassword: {get_param: NovaPassword}
- NtpServer: {get_param: NtpServer}
-+ OpenDaylightPort: {get_param: OpenDaylightPort}
-+ OpenDaylightUsername: {get_param: OpenDaylightUsername}
-+ OpenDaylightPassword: {get_param: OpenDaylightPassword}
-+ OpenDaylightEnableHA: {get_param: OpenDaylightEnableHA}
- RabbitHost: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, RabbitMqNetwork]}]}
- RabbitPassword: {get_param: RabbitPassword}
- RabbitUserName: {get_param: RabbitUserName}
-@@ -1068,6 +1163,7 @@ resources:
- compute_hosts: {get_attr: [Compute, hosts_entry]}
- controller_hosts: {get_attr: [Controller, hosts_entry]}
- controller_ips: {get_attr: [Controller, ip_address]}
-+ opendaylight_ip: {get_attr: [OpenDaylightNode, ip_address]}
- block_storage_hosts: {get_attr: [BlockStorage, hosts_entry]}
- object_storage_hosts: {get_attr: [ObjectStorage, hosts_entry]}
- ceph_storage_hosts: {get_attr: [CephStorage, hosts_entry]}
-@@ -1081,6 +1177,8 @@ resources:
- heat_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]}
- swift_proxy_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]}
- ceilometer_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
-+ aodh_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, AodhApiNetwork]}]}
-+ opendaylight_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, OpenDaylightApiNetwork]}]}
- nova_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
- nova_metadata_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, NovaMetadataNetwork]}]}
- glance_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
-@@ -1189,6 +1287,8 @@ resources:
- nova_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
- nova_metadata_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaMetadataNetwork]}]}
- ceilometer_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
-+ aodh_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, AodhApiNetwork]}]}
-+ opendaylight_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, OpenDaylightApiNetwork]}]}
- heat_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]}
- horizon_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HorizonNetwork]}]}
- redis_vip: {get_attr: [RedisVirtualIP, ip_address]}
-@@ -1434,6 +1534,9 @@ outputs:
- PublicVip:
- description: Controller VIP for public API endpoints
- value: {get_attr: [PublicVirtualIP, ip_address]}
-+ AodhInternalVip:
-+ description: VIP for Aodh API internal endpoint
-+ value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, AodhApiNetwork]}]}
- CeilometerInternalVip:
- description: VIP for Ceilometer API internal endpoint
- value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
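The OpenDaylightNode resource group above sizes itself from OpenDaylightCount and names each member with OpenDaylightHostnameFormat: str_replace fills in %stackname% and the ResourceGroup supplies %index%. A small sketch of the resulting hostnames, with an illustrative stack name:

# Illustrative expansion of the default '%stackname%-opendaylight-%index%'
# format; the real substitution is done by str_replace and ResourceGroup.
def odl_hostname(fmt, stackname, index):
    return fmt.replace("%stackname%", stackname).replace("%index%", str(index))

fmt = "%stackname%-opendaylight-%index%"
print([odl_hostname(fmt, "overcloud", i) for i in range(2)])
# ['overcloud-opendaylight-0', 'overcloud-opendaylight-1']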
-diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml
-index 2bc519b..1ebaff5 100644
---- a/puppet/all-nodes-config.yaml
-+++ b/puppet/all-nodes-config.yaml
-@@ -8,6 +8,8 @@ parameters:
- type: comma_delimited_list
- controller_ips:
- type: comma_delimited_list
-+ opendaylight_ip:
-+ type: comma_delimited_list
- block_storage_hosts:
- type: comma_delimited_list
- object_storage_hosts:
-@@ -34,6 +36,10 @@ parameters:
- type: comma_delimited_list
- ceilometer_api_node_ips:
- type: comma_delimited_list
-+ aodh_api_node_ips:
-+ type: comma_delimited_list
-+ opendaylight_api_node_ips:
-+ type: comma_delimited_list
- nova_api_node_ips:
- type: comma_delimited_list
- nova_metadata_node_ips:
-@@ -82,6 +88,10 @@ resources:
- raw_data: {get_file: hieradata/RedHat.yaml}
- all_nodes:
- mapped_data:
-+ opendaylight_controller_ip:
-+ list_join:
-+ - ','
-+ - {get_param: opendaylight_ip}
- controller_node_ips:
- list_join:
- - ','
-@@ -166,6 +176,22 @@ resources:
- list_join:
- - "','"
- - {get_param: ceilometer_api_node_ips}
-+ aodh_api_node_ips:
-+ str_replace:
-+ template: "['SERVERS_LIST']"
-+ params:
-+ SERVERS_LIST:
-+ list_join:
-+ - "','"
-+ - {get_param: aodh_api_node_ips}
-+ opendaylight_api_node_ips:
-+ str_replace:
-+ template: "['SERVERS_LIST']"
-+ params:
-+ SERVERS_LIST:
-+ list_join:
-+ - "','"
-+ - {get_param: opendaylight_api_node_ips}
- nova_api_node_ips:
- str_replace:
- template: "['SERVERS_LIST']"
-@@ -239,6 +265,7 @@ resources:
- neutron::rabbit_hosts: *rabbit_nodes_array
- nova::rabbit_hosts: *rabbit_nodes_array
- keystone::rabbit_hosts: *rabbit_nodes_array
-+ aodh::rabbit_hosts: *rabbit_nodes_array
-
- outputs:
- config_id:
-diff --git a/puppet/compute.yaml b/puppet/compute.yaml
-index 70c7403..ba7cbfd 100644
---- a/puppet/compute.yaml
-+++ b/puppet/compute.yaml
-@@ -213,6 +213,27 @@ parameters:
- NtpServer:
- type: string
- default: ''
-+ OpenDaylightPort:
-+ default: 8081
-+ description: Set opendaylight service port
-+ type: number
-+ OpenDaylightUsername:
-+ default: 'admin'
-+ description: The username for the opendaylight server.
-+ type: string
-+ OpenDaylightPassword:
-+ default: 'admin'
-+ type: string
-+ description: The password for the opendaylight server.
-+ hidden: true
-+ OpenDaylightEnableHA:
-+ description: Knob to enable/disable ODL HA
-+ type: boolean
-+ default: false
-+ ONOSPort:
-+ default: 8181
-+ description: Set onos service port
-+ type: number
- RabbitHost:
- type: string
- default: '' # Has to be here because of the ignored empty value bug
-@@ -320,6 +341,11 @@ resources:
- properties:
- ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
-
-+ ExternalPort:
-+ type: OS::TripleO::Controller::Ports::ExternalPort
-+ properties:
-+ ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
-+
- NetIpMap:
- type: OS::TripleO::Network::Ports::NetIpMap
- properties:
-@@ -327,6 +353,7 @@ resources:
- InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
- StorageIp: {get_attr: [StoragePort, ip_address]}
- TenantIp: {get_attr: [TenantPort, ip_address]}
-+ ExternalIp: {get_attr: [ExternalPort, ip_address]}
-
- NetworkConfig:
- type: OS::TripleO::Compute::Net::SoftwareConfig
-@@ -335,6 +362,7 @@ resources:
- InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
- StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
- TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
-+ ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
-
- NetworkDeployment:
- type: OS::TripleO::SoftwareDeployment
-@@ -406,6 +434,11 @@ resources:
- neutron::rabbit_user: {get_input: rabbit_user}
- neutron::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
- neutron::rabbit_port: {get_input: rabbit_client_port}
-+ opendaylight_port: {get_input: opendaylight_port}
-+ opendaylight_username: {get_input: opendaylight_username}
-+ opendaylight_password: {get_input: opendaylight_password}
-+ opendaylight_enable_ha: {get_input: opendaylight_enable_ha}
-+ onos_port: {get_input: onos_port}
- neutron_flat_networks: {get_input: neutron_flat_networks}
- neutron_host: {get_input: neutron_host}
- neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip}
-@@ -459,6 +492,11 @@ resources:
- snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName}
- snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
- glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
-+ opendaylight_port: {get_param: OpenDaylightPort}
-+ opendaylight_username: {get_param: OpenDaylightUsername}
-+ opendaylight_password: {get_param: OpenDaylightPassword}
-+ opendaylight_enable_ha: {get_param: OpenDaylightEnableHA}
-+ onos_port: {get_param: ONOSPort}
- neutron_flat_networks: {get_param: NeutronFlatNetworks}
- neutron_host: {get_param: NeutronHost}
- neutron_local_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronTenantNetwork]}]}
-@@ -570,6 +608,9 @@ outputs:
- tenant_ip_address:
- description: IP address of the server in the tenant network
- value: {get_attr: [TenantPort, ip_address]}
-+ external_ip_address:
-+ description: IP address of the server in the external network
-+ value: {get_attr: [ExternalPort, ip_address]}
- hostname:
- description: Hostname of the server
- value: {get_attr: [NovaCompute, name]}
-diff --git a/puppet/controller.yaml b/puppet/controller.yaml
-index ea0b3af..0a3668e 100644
---- a/puppet/controller.yaml
-+++ b/puppet/controller.yaml
-@@ -14,6 +14,14 @@ parameters:
- description: The keystone auth secret and db password.
- type: string
- hidden: true
-+ AodhApiVirtualIP:
-+ type: string
-+ default: ''
-+ AodhPassword:
-+ default: unset
-+ description: The password for the aodh services.
-+ type: string
-+ hidden: true
- CeilometerApiVirtualIP:
- type: string
- default: ''
-@@ -357,6 +365,10 @@ parameters:
- default: 'True'
- description: Allow automatic l3-agent failover
- type: string
-+ NeutronEnableForceMetadata:
-+ default: 'False'
-+ description: If True, DHCP always provides a metadata route to the VM.
-+ type: string
- NeutronEnableTunnelling:
- type: string
- default: "True"
-@@ -443,6 +455,42 @@ parameters:
- NtpServer:
- type: string
- default: ''
-+ OpenDaylightPort:
-+ default: 8081
-+ description: Set opendaylight service port
-+ type: number
-+ OpenDaylightInstall:
-+ default: false
-+ description: Whether to install OpenDaylight on the control nodes.
-+ type: boolean
-+ OpenDaylightUsername:
-+ default: 'admin'
-+ description: The username for the opendaylight server.
-+ type: string
-+ OpenDaylightPassword:
-+ default: 'admin'
-+ type: string
-+ description: The password for the opendaylight server.
-+ hidden: true
-+ OpenDaylightEnableL3:
-+ description: Knob to enable/disable ODL L3
-+ type: string
-+ default: 'no'
-+ OpenDaylightEnableHA:
-+ description: Knob to enable/disable ODL HA
-+ type: boolean
-+ default: false
-+ OpenDaylightFeatures:
-+ description: List of features to install with ODL
-+ type: comma_delimited_list
-+ default: "odl-ovsdb-openstack"
-+ OpenDaylightApiVirtualIP:
-+ type: string
-+ default: ''
-+ ONOSPort:
-+ default: 8181
-+ description: Set onos service port
-+ type: number
- PcsdPassword:
- type: string
- description: The password for the 'pcsd' user.
-@@ -696,6 +744,7 @@ resources:
- input_values:
- bootstack_nodeid: {get_attr: [Controller, name]}
- neutron_enable_tunneling: {get_param: NeutronEnableTunnelling}
-+ neutron_enable_force_metadata: {get_param: NeutronEnableForceMetadata}
- haproxy_log_address: {get_param: HAProxySyslogAddress}
- heat.watch_server_url:
- list_join:
-@@ -774,6 +823,7 @@ resources:
- - {get_param: MysqlVirtualIP}
- - '/heat'
- keystone_ca_certificate: {get_param: KeystoneCACertificate}
-+ keystone_admin_vip: {get_param: KeystoneAdminApiVirtualIP}
- keystone_signing_key: {get_param: KeystoneSigningKey}
- keystone_signing_certificate: {get_param: KeystoneSigningCertificate}
- keystone_ssl_certificate: {get_param: KeystoneSSLCertificate}
-@@ -805,6 +855,14 @@ resources:
- template: tripleo-CLUSTER
- params:
- CLUSTER: {get_param: MysqlClusterUniquePart}
-+ opendaylight_port: {get_param: OpenDaylightPort}
-+ opendaylight_install: {get_param: OpenDaylightInstall}
-+ opendaylight_username: {get_param: OpenDaylightUsername}
-+ opendaylight_password: {get_param: OpenDaylightPassword}
-+ opendaylight_enable_l3: {get_param: OpenDaylightEnableL3}
-+ opendaylight_enable_ha: {get_param: OpenDaylightEnableHA}
-+ opendaylight_features: {get_param: OpenDaylightFeatures}
-+ onos_port: {get_param: ONOSPort}
- neutron_flat_networks: {get_param: NeutronFlatNetworks}
- neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
- neutron_agent_mode: {get_param: NeutronAgentMode}
-@@ -879,6 +937,7 @@ resources:
- ceilometer_backend: {get_param: CeilometerBackend}
- ceilometer_metering_secret: {get_param: CeilometerMeteringSecret}
- ceilometer_password: {get_param: CeilometerPassword}
-+ aodh_password: {get_param: AodhPassword}
- ceilometer_coordination_url:
- list_join:
- - ''
-@@ -891,6 +950,12 @@ resources:
- - - 'mysql://ceilometer:unset@'
- - {get_param: MysqlVirtualIP}
- - '/ceilometer'
-+ ceilometer_public_url: {get_param: [EndpointMap, CeilometerPublic, uri]}
-+ ceilometer_internal_url: {get_param: [EndpointMap, CeilometerInternal, uri]}
-+ ceilometer_admin_url: {get_param: [EndpointMap, CeilometerAdmin, uri]}
-+ aodh_public_url: {get_param: [EndpointMap, AodhPublic, uri]}
-+ aodh_internal_url: {get_param: [EndpointMap, AodhInternal, uri]}
-+ aodh_admin_url: {get_param: [EndpointMap, AodhAdmin, uri]}
- snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName}
- snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
- nova_password: {get_param: NovaPassword}
-@@ -948,6 +1013,8 @@ resources:
- neutron_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]}
- neutron_local_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronTenantNetwork]}]}
- ceilometer_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
-+ aodh_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, AodhApiNetwork]}]}
-+ opendaylight_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, OpenDaylightApiNetwork]}]}
- nova_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
- nova_metadata_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NovaMetadataNetwork]}]}
- horizon_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, HorizonNetwork]}]}
-@@ -1041,7 +1108,7 @@ resources:
- cinder_iscsi_ip_address: {get_input: cinder_iscsi_network}
- cinder::database_connection: {get_input: cinder_dsn}
- cinder::api::keystone_password: {get_input: cinder_password}
-- cinder::api::auth_uri: {get_input: keystone_auth_uri}
-+ cinder::api::keystone_auth_host: {get_input: keystone_admin_vip}
- cinder::api::identity_uri: {get_input: keystone_identity_uri}
- cinder::api::bind_host: {get_input: cinder_api_network}
- cinder::rabbit_userid: {get_input: rabbit_username}
-@@ -1136,6 +1203,18 @@ resources:
- mysql_bind_host: {get_input: mysql_network}
- mysql_virtual_ip: {get_input: mysql_virtual_ip}
-
-+ # OpenDaylight
-+ opendaylight_port: {get_input: opendaylight_port}
-+ opendaylight_install: {get_input: opendaylight_install}
-+ opendaylight_username: {get_input: opendaylight_username}
-+ opendaylight_password: {get_input: opendaylight_password}
-+ opendaylight_enable_l3: {get_input: opendaylight_enable_l3}
-+ opendaylight_enable_ha: {get_input: opendaylight_enable_ha}
-+ opendaylight_features: {get_input: opendaylight_features}
-+
-+ # ONOS
-+ onos_port: {get_input: onos_port}
-+
- # Neutron
- neutron::bind_host: {get_input: neutron_api_network}
- neutron::rabbit_password: {get_input: rabbit_password}
-@@ -1152,6 +1231,7 @@ resources:
- neutron_flat_networks: {get_input: neutron_flat_networks}
- neutron::agents::metadata::shared_secret: {get_input: neutron_metadata_proxy_shared_secret}
- neutron::agents::metadata::metadata_ip: {get_input: neutron_api_network}
-+ neutron::agents::dhcp::enable_force_metadata: {get_input: neutron_enable_force_metadata}
- neutron_agent_mode: {get_input: neutron_agent_mode}
- neutron_router_distributed: {get_input: neutron_router_distributed}
- neutron::core_plugin: {get_input: neutron_core_plugin}
-@@ -1198,6 +1278,27 @@ resources:
- snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name}
- snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password}
-
-+ # Aodh
-+ aodh::rabbit_userid: {get_input: rabbit_username}
-+ aodh::rabbit_password: {get_input: rabbit_password}
-+ aodh::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
-+ aodh::rabbit_port: {get_input: rabbit_client_port}
-+ aodh::debug: {get_input: debug}
-+ aodh::wsgi::apache::ssl: false
-+ aodh::api::service_name: 'httpd'
-+ aodh::api::host: {get_input: aodh_api_network}
-+ aodh::api::keystone_password: {get_input: aodh_password}
-+ aodh::api::keystone_auth_uri: {get_input: keystone_auth_uri}
-+ aodh::api::keystone_identity_uri: {get_input: keystone_identity_uri}
-+ aodh::auth::auth_password: {get_input: aodh_password}
-+ aodh::keystone::auth::public_url: {get_input: aodh_public_url }
-+ aodh::keystone::auth::internal_url: {get_input: aodh_internal_url }
-+ aodh::keystone::auth::admin_url: {get_input: aodh_admin_url }
-+ aodh::keystone::auth::password: {get_input: aodh_password }
-+ aodh::keystone::auth::region: {get_input: keystone_region}
-+ # for a migration path from ceilometer-alarm to aodh, we use the same database & coordination
-+ aodh::evaluator::coordination_url: {get_input: ceilometer_coordination_url}
-+
- # Nova
- nova::rabbit_userid: {get_input: rabbit_username}
- nova::rabbit_password: {get_input: rabbit_password}
-@@ -1244,6 +1345,7 @@ resources:
- tripleo::loadbalancer::haproxy_log_address: {get_input: haproxy_log_address}
- tripleo::packages::enable_install: {get_input: enable_package_install}
- tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
-+ tripleo::loadbalancer::opendaylight: {get_input: opendaylight_enable_ha}
-
- # Hook for site-specific additional pre-deployment config, e.g extra hieradata
- ControllerExtraConfigPre:
-diff --git a/puppet/hieradata/common.yaml b/puppet/hieradata/common.yaml
-index 030f661..5840016 100644
---- a/puppet/hieradata/common.yaml
-+++ b/puppet/hieradata/common.yaml
-@@ -6,6 +6,7 @@ ceilometer::agent::auth::auth_region: 'regionOne'
- # FIXME: Might be better to use 'service' tenant here but this requires
- # changes in the tripleo-incubator keystone role setup
- ceilometer::agent::auth::auth_tenant_name: 'admin'
-+aodh::auth::auth_tenant_name: 'admin'
-
- nova::network::neutron::neutron_admin_tenant_name: 'service'
- nova::network::neutron::neutron_admin_username: 'neutron'
-diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml
-index 4b7fd81..7dbc2e9 100644
---- a/puppet/hieradata/controller.yaml
-+++ b/puppet/hieradata/controller.yaml
-@@ -32,6 +32,7 @@ redis::sentinel::notification_script: '/usr/local/bin/redis-notifications.sh'
- # service tenant
- nova::api::admin_tenant_name: 'service'
- glance::api::keystone_tenant: 'service'
-+aodh::api::keystone_tenant: 'service'
- glance::registry::keystone_tenant: 'service'
- neutron::server::auth_tenant: 'service'
- neutron::agents::metadata::auth_tenant: 'service'
-@@ -39,6 +40,7 @@ cinder::api::keystone_tenant: 'service'
- swift::proxy::authtoken::admin_tenant_name: 'service'
- ceilometer::api::keystone_tenant: 'service'
- heat::keystone_tenant: 'service'
-+aodh::keystone::auth::tenant: 'service'
-
- # keystone
- keystone::cron::token_flush::maxdelay: 3600
-@@ -72,7 +74,7 @@ neutron::agents::dhcp::dnsmasq_config_file: /etc/neutron/dnsmasq-neutron.conf
-
- # nova
- nova::notify_on_state_change: 'vm_and_task_state'
--nova::api::default_floating_pool: 'public'
-+nova::api::default_floating_pool: 'external'
- nova::api::osapi_v3: true
- nova::scheduler::filter::ram_allocation_ratio: '1.0'
-
-@@ -115,6 +117,7 @@ tripleo::loadbalancer::mysql: true
- tripleo::loadbalancer::redis: true
- tripleo::loadbalancer::swift_proxy_server: true
- tripleo::loadbalancer::ceilometer: true
-+tripleo::loadbalancer::aodh: true
- tripleo::loadbalancer::heat_api: true
- tripleo::loadbalancer::heat_cloudwatch: true
- tripleo::loadbalancer::heat_cfn: true
-diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp
-index cd41cc7..474d782 100644
---- a/puppet/manifests/overcloud_compute.pp
-+++ b/puppet/manifests/overcloud_compute.pp
-@@ -75,9 +75,52 @@ class { '::neutron::plugins::ml2':
- tenant_network_types => [hiera('neutron_tenant_network_type')],
- }
-
--class { '::neutron::agents::ml2::ovs':
-- bridge_mappings => split(hiera('neutron_bridge_mappings'), ','),
-- tunnel_types => split(hiera('neutron_tunnel_types'), ','),
-+if 'opendaylight' in hiera('neutron_mechanism_drivers') {
-+
-+ if str2bool(hiera('opendaylight_install', 'false')) {
-+ $controller_ips = split(hiera('controller_node_ips'), ',')
-+ if hiera('opendaylight_enable_ha', false) {
-+ $odl_ovsdb_iface = "tcp:${controller_ips[0]}:6640 tcp:${controller_ips[1]}:6640 tcp:${controller_ips[2]}:6640"
-+ # Workaround to work with current puppet-neutron
-+ # This isn't the best solution, since the odl check URL ends up being only the first node in HA case
-+ $opendaylight_controller_ip = $controller_ips[0]
-+ # Bug where netvirt:1 doesn't come up right with HA
-+ # Check ovsdb:1 instead
-+ $net_virt_url = 'restconf/operational/network-topology:network-topology/topology/ovsdb:1'
-+ } else {
-+ $opendaylight_controller_ip = $controller_ips[0]
-+ $odl_ovsdb_iface = "tcp:${opendaylight_controller_ip}:6640"
-+ $net_virt_url = 'restconf/operational/network-topology:network-topology/topology/netvirt:1'
-+ }
-+ } else {
-+ $opendaylight_controller_ip = hiera('opendaylight_controller_ip')
-+ $odl_ovsdb_iface = "tcp:${opendaylight_controller_ip}:6640"
-+ $net_virt_url = 'restconf/operational/network-topology:network-topology/topology/netvirt:1'
-+ }
-+
-+ $opendaylight_port = hiera('opendaylight_port')
-+ $private_ip = hiera('neutron::agents::ml2::ovs::local_ip')
-+ $opendaylight_url = "http://${opendaylight_controller_ip}:${opendaylight_port}/${net_virt_url}"
-+
-+ class { '::neutron::plugins::ovs::opendaylight':
-+ tunnel_ip => $private_ip,
-+ odl_username => hiera('opendaylight_username'),
-+ odl_password => hiera('opendaylight_password'),
-+ odl_check_url => $opendaylight_url,
-+ odl_ovsdb_iface => $odl_ovsdb_iface,
-+ }
-+
-+} elsif 'onos_ml2' in hiera('neutron_mechanism_drivers') {
-+ $controller_ips = split(hiera('controller_node_ips'), ',')
-+ class {'onos::ovs_computer':
-+ manager_ip => $controller_ips[0]
-+ }
-+
-+} else {
-+ class { 'neutron::agents::ml2::ovs':
-+ bridge_mappings => split(hiera('neutron_bridge_mappings'), ','),
-+ tunnel_types => split(hiera('neutron_tunnel_types'), ','),
-+ }
- }
-
- if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
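The compute manifest above derives three things from hiera when the opendaylight mechanism driver is active: the OVSDB manager string handed to Open vSwitch, the controller IP used for REST checks, and the check path itself (ovsdb:1 instead of netvirt:1 in the HA case, per the bug noted in the comments). A minimal Python sketch of that selection logic, with illustrative controller IPs and the default port from the templates:

# Minimal sketch of the URL/iface selection in the compute manifest above;
# IPs are illustrative, the real values come from hiera.
def odl_settings(controller_ips, odl_port=8081, enable_ha=False):
    if enable_ha:
        # HA: point OVS at every controller's OVSDB manager and, per the
        # workaround above, health-check ovsdb:1 on the first node only.
        ovsdb_iface = " ".join("tcp:{}:6640".format(ip) for ip in controller_ips)
        check_path = ("restconf/operational/network-topology:"
                      "network-topology/topology/ovsdb:1")
    else:
        ovsdb_iface = "tcp:{}:6640".format(controller_ips[0])
        check_path = ("restconf/operational/network-topology:"
                      "network-topology/topology/netvirt:1")
    check_url = "http://{}:{}/{}".format(controller_ips[0], odl_port, check_path)
    return ovsdb_iface, check_url

print(odl_settings(["192.0.2.11", "192.0.2.12", "192.0.2.13"], enable_ha=True))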
-diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp
-index 1f6c2be..a3c0479 100644
---- a/puppet/manifests/overcloud_controller.pp
-+++ b/puppet/manifests/overcloud_controller.pp
-@@ -30,6 +30,21 @@ if hiera('step') >= 1 {
-
- if hiera('step') >= 2 {
-
-+ if str2bool(hiera('opendaylight_install', 'false')) {
-+ class {"opendaylight":
-+ extra_features => any2array(hiera('opendaylight_features', 'odl-ovsdb-openstack')),
-+ odl_rest_port => hiera('opendaylight_port'),
-+ enable_l3 => hiera('opendaylight_enable_l3', 'no'),
-+ }
-+ }
-+
-+ if 'onos_ml2' in hiera('neutron_mechanism_drivers') {
-+ # install onos and config ovs
-+ class {"onos":
-+ controllers_ip => $controller_node_ips
-+ }
-+ }
-+
- if count(hiera('ntp::servers')) > 0 {
- include ::ntp
- }
-@@ -158,6 +173,9 @@ if hiera('step') >= 2 {
-
- if hiera('step') >= 3 {
-
-+ # Apache
-+ include ::apache
-+
- include ::keystone
-
- #TODO: need a cleanup-keystone-tokens.sh solution here
-@@ -223,9 +241,7 @@ if hiera('step') >= 3 {
- include ::nova::scheduler
- include ::nova::scheduler::filter
-
-- include ::neutron
- include ::neutron::server
-- include ::neutron::agents::l3
- include ::neutron::agents::dhcp
- include ::neutron::agents::metadata
-
-@@ -237,15 +253,101 @@ if hiera('step') >= 3 {
- require => Package['neutron'],
- }
-
-+ if 'onos_ml2' in hiera('neutron_mechanism_drivers') {
-+ # config neutron service_plugins to onos driver
-+ class { '::neutron':
-+ service_plugins => [hiera('neutron_service_plugins')]
-+ }
-+ } else {
-+ include ::neutron
-+ if ! ('opendaylight' in hiera('neutron_mechanism_drivers')) or ! str2bool(hiera('opendaylight_enable_l3', 'no')) {
-+ include ::neutron::agents::l3
-+ }
-+ }
-+
- class { '::neutron::plugins::ml2':
- flat_networks => split(hiera('neutron_flat_networks'), ','),
- tenant_network_types => [hiera('neutron_tenant_network_type')],
- mechanism_drivers => [hiera('neutron_mechanism_drivers')],
- }
-- class { '::neutron::agents::ml2::ovs':
-- bridge_mappings => split(hiera('neutron_bridge_mappings'), ','),
-- tunnel_types => split(hiera('neutron_tunnel_types'), ','),
-+
-+ if 'opendaylight' in hiera('neutron_mechanism_drivers') {
-+ if ! str2bool(hiera('opendaylight_enable_l3', 'no')) {
-+ Service['neutron-server'] -> Service['neutron-l3']
-+ }
-+
-+ if str2bool(hiera('opendaylight_install', 'false')) {
-+ $controller_ips = split(hiera('controller_node_ips'), ',')
-+ $opendaylight_controller_ip = $controller_ips[0]
-+ } else {
-+ $opendaylight_controller_ip = hiera('opendaylight_controller_ip')
-+ }
-+
-+ # co-existence hacks for SFC
-+ if hiera('opendaylight_features', 'odl-ovsdb-openstack') =~ /odl-ovsdb-sfc-rest/ {
-+ $opendaylight_port = hiera('opendaylight_port')
-+ $netvirt_coexist_url = "http://${opendaylight_controller_ip}:${opendaylight_port}/restconf/config/netvirt-providers-config:netvirt-providers-config"
-+ $netvirt_post_body = "{'netvirt-providers-config': {'table-offset': 1}}"
-+ $sfc_coexist_url = "http://${opendaylight_controller_ip}:${opendaylight_port}/restconf/config/sfc-of-renderer:sfc-of-renderer-config"
-+ $sfc_post_body = "{ 'sfc-of-renderer-config' : { 'sfc-of-table-offset' : 150, 'sfc-of-app-egress-table-offset' : 11 }}"
-+ $odl_username = hiera('opendaylight_username')
-+ $odl_password = hiera('opendaylight_password')
-+ exec { 'Coexistence table offsets for netvirt':
-+ command => "curl -o /dev/null --fail --silent -u ${odl_username}:${odl_password} ${netvirt_coexist_url} -i -H 'Content-Type: application/json' --data \'${netvirt_post_body}\' -X PUT",
-+ tries => 5,
-+ try_sleep => 30,
-+ path => '/usr/sbin:/usr/bin:/sbin:/bin',
-+ } ->
-+ # Coexist for SFC
-+ exec { 'Coexistence table offsets for sfc':
-+ command => "curl -o /dev/null --fail --silent -u ${odl_username}:${odl_password} ${sfc_coexist_url} -i -H 'Content-Type: application/json' --data \'${sfc_post_body}\' -X PUT",
-+ tries => 5,
-+ try_sleep => 30,
-+ path => '/usr/sbin:/usr/bin:/sbin:/bin',
-+ }
-+ }
-+
-+ $private_ip = hiera('neutron::agents::ml2::ovs::local_ip')
-+ $net_virt_url = 'restconf/operational/network-topology:network-topology/topology/netvirt:1'
-+ $opendaylight_url = "http://${opendaylight_controller_ip}:${opendaylight_port}/${net_virt_url}"
-+ $odl_ovsdb_iface = "tcp:${opendaylight_controller_ip}:6640"
-+
-+ class { '::neutron::plugins::ml2::opendaylight':
-+ odl_username => hiera('opendaylight_username'),
-+ odl_password => hiera('opendaylight_password'),
-+ odl_url => "http://${opendaylight_controller_ip}:${opendaylight_port}/controller/nb/v2/neutron";
-+ }
-+
-+ class { '::neutron::plugins::ovs::opendaylight':
-+ tunnel_ip => $private_ip,
-+ odl_username => hiera('opendaylight_username'),
-+ odl_password => hiera('opendaylight_password'),
-+ odl_check_url => $opendaylight_url,
-+ odl_ovsdb_iface => $odl_ovsdb_iface,
-+ }
-+
-+ } elsif 'onos_ml2' in hiera('neutron_mechanism_drivers') {
-+ #config ml2_conf.ini with onos url address
-+ $onos_port = hiera('onos_port')
-+ $private_ip = hiera('neutron::agents::ml2::ovs::local_ip')
-+
-+ neutron_plugin_ml2 {
-+ 'onos/username': value => 'admin';
-+ 'onos/password': value => 'admin';
-+ 'onos/url_path': value => "http://${controller_node_ips[0]}:${onos_port}/onos/vtn";
-+ }
-+
-+ } else {
-+
-+ class { 'neutron::agents::ml2::ovs':
-+ bridge_mappings => split(hiera('neutron_bridge_mappings'), ','),
-+ tunnel_types => split(hiera('neutron_tunnel_types'), ','),
-+ }
-+
-+ Service['neutron-server'] -> Service['neutron-ovs-agent-service']
-+ Service['neutron-server'] -> Service['neutron-l3']
- }
-+
- if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
- include ::neutron::plugins::ml2::cisco::nexus1000v
-
-@@ -280,8 +382,6 @@ if hiera('step') >= 3 {
- }
-
- Service['neutron-server'] -> Service['neutron-dhcp-service']
-- Service['neutron-server'] -> Service['neutron-l3']
-- Service['neutron-server'] -> Service['neutron-ovs-agent-service']
- Service['neutron-server'] -> Service['neutron-metadata']
-
- include ::cinder
-@@ -447,6 +547,20 @@ if hiera('step') >= 3 {
-
- Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
-
-+ # Aodh
-+ include ::aodh::auth
-+ include ::aodh::api
-+ include ::aodh::evaluator
-+ include ::aodh::notifier
-+ include ::aodh::listener
-+ include ::aodh::client
-+ include ::aodh::db::sync
-+ class { '::aodh' :
-+ database_connection => $ceilometer_database_connection,
-+ }
-+ # To manage the upgrade:
-+ Exec['ceilometer-dbsync'] -> Exec['aodh-db-sync']
-+
- # Heat
- include ::heat
- include ::heat::api
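When odl-ovsdb-sfc-rest is among the enabled features, the controller manifest above PUTs table-offset settings into OpenDaylight so that netvirt and the SFC OpenFlow renderer can coexist in the same pipeline. A rough equivalent of those two curl execs, assuming the requests library, the default admin credentials and port from the templates, and an illustrative controller IP:

# Rough equivalent of the co-existence PUTs issued by the exec resources above.
import requests

def put_config(odl_ip, odl_port, user, password, path, payload):
    url = "http://{}:{}/restconf/config/{}".format(odl_ip, odl_port, path)
    resp = requests.put(url, json=payload, auth=(user, password))
    resp.raise_for_status()

put_config("192.0.2.11", 8081, "admin", "admin",
           "netvirt-providers-config:netvirt-providers-config",
           {"netvirt-providers-config": {"table-offset": 1}})
put_config("192.0.2.11", 8081, "admin", "admin",
           "sfc-of-renderer:sfc-of-renderer-config",
           {"sfc-of-renderer-config": {"sfc-of-table-offset": 150,
                                       "sfc-of-app-egress-table-offset": 11}})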
-diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
-index 3fb92f3..29f9c7f 100644
---- a/puppet/manifests/overcloud_controller_pacemaker.pp
-+++ b/puppet/manifests/overcloud_controller_pacemaker.pp
-@@ -380,6 +380,29 @@ if hiera('step') >= 2 {
-
- }
-
-+ if str2bool(hiera('opendaylight_install', 'false')) {
-+ $node_string = split(hiera('bootstack_nodeid'), '-')
-+ $controller_index = $node_string[-1]
-+ $ha_node_index = $controller_index + 1
-+
-+ class {"opendaylight":
-+ extra_features => any2array(hiera('opendaylight_features', 'odl-ovsdb-openstack')),
-+ odl_rest_port => hiera('opendaylight_port'),
-+ odl_bind_ip => $controller_node_ips[$controller_index],
-+ enable_l3 => hiera('opendaylight_enable_l3', 'no'),
-+ enable_ha => hiera('opendaylight_enable_ha', false),
-+ ha_node_ips => split(hiera('controller_node_ips'), ','),
-+ ha_node_index => $ha_node_index,
-+ }
-+ }
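Each controller in the pacemaker manifest above computes its ODL HA node index from its own bootstack_nodeid: the trailing number of the node name is taken and incremented by one, presumably because the opendaylight class expects 1-based member indices. A tiny sketch of that derivation, with an illustrative node id:

# Illustrative: derive the ODL HA node index from a bootstack node id
# such as 'overcloud-controller-2', mirroring the split/offset logic above.
def ha_node_index(bootstack_nodeid):
    controller_index = int(bootstack_nodeid.split("-")[-1])
    return controller_index + 1

print(ha_node_index("overcloud-controller-2"))  # 3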
-+
-+ if 'onos_ml2' in hiera('neutron_mechanism_drivers') {
-+ # install onos and config ovs
-+ class {"onos":
-+ controllers_ip => $controller_node_ips
-+ }
-+ }
-+
- exec { 'galera-ready' :
- command => '/usr/bin/clustercheck >/dev/null',
- timeout => 30,
-@@ -584,7 +607,14 @@ if hiera('step') >= 3 {
- include ::nova::network::neutron
-
- # Neutron class definitions
-- include ::neutron
-+ if 'onos_ml2' in hiera('neutron_mechanism_drivers') {
-+ # config neutron service_plugins to onos driver
-+ class { '::neutron':
-+ service_plugins => [hiera('neutron_service_plugins')]
-+ }
-+ } else {
-+ include ::neutron
-+ }
- class { '::neutron::server' :
- sync_db => $sync_db,
- manage_service => false,
-@@ -594,10 +624,6 @@ if hiera('step') >= 3 {
- manage_service => false,
- enabled => false,
- }
-- class { '::neutron::agents::l3' :
-- manage_service => false,
-- enabled => false,
-- }
- class { '::neutron::agents::metadata':
- manage_service => false,
- enabled => false,
-@@ -609,18 +635,98 @@ if hiera('step') >= 3 {
- notify => Service['neutron-dhcp-service'],
- require => Package['neutron'],
- }
-+
-+ # SDNVPN Hack
-+ if ('networking_bgpvpn.neutron.services.plugin.BGPVPNPlugin' in hiera('neutron::service_plugins'))
-+ {
-+ class { 'neutron::config':
-+ server_config => {
-+ 'service_providers/service_provider' => {
-+ 'value' => 'BGPVPN:Dummy:networking_bgpvpn.neutron.services.service_drivers.driver_api.BGPVPNDriver:default'
-+ }
-+ }
-+ }
-+ }
-+
- class { '::neutron::plugins::ml2':
- flat_networks => split(hiera('neutron_flat_networks'), ','),
- tenant_network_types => [hiera('neutron_tenant_network_type')],
- mechanism_drivers => [hiera('neutron_mechanism_drivers')],
- }
-- class { '::neutron::agents::ml2::ovs':
-- manage_service => false,
-- enabled => false,
-- bridge_mappings => split(hiera('neutron_bridge_mappings'), ','),
-- tunnel_types => split(hiera('neutron_tunnel_types'), ','),
-- }
-+ if 'opendaylight' in hiera('neutron_mechanism_drivers') {
-+ if str2bool(hiera('opendaylight_install', 'false')) {
-+ $controller_ips = split(hiera('controller_node_ips'), ',')
-+ if hiera('opendaylight_enable_ha', false) {
-+ $odl_ovsdb_iface = "tcp:${controller_ips[0]}:6640 tcp:${controller_ips[1]}:6640 tcp:${controller_ips[2]}:6640"
-+ # Workaround to work with current puppet-neutron
-+ # This isn't the best solution, since the odl check URL ends up being only the first node in HA case
-+ $opendaylight_controller_ip = $controller_ips[0]
-+ # Bug where netvirt:1 doesn't come up right with HA
-+ # Check ovsdb:1 instead
-+ $net_virt_url = 'restconf/operational/network-topology:network-topology/topology/ovsdb:1'
-+ } else {
-+ $opendaylight_controller_ip = $controller_ips[0]
-+ $odl_ovsdb_iface = "tcp:${opendaylight_controller_ip}:6640"
-+ $net_virt_url = 'restconf/operational/network-topology:network-topology/topology/netvirt:1'
-+ }
-+ } else {
-+ $opendaylight_controller_ip = hiera('opendaylight_controller_ip')
-+ $odl_ovsdb_iface = "tcp:${opendaylight_controller_ip}:6640"
-+ $net_virt_url = 'restconf/operational/network-topology:network-topology/topology/netvirt:1'
-+ }
-+
-+ $opendaylight_port = hiera('opendaylight_port')
-+ $private_ip = hiera('neutron::agents::ml2::ovs::local_ip')
-+ $opendaylight_url = "http://${opendaylight_controller_ip}:${opendaylight_port}/${net_virt_url}"
-+ $odl_vip = hiera('opendaylight_api_vip')
-+
-+ if ! $odl_vip {
-+ fail('ODL VIP not set in hiera or empty')
-+ }
-+
-+ class { '::neutron::plugins::ml2::opendaylight':
-+ odl_username => hiera('opendaylight_username'),
-+ odl_password => hiera('opendaylight_password'),
-+ odl_url => "http://${odl_vip}:${opendaylight_port}/controller/nb/v2/neutron";
-+ }
-+
-+ class { '::neutron::plugins::ovs::opendaylight':
-+ tunnel_ip => $private_ip,
-+ odl_username => hiera('opendaylight_username'),
-+ odl_password => hiera('opendaylight_password'),
-+ odl_check_url => $opendaylight_url,
-+ odl_ovsdb_iface => $odl_ovsdb_iface,
-+ }
-
-+ if ! str2bool(hiera('opendaylight_enable_l3', 'no')) {
-+ class { '::neutron::agents::l3' :
-+ manage_service => false,
-+ enabled => false,
-+ }
-+ }
-+ } elsif 'onos_ml2' in hiera('neutron_mechanism_drivers') {
-+ #config ml2_conf.ini with onos url address
-+ $onos_port = hiera('onos_port')
-+ $private_ip = hiera('neutron::agents::ml2::ovs::local_ip')
-+
-+ neutron_plugin_ml2 {
-+ 'onos/username': value => 'admin';
-+ 'onos/password': value => 'admin';
-+ 'onos/url_path': value => "http://${controller_node_ips[0]}:${onos_port}/onos/vtn";
-+ }
-+
-+ } else {
-+ class { '::neutron::agents::l3' :
-+ manage_service => false,
-+ enabled => false,
-+ }
-+ class { 'neutron::agents::ml2::ovs':
-+ manage_service => false,
-+ enabled => false,
-+ bridge_mappings => split(hiera('neutron_bridge_mappings'), ','),
-+ tunnel_types => split(hiera('neutron_tunnel_types'), ','),
-+ }
-+ }
- if 'cisco_ucsm' in hiera('neutron_mechanism_drivers') {
- include ::neutron::plugins::ml2::cisco::ucsm
- }
-@@ -645,8 +751,10 @@ if hiera('step') >= 3 {
- if hiera('neutron_enable_bigswitch_ml2', false) {
- include ::neutron::plugins::ml2::bigswitch::restproxy
- }
-- neutron_l3_agent_config {
-- 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
-+ if !('onos_ml2' in hiera('neutron_mechanism_drivers') or str2bool(hiera('opendaylight_enable_l3', 'no'))) {
-+ neutron_l3_agent_config {
-+ 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
-+ }
- }
- neutron_dhcp_agent_config {
- 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
-@@ -813,13 +921,13 @@ if hiera('step') >= 3 {
- swift::storage::filter::healthcheck { $swift_components : }
- }
-
-+ $mongo_node_string = join($mongo_node_ips_with_port, ',')
- # Ceilometer
- case downcase(hiera('ceilometer_backend')) {
- /mysql/: {
- $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string')
- }
- default: {
-- $mongo_node_string = join($mongo_node_ips_with_port, ',')
- $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
- }
- }
-@@ -879,6 +987,62 @@ if hiera('step') >= 3 {
- enabled => false,
- }
-
-+ $aodh_database_connection = "mongodb://${mongo_node_string}/aodh?replicaSet=${mongodb_replset}"
-+
-+ class { '::aodh::db':
-+ database_connection => $aodh_database_connection
-+ }
-+
-+ # Aodh
-+ include ::aodh
-+ include ::aodh::config
-+ include ::aodh::auth
-+ include ::aodh::client
-+ class { '::aodh::api':
-+ manage_service => false,
-+ enabled => false,
-+ }
-+ class { '::aodh::evaluator':
-+ manage_service => false,
-+ enabled => false,
-+ }
-+ class { '::aodh::notifier':
-+ manage_service => false,
-+ enabled => false,
-+ }
-+ class { '::aodh::listener':
-+ manage_service => false,
-+ enabled => false,
-+ }
-+
-+ $event_pipeline = "---
-+sources:
-+ - name: event_source
-+ events:
-+ - \"*\"
-+ sinks:
-+ - event_sink
-+sinks:
-+ - name: event_sink
-+ transformers:
-+ triggers:
-+ publishers:
-+ - notifier://?topic=alarm.all
-+ - notifier://
-+"
-+
-+ # aodh hacks
-+ file { '/etc/ceilometer/event_pipeline.yaml':
-+ ensure => present,
-+ content => $event_pipeline
-+ }
-+
-+ user { 'aodh':
-+ groups => 'nobody'
-+ }
-+
-+
-+
- # httpd/apache and horizon
- # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent
- class { '::apache' :
-@@ -1055,62 +1219,21 @@ if hiera('step') >= 4 {
- clone_params => 'interleave=true',
- require => Pacemaker::Resource::Service[$::keystone::params::service_name],
- }
-- pacemaker::resource::service { $::neutron::params::l3_agent_service:
-- clone_params => 'interleave=true',
-+ if !('onos_ml2' in hiera('neutron_mechanism_drivers')) {
-+ pacemaker::resource::service { $::neutron::params::l3_agent_service:
-+ clone_params => 'interleave=true',
-+ }
- }
- pacemaker::resource::service { $::neutron::params::dhcp_agent_service:
- clone_params => 'interleave=true',
- }
-- pacemaker::resource::service { $::neutron::params::ovs_agent_service:
-- clone_params => 'interleave=true',
-- }
- pacemaker::resource::service { $::neutron::params::metadata_agent_service:
- clone_params => 'interleave=true',
- }
-- pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service:
-- ocf_agent_name => 'neutron:OVSCleanup',
-- clone_params => 'interleave=true',
-- }
- pacemaker::resource::ocf { 'neutron-netns-cleanup':
- ocf_agent_name => 'neutron:NetnsCleanup',
- clone_params => 'interleave=true',
- }
--
-- # neutron - one chain ovs-cleanup-->netns-cleanup-->ovs-agent
-- pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint':
-- constraint_type => 'order',
-- first_resource => "${::neutron::params::ovs_cleanup_service}-clone",
-- second_resource => 'neutron-netns-cleanup-clone',
-- first_action => 'start',
-- second_action => 'start',
-- require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service],
-- Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
-- }
-- pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation':
-- source => 'neutron-netns-cleanup-clone',
-- target => "${::neutron::params::ovs_cleanup_service}-clone",
-- score => 'INFINITY',
-- require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service],
-- Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
-- }
-- pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint':
-- constraint_type => 'order',
-- first_resource => 'neutron-netns-cleanup-clone',
-- second_resource => "${::neutron::params::ovs_agent_service}-clone",
-- first_action => 'start',
-- second_action => 'start',
-- require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'],
-- Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
-- }
-- pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation':
-- source => "${::neutron::params::ovs_agent_service}-clone",
-- target => 'neutron-netns-cleanup-clone',
-- score => 'INFINITY',
-- require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'],
-- Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
-- }
--
-- #another chain keystone-->neutron-server-->ovs-agent-->dhcp-->l3
- pacemaker::constraint::base { 'keystone-to-neutron-server-constraint':
- constraint_type => 'order',
- first_resource => "${::keystone::params::service_name}-clone",
-@@ -1120,65 +1243,110 @@ if hiera('step') >= 4 {
- require => [Pacemaker::Resource::Service[$::keystone::params::service_name],
- Pacemaker::Resource::Service[$::neutron::params::server_service]],
- }
-- pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint':
-- constraint_type => 'order',
-- first_resource => "${::neutron::params::server_service}-clone",
-- second_resource => "${::neutron::params::ovs_agent_service}-clone",
-- first_action => 'start',
-- second_action => 'start',
-- require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
-- Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
-- }
-- pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint':
-- constraint_type => 'order',
-- first_resource => "${::neutron::params::ovs_agent_service}-clone",
-- second_resource => "${::neutron::params::dhcp_agent_service}-clone",
-- first_action => 'start',
-- second_action => 'start',
-- require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
-- Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
-+ if 'openvswitch' in hiera('neutron_mechanism_drivers') {
-+ pacemaker::resource::service { $::neutron::params::ovs_agent_service:
-+ clone_params => "interleave=true",
-+ }
-+ pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service:
-+ ocf_agent_name => "neutron:OVSCleanup",
-+ clone_params => "interleave=true",
-+ }
-+ # neutron - one chain ovs-cleanup-->netns-cleanup-->ovs-agent
-+ pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint':
-+ constraint_type => "order",
-+ first_resource => "${::neutron::params::ovs_cleanup_service}-clone",
-+ second_resource => "neutron-netns-cleanup-clone",
-+ first_action => "start",
-+ second_action => "start",
-+ require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"],
-+ Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
-+ }
-+ pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation':
-+ source => "neutron-netns-cleanup-clone",
-+ target => "${::neutron::params::ovs_cleanup_service}-clone",
-+ score => "INFINITY",
-+ require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"],
-+ Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
-+ }
-+ pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint':
-+ constraint_type => "order",
-+ first_resource => "neutron-netns-cleanup-clone",
-+ second_resource => "${::neutron::params::ovs_agent_service}-clone",
-+ first_action => "start",
-+ second_action => "start",
-+ require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"],
-+ Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]],
-+ }
-+ pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation':
-+ source => "${::neutron::params::ovs_agent_service}-clone",
-+ target => "neutron-netns-cleanup-clone",
-+ score => "INFINITY",
-+ require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"],
-+ Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]],
-+ }
-
-+ #another chain keystone-->neutron-server-->ovs-agent-->dhcp-->l3
-+ pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint':
-+ constraint_type => "order",
-+ first_resource => "${::neutron::params::server_service}-clone",
-+ second_resource => "${::neutron::params::ovs_agent_service}-clone",
-+ first_action => "start",
-+ second_action => "start",
-+ require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
-+ Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
-+ }
-+ pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint':
-+ constraint_type => "order",
-+ first_resource => "${::neutron::params::ovs_agent_service}-clone",
-+ second_resource => "${::neutron::params::dhcp_agent_service}-clone",
-+ first_action => "start",
-+ second_action => "start",
-+ require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"],
-+ Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]],
-+
-+ }
-+ pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation':
-+ source => "${::neutron::params::dhcp_agent_service}-clone",
-+ target => "${::neutron::params::ovs_agent_service}-clone",
-+ score => "INFINITY",
-+ require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"],
-+ Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]],
-+ }
- }
-- pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation':
-- source => "${::neutron::params::dhcp_agent_service}-clone",
-- target => "${::neutron::params::ovs_agent_service}-clone",
-- score => 'INFINITY',
-- require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
-- Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
-- }
-- pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint':
-- constraint_type => 'order',
-- first_resource => "${::neutron::params::dhcp_agent_service}-clone",
-- second_resource => "${::neutron::params::l3_agent_service}-clone",
-- first_action => 'start',
-- second_action => 'start',
-- require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
-- Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]],
-- }
-- pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation':
-- source => "${::neutron::params::l3_agent_service}-clone",
-- target => "${::neutron::params::dhcp_agent_service}-clone",
-- score => 'INFINITY',
-- require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
-- Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]],
-- }
-- pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint':
-- constraint_type => 'order',
-- first_resource => "${::neutron::params::l3_agent_service}-clone",
-- second_resource => "${::neutron::params::metadata_agent_service}-clone",
-- first_action => 'start',
-- second_action => 'start',
-- require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
-- Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
-- }
-- pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation':
-- source => "${::neutron::params::metadata_agent_service}-clone",
-- target => "${::neutron::params::l3_agent_service}-clone",
-- score => 'INFINITY',
-- require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
-- Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
-+ if !('onos_ml2' in hiera('neutron_mechanism_drivers') or str2bool(hiera('opendaylight_enable_l3', 'no'))) {
-+ pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint':
-+ constraint_type => 'order',
-+ first_resource => "${::neutron::params::dhcp_agent_service}-clone",
-+ second_resource => "${::neutron::params::l3_agent_service}-clone",
-+ first_action => 'start',
-+ second_action => 'start',
-+ require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
-+ Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]],
-+ }
-+ pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation':
-+ source => "${::neutron::params::l3_agent_service}-clone",
-+ target => "${::neutron::params::dhcp_agent_service}-clone",
-+ score => 'INFINITY',
-+ require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
-+ Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]],
-+ }
-+ pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint':
-+ constraint_type => 'order',
-+ first_resource => "${::neutron::params::l3_agent_service}-clone",
-+ second_resource => "${::neutron::params::metadata_agent_service}-clone",
-+ first_action => 'start',
-+ second_action => 'start',
-+ require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
-+ Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
-+ }
-+ pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation':
-+ source => "${::neutron::params::metadata_agent_service}-clone",
-+ target => "${::neutron::params::l3_agent_service}-clone",
-+ score => 'INFINITY',
-+ require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
-+ Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
-+ }
- }
--
- # Nova
- pacemaker::resource::service { $::nova::params::api_service_name :
- clone_params => 'interleave=true',
-@@ -1276,7 +1444,7 @@ if hiera('step') >= 4 {
- Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
- }
-
-- # Ceilometer
-+ # Ceilometer and Aodh
- case downcase(hiera('ceilometer_backend')) {
- /mysql/: {
- pacemaker::resource::service { $::ceilometer::params::agent_central_service_name :
-@@ -1298,10 +1466,19 @@ if hiera('step') >= 4 {
- pacemaker::resource::service { $::ceilometer::params::api_service_name :
- clone_params => 'interleave=true',
- }
-- pacemaker::resource::service { $::ceilometer::params::alarm_evaluator_service_name :
-+ pacemaker::resource::service { $::aodh::params::notifier_service_name :
- clone_params => 'interleave=true',
- }
-- pacemaker::resource::service { $::ceilometer::params::alarm_notifier_service_name :
-+ pacemaker::resource::service { $::aodh::params::expirer_package_serice :
-+ clone_params => 'interleave=true',
-+ }
-+ pacemaker::resource::service { $::aodh::params::listener_service_name :
-+ clone_params => 'interleave=true',
-+ }
-+ pacemaker::resource::service { $::aodh::params::api_service_name :
-+ clone_params => 'interleave=true',
-+ }
-+ pacemaker::resource::service { $::aodh::params::evaluator_service_name :
- clone_params => 'interleave=true',
- }
- pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name :
-@@ -1315,8 +1492,19 @@ if hiera('step') >= 4 {
- # Fedora doesn't know `require-all` parameter for constraints yet
- if $::operatingsystem == 'Fedora' {
- $redis_ceilometer_constraint_params = undef
-+ $redis_aodh_constraint_params = undef
- } else {
- $redis_ceilometer_constraint_params = 'require-all=false'
-+ $redis_aodh_constraint_params = 'require-all=false'
-+ }
-+ pacemaker::constraint::base { 'keystone-then-aodh-api-constraint':
-+ constraint_type => 'order',
-+ first_resource => "${::keystone::params::service_name}-clone",
-+ second_resource => "${::aodh::params::api_service_name}-clone",
-+ first_action => 'start',
-+ second_action => 'start',
-+ require => [Pacemaker::Resource::Service[$::aodh::params::api_service_name],
-+ Pacemaker::Resource::Service[$::keystone::params::service_name]],
- }
- pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint':
- constraint_type => 'order',
-@@ -1328,6 +1516,16 @@ if hiera('step') >= 4 {
- require => [Pacemaker::Resource::Ocf['redis'],
- Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name]],
- }
-+ pacemaker::constraint::base { 'redis-then-aodh-evaluator-constraint':
-+ constraint_type => 'order',
-+ first_resource => 'redis-master',
-+ second_resource => "${::aodh::params::evaluator_service_name}-clone",
-+ first_action => 'promote',
-+ second_action => 'start',
-+ constraint_params => $redis_aodh_constraint_params,
-+ require => [Pacemaker::Resource::Ocf['redis'],
-+ Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name]],
-+ }
- pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
- constraint_type => 'order',
- first_resource => "${::keystone::params::service_name}-clone",
-@@ -1378,53 +1576,37 @@ if hiera('step') >= 4 {
- require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
- Pacemaker::Resource::Ocf['delay']],
- }
-- pacemaker::constraint::base { 'ceilometer-delay-then-ceilometer-alarm-evaluator-constraint':
-+ pacemaker::constraint::base { 'aodh-delay-then-aodh-evaluator-constraint':
- constraint_type => 'order',
- first_resource => 'delay-clone',
-- second_resource => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
-+ second_resource => "${::aodh::params::evaluator_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
-- require => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name],
-+ require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
- Pacemaker::Resource::Ocf['delay']],
- }
-- pacemaker::constraint::colocation { 'ceilometer-alarm-evaluator-with-ceilometer-delay-colocation':
-- source => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
-+ pacemaker::constraint::colocation { 'aodh-evaluator-with-aodh-delay-colocation':
-+ source => "${::aodh::params::evaluator_service_name}-clone",
- target => 'delay-clone',
- score => 'INFINITY',
-- require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
-+ require => [Pacemaker::Resource::Service[$::horizon::params::http_service],
- Pacemaker::Resource::Ocf['delay']],
- }
-- pacemaker::constraint::base { 'ceilometer-alarm-evaluator-then-ceilometer-alarm-notifier-constraint':
-- constraint_type => 'order',
-- first_resource => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
-- second_resource => "${::ceilometer::params::alarm_notifier_service_name}-clone",
-- first_action => 'start',
-- second_action => 'start',
-- require => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name],
-- Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
-- }
-- pacemaker::constraint::colocation { 'ceilometer-alarm-notifier-with-ceilometer-alarm-evaluator-colocation':
-- source => "${::ceilometer::params::alarm_notifier_service_name}-clone",
-- target => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
-- score => 'INFINITY',
-- require => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name],
-- Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
-- }
-- pacemaker::constraint::base { 'ceilometer-alarm-notifier-then-ceilometer-notification-constraint':
-+ pacemaker::constraint::base { 'aodh-evaluator-then-aodh-notifier-constraint':
- constraint_type => 'order',
-- first_resource => "${::ceilometer::params::alarm_notifier_service_name}-clone",
-- second_resource => "${::ceilometer::params::agent_notification_service_name}-clone",
-+ first_resource => "${::aodh::params::evaluator_service_name}-clone",
-+ second_resource => "${::aodh::params::notifier_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
-- require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name],
-- Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
-+ require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
-+ Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]],
- }
-- pacemaker::constraint::colocation { 'ceilometer-notification-with-ceilometer-alarm-notifier-colocation':
-- source => "${::ceilometer::params::agent_notification_service_name}-clone",
-- target => "${::ceilometer::params::alarm_notifier_service_name}-clone",
-+ pacemaker::constraint::colocation { 'aodh-notifier-with-aodh-evaluator-colocation':
-+ source => "${::aodh::params::notifier_service_name}-clone",
-+ target => "${::aodh::params::evaluator_service_name}-clone",
- score => 'INFINITY',
-- require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name],
-- Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
-+ require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
-+ Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]],
- }
- if downcase(hiera('ceilometer_backend')) == 'mongodb' {
- pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint':
-diff --git a/puppet/manifests/overcloud_opendaylight.pp b/puppet/manifests/overcloud_opendaylight.pp
-new file mode 100644
-index 0000000..aeb31be
---- /dev/null
-+++ b/puppet/manifests/overcloud_opendaylight.pp
-@@ -0,0 +1,27 @@
-+# Copyright 2015 Red Hat, Inc.
-+# All Rights Reserved.
-+#
-+# Licensed under the Apache License, Version 2.0 (the "License"); you may
-+# not use this file except in compliance with the License. You may obtain
-+# a copy of the License at
-+#
-+# http://www.apache.org/licenses/LICENSE-2.0
-+#
-+# Unless required by applicable law or agreed to in writing, software
-+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-+# License for the specific language governing permissions and limitations
-+# under the License.
-+
-+include ::tripleo::packages
-+
-+if count(hiera('ntp::servers')) > 0 {
-+ include ::ntp
-+}
-+
-+class {"opendaylight":
-+ extra_features => any2array(hiera('opendaylight_features', 'odl-ovsdb-openstack')),
-+ odl_rest_port => hiera('opendaylight_port'),
-+ enable_l3 => hiera('opendaylight_enable_l3', 'no'),
-+}
-+
-diff --git a/puppet/opendaylight-puppet.yaml b/puppet/opendaylight-puppet.yaml
-new file mode 100644
-index 0000000..6488e0e
---- /dev/null
-+++ b/puppet/opendaylight-puppet.yaml
-@@ -0,0 +1,223 @@
-+heat_template_version: 2015-04-30
-+
-+description: >
-+ OpenDaylight node configured by Puppet.
-+
-+parameters:
-+ OpenDaylightFlavor:
-+ default: baremetal
-+ description: The flavor to use for the OpenDaylight node
-+ type: string
-+ OpenDaylightImage:
-+ default: overcloud-full
-+ description: The image to use for the OpenDaylight node
-+ type: string
-+ OpenDaylightHostname:
-+ default: opendaylight-server
-+ description: The hostname to use for the OpenDaylight node
-+ type: string
-+ OpenDaylightUsername:
-+ default: admin
-+ description: The admin user for the OpenDaylight node
-+ type: string
-+ OpenDaylightPassword:
-+ default: ''
-+ description: The admin password for the OpenDaylight node
-+ type: string
-+ hidden: true
-+ OpenDaylightEnableL3:
-+ description: Knob to enable/disable ODL L3
-+ type: string
-+ default: 'no'
-+ OpenDaylightFeatures:
-+ description: List of features to install with ODL
-+ type: comma_delimited_list
-+ default: "odl-ovsdb-openstack"
-+ OpenDaylightPort:
-+ default: 8081
-+ description: Set OpenDaylight service port
-+ type: number
-+ KeyName:
-+ description: The keypair to use for SSH access to the node (via heat-admin user)
-+ type: string
-+ default: default
-+ constraints:
-+ - custom_constraint: nova.keypair
-+ ImageUpdatePolicy:
-+ default: 'REBUILD_PRESERVE_EPHEMERAL'
-+ description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt.
-+ type: string
-+ UpdateIdentifier:
-+ default: ''
-+ type: string
-+ description: >
-+ Setting to a previously unused value during stack-update will trigger
-+ package update on all nodes
-+ NtpServer:
-+ type: string
-+ default: ''
-+ PublicInterface:
-+ default: nic1
-+ description: What interface to bridge onto br-ex for network nodes.
-+ type: string
-+
-+resources:
-+ OpenDaylightNode:
-+ type: OS::Nova::Server
-+ properties:
-+ image: {get_param: OpenDaylightImage}
-+ image_update_policy: {get_param: ImageUpdatePolicy}
-+ flavor: {get_param: OpenDaylightFlavor}
-+ key_name: {get_param: KeyName}
-+ networks:
-+ - network: ctlplane
-+ user_data_format: SOFTWARE_CONFIG
-+ user_data: {get_resource: NodeUserData}
-+ name: {get_param: OpenDaylightHostname}
-+
-+ NodeUserData:
-+ type: OS::TripleO::NodeUserData
-+
-+ ExternalPort:
-+ type: OS::TripleO::Controller::Ports::ExternalPort
-+ properties:
-+ ControlPlaneIP: {get_attr: [OpenDaylightNode, networks, ctlplane, 0]}
-+
-+ InternalApiPort:
-+ type: OS::TripleO::Controller::Ports::InternalApiPort
-+ properties:
-+ ControlPlaneIP: {get_attr: [OpenDaylightNode, networks, ctlplane, 0]}
-+
-+ NetIpMap:
-+ type: OS::TripleO::Network::Ports::NetIpMap
-+ properties:
-+ ControlPlaneIp: {get_attr: [OpenDaylightNode, networks, ctlplane, 0]}
-+ ExternalIp: {get_attr: [ExternalPort, ip_address]}
-+ InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
-+
-+ NetIpSubnetMap:
-+ type: OS::TripleO::Network::Ports::NetIpSubnetMap
-+ properties:
-+ ControlPlaneIp: {get_attr: [OpenDaylightNode, networks, ctlplane, 0]}
-+ ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
-+ InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
-+
-+ NetworkConfig:
-+ type: OS::TripleO::Controller::Net::SoftwareConfig
-+ properties:
-+ ControlPlaneIp: {get_attr: [OpenDaylightNode, networks, ctlplane, 0]}
-+ ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
-+ InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
-+
-+ NetworkDeployment:
-+ type: OS::TripleO::SoftwareDeployment
-+ properties:
-+ config: {get_resource: NetworkConfig}
-+ server: {get_resource: OpenDaylightNode}
-+ input_values:
-+ bridge_name: br-ex
-+ interface_name: {get_param: PublicInterface}
-+
-+ OpenDaylightDeployment:
-+ type: OS::TripleO::SoftwareDeployment
-+ depends_on: NetworkDeployment
-+ properties:
-+ config: {get_resource: OpenDaylightConfig}
-+ server: {get_resource: OpenDaylightNode}
-+ input_values:
-+ ntp_servers:
-+ str_replace:
-+ template: '["server"]'
-+ params:
-+ server: {get_param: NtpServer}
-+ opendaylight_port: {get_param: OpenDaylightPort}
-+ opendaylight_enable_l3: {get_param: OpenDaylightEnableL3}
-+ opendaylight_username: {get_param: OpenDaylightUsername}
-+ opendaylight_password: {get_param: OpenDaylightPassword}
-+ opendaylight_features: {get_param: OpenDaylightFeatures}
-+
-+ OpenDaylightConfig:
-+ type: OS::Heat::StructuredConfig
-+ properties:
-+ group: os-apply-config
-+ config:
-+ hiera:
-+ hierarchy:
-+ - '"%{::uuid}"'
-+ - heat_config_%{::deploy_config_name}
-+ - extraconfig
-+ - bootstrap_node # provided by BootstrapNodeConfig
-+ - all_nodes # provided by allNodesConfig
-+ - vip_data # provided by vip-config
-+ - RedHat # Workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1236143
-+ - common
-+ datafiles:
-+ common:
-+ raw_data: {get_file: hieradata/common.yaml}
-+ mapped_data:
-+ ntp::servers: {get_input: ntp_servers}
-+ opendaylight::admin_username: {get_param: OpenDaylightUsername}
-+ opendaylight::admin_password: {get_param: OpenDaylightPassword}
-+ opendaylight_port: {get_input: opendaylight_port}
-+ opendaylight_enable_l3: {get_input: opendaylight_enable_l3}
-+ opendaylight_features: {get_input: opendaylight_features}
-+ ceph:
-+ raw_data: {get_file: hieradata/ceph.yaml}
-+
-+ UpdateConfig:
-+ type: OS::TripleO::Tasks::PackageUpdate
-+
-+ UpdateDeployment:
-+ type: OS::Heat::SoftwareDeployment
-+ properties:
-+ config: {get_resource: UpdateConfig}
-+ server: {get_resource: OpenDaylightNode}
-+ input_values:
-+ update_identifier:
-+ get_param: UpdateIdentifier
-+
-+ OpenDaylightHostsConfig:
-+ type: OS::Heat::SoftwareConfig
-+ properties:
-+ group: script
-+ config: |
-+ #!/usr/bin/env bash
-+ echo -e "$(facter ipaddress)\t\t$(hostname -f)\t$(hostname -s)" >> /etc/hosts
-+
-+ OpenDaylightHostsDeployment:
-+ type: OS::Heat::StructuredDeployment
-+ depends_on: OpenDaylightDeployment
-+ properties:
-+ server: {get_resource: OpenDaylightNode}
-+ config: {get_resource: OpenDaylightHostsConfig}
-+
-+ OpenDaylightPuppetConfig:
-+ type: OS::Heat::SoftwareConfig
-+ properties:
-+ group: puppet
-+ config:
-+ get_file: manifests/overcloud_opendaylight.pp
-+
-+ OpenDaylightPuppetDeployment:
-+ depends_on: OpenDaylightHostsDeployment
-+ type: OS::Heat::StructuredDeployment
-+ properties:
-+ server: {get_resource: OpenDaylightNode}
-+ config: {get_resource: OpenDaylightPuppetConfig}
-+ input_values:
-+ update_identifier: {get_param: UpdateIdentifier}
-+
-+outputs:
-+ ip_address:
-+ description: IP address of the server in the ctlplane network
-+ value: {get_attr: [OpenDaylightNode, networks, ctlplane, 0]}
-+ opendaylight_controller_ip:
-+ description: IP address of the server on the internal network
-+ value: {get_attr: [InternalApiPort, ip_address]}
-+ config_identifier:
-+ description: identifier which changes if the node configuration may need re-applying
-+ value:
-+ list_join:
-+ - ','
-+ - - {get_attr: [OpenDaylightDeployment, deploy_stdout]}
-+ - {get_param: UpdateIdentifier}
-diff --git a/puppet/vip-config.yaml b/puppet/vip-config.yaml
-index 1dec489..727bb79 100644
---- a/puppet/vip-config.yaml
-+++ b/puppet/vip-config.yaml
-@@ -27,6 +27,7 @@ resources:
- horizon_vip: {get_input: horizon_vip}
- redis_vip: {get_input: redis_vip}
- mysql_vip: {get_input: mysql_vip}
-+ opendaylight_api_vip: {get_input: opendaylight_api_vip}
- tripleo::loadbalancer::public_virtual_ip: {get_input: public_virtual_ip}
- tripleo::loadbalancer::controller_virtual_ip: {get_input: control_virtual_ip}
- tripleo::loadbalancer::internal_api_virtual_ip: {get_input: internal_api_virtual_ip}
---
-2.5.0
-
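
Note: the removed puppet/opendaylight-puppet.yaml template above carries all of its tunables (service port, credentials, feature list, L3 knob) as Heat parameters with defaults. As a purely illustrative way to list those defaults from a checkout that still carries this patch, a throwaway script along the following lines could be used; the file path and the PyYAML dependency are assumptions for the example, not part of the patch.

#!/usr/bin/env python
# Illustrative only: print parameter names, types and defaults from a Heat
# template. Assumes PyYAML is installed and the path below points at a local
# copy of the (now removed) opendaylight-puppet.yaml.
import yaml

TEMPLATE = 'puppet/opendaylight-puppet.yaml'  # hypothetical local path

with open(TEMPLATE) as f:
    # Intrinsic functions such as {get_param: ...} parse as ordinary mappings,
    # so safe_load is sufficient for a structural check like this.
    template = yaml.safe_load(f)

for name, spec in sorted(template.get('parameters', {}).items()):
    print('%-22s type=%-20s default=%r' % (name, spec.get('type'), spec.get('default')))
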
diff --git a/build/undercloud.sh b/build/undercloud.sh
index 3ac46e0f..65462ec8 100755
--- a/build/undercloud.sh
+++ b/build/undercloud.sh
@@ -18,18 +18,6 @@ cp -f cache/undercloud.qcow2 images/
#Adding OpenStack packages to undercloud
pushd images > /dev/null
-# install the packages above and enable ceph to live on the controller
-# OpenWSMan package update supports the AMT Ironic driver for the TealBox
-LIBGUESTFS_BACKEND=direct virt-customize \
- --run-command "sed -i '/ControllerEnableCephStorage/c\\ ControllerEnableCephStorage: true' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml" \
- --run-command "sed -i '/ \$enable_ceph = /c\\ \$enable_ceph = true' /usr/share/openstack-tripleo-heat-templates/puppet/manifests/overcloud_controller_pacemaker.pp" \
- --run-command "sed -i '/ \$enable_ceph = /c\\ \$enable_ceph = true' /usr/share/openstack-tripleo-heat-templates/puppet/manifests/overcloud_controller.pp" \
- --run-command "curl http://download.opensuse.org/repositories/Openwsman/CentOS_CentOS-7/Openwsman.repo > /etc/yum.repos.d/wsman.repo" \
- --run-command "yum update -y openwsman*" \
- --run-command "cp /usr/share/instack-undercloud/undercloud.conf.sample /home/stack/undercloud.conf && chown stack:stack /home/stack/undercloud.conf" \
- --upload ../opnfv-environment.yaml:/home/stack/ \
- -a undercloud.qcow2
-
# Use apex tripleo-heat-templates fork
PR_NUMBER=""
REF="stable/colorado"
@@ -51,10 +39,12 @@ if [ "$PR_NUMBER" != "" ]; then
PR=$(curl $GHCREDS https://api.github.com/repos/trozet/opnfv-tht/pulls/$PR_NUMBER)
# Do not pull from merged branches
- MERGED=$(echo $PR | python -c "import sys,json; print json.load(sys.stdin)['head']['merged']")
- if [ "$MERGED" == false ]; then
- REF=$(echo $PR | python -c "import sys,json; print json.load(sys.stdin)['head']['ref']")
- REPO=$(echo $PR | python -c "import sys,json; print json.load(sys.stdin)['head']['repo']['git_url']")
+ MERGED=$(python -c "import json; print json.loads('''$PR'''.replace('\n', '').replace('\r', ''))['merged']")
+ if [ "$MERGED" == "False" ]; then
+ REF=$(python -c "import json; print json.loads('''$PR'''.replace('\n', '').replace('\r', ''))['head']['ref']")
+ echo "Setting GitHub Ref to: $REF"
+ REPO=$(python -c "import json; print json.loads('''$PR'''.replace('\n', '').replace('\r', ''))['head']['repo']['git_url']")
+ echo "Setting GitHub URL to: $REPO"
fi
fi
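
Note: the hunk above replaces the stdin-based json.load one-liners (which also looked up 'merged' under 'head', where it does not live in the GitHub pull-request payload) with calls that embed the $PR response directly in the Python source. A single-pass sketch of the same lookups is shown below for reference; reading the response from stdin, and the script itself, are illustrative assumptions rather than what undercloud.sh actually runs.

#!/usr/bin/env python
# Illustrative sketch: pull the fields undercloud.sh needs out of a GitHub
# pull-request API response in one parse instead of three python -c calls.
# Feeding the JSON via stdin is an assumption made for the example, e.g.:
#   curl https://api.github.com/repos/trozet/opnfv-tht/pulls/N | python pr_info.py
import json
import sys

pr = json.load(sys.stdin)

merged = pr['merged']                 # top-level key, not under 'head'
ref = pr['head']['ref']               # branch name of the pull request
repo = pr['head']['repo']['git_url']  # git URL of the contributor's fork

if not merged:
    print('REF=%s' % ref)
    print('REPO=%s' % repo)
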
@@ -68,5 +58,16 @@ LIBGUESTFS_BACKEND=direct virt-customize --upload opnfv-tht.tar.gz:/usr/share \
--run-command "cd /usr/share && rm -rf openstack-tripleo-heat-templates && tar xzf opnfv-tht.tar.gz" \
-a undercloud.qcow2
+# install the packages above and enable ceph to live on the controller
+# OpenWSMan package update supports the AMT Ironic driver for the TealBox
+LIBGUESTFS_BACKEND=direct virt-customize \
+ --run-command "sed -i '/ControllerEnableCephStorage/c\\ ControllerEnableCephStorage: true' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml" \
+ --run-command "sed -i '/ComputeEnableCephStorage/c\\ ComputeEnableCephStorage: true' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml" \
+ --run-command "curl http://download.opensuse.org/repositories/Openwsman/CentOS_CentOS-7/Openwsman.repo > /etc/yum.repos.d/wsman.repo" \
+ --run-command "yum update -y openwsman*" \
+ --run-command "cp /usr/share/instack-undercloud/undercloud.conf.sample /home/stack/undercloud.conf && chown stack:stack /home/stack/undercloud.conf" \
+ --upload ../opnfv-environment.yaml:/home/stack/ \
+ -a undercloud.qcow2
+
popd > /dev/null
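
Note: the relocated virt-customize block now runs after the opnfv-tht tarball is unpacked, so its sed edits land on the Apex copy of storage-environment.yaml and flip both ControllerEnableCephStorage and ComputeEnableCephStorage to true. A small, illustrative check that such edits produced the expected values is sketched below; it assumes the file has first been copied out of undercloud.qcow2 (for instance with virt-copy-out), that PyYAML is available, and that the local path is a placeholder. It is not part of undercloud.sh.

#!/usr/bin/env python
# Illustrative check that the sed edits enabled the Ceph storage flags.
# 'storage-environment.yaml' stands in for a copy extracted from the image.
import sys
import yaml

with open('storage-environment.yaml') as f:
    env = yaml.safe_load(f)

# The flags may sit under parameter_defaults or parameters depending on the
# template version, so check both.
params = env.get('parameter_defaults') or env.get('parameters') or {}
flags = ('ControllerEnableCephStorage', 'ComputeEnableCephStorage')
missing = [k for k in flags if params.get(k) is not True]
if missing:
    sys.exit('Ceph flags not enabled: %s' % ', '.join(missing))
print('Ceph storage enabled for controller and compute')
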