-rw-r--r--  environments/enable-tls.yaml                              |   3
-rw-r--r--  environments/external-loadbalancer-vip.yaml               |   7
-rw-r--r--  environments/network-isolation-v6.yaml                    |   2
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker_migrations.sh   |  25
-rwxr-xr-x  extraconfig/tasks/pacemaker_resource_restart.sh           |   6
-rwxr-xr-x  extraconfig/tasks/yum_update.sh                           |   8
-rw-r--r--  net-config-bond.yaml                                      |   6
-rw-r--r--  network/endpoints/endpoint_data.yaml                      |   9
-rw-r--r--  network/endpoints/endpoint_map.yaml                       | 118
-rw-r--r--  overcloud.yaml                                            |  19
-rw-r--r--  puppet/all-nodes-config.yaml                              |  11
-rw-r--r--  puppet/controller.yaml                                    |  44
-rw-r--r--  puppet/hieradata/common.yaml                              |   3
-rw-r--r--  puppet/hieradata/controller.yaml                          |   7
-rw-r--r--  puppet/manifests/overcloud_controller.pp                  |  34
-rw-r--r--  puppet/manifests/overcloud_controller_pacemaker.pp        | 102
-rw-r--r--  puppet/vip-config.yaml                                    |   1
-rwxr-xr-x  tools/yaml-validate.py                                    |  11
18 files changed, 398 insertions(+), 18 deletions(-)
diff --git a/environments/enable-tls.yaml b/environments/enable-tls.yaml
index 49a87bb5..70181278 100644
--- a/environments/enable-tls.yaml
+++ b/environments/enable-tls.yaml
@@ -5,6 +5,9 @@ parameter_defaults:
SSLKey: |
The contents of the private key go here
EndpointMap:
+ AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+ AodhInternal: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+ AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'}
CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
diff --git a/environments/external-loadbalancer-vip.yaml b/environments/external-loadbalancer-vip.yaml
index 198892cd..8656ba1a 100644
--- a/environments/external-loadbalancer-vip.yaml
+++ b/environments/external-loadbalancer-vip.yaml
@@ -10,7 +10,8 @@ resource_registry:
OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml
OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant_from_pool.yaml
- OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management_from_pool.yaml
+ # Management network is optional and disabled by default
+ #OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management_from_pool.yaml
parameter_defaults:
# When using an external loadbalancer set the following in parameter_defaults
@@ -35,6 +36,6 @@ parameter_defaults:
- 172.16.3.253
tenant:
- 172.16.0.253
- management:
- - 172.16.4.253
+ #management:
+ #- 172.16.4.253
EnableLoadBalancer: false
diff --git a/environments/network-isolation-v6.yaml b/environments/network-isolation-v6.yaml
index 599a08b1..11ca5b31 100644
--- a/environments/network-isolation-v6.yaml
+++ b/environments/network-isolation-v6.yaml
@@ -53,3 +53,5 @@ parameter_defaults:
NovaIPv6: True
# Enable IPv6 environment for RabbitMQ.
RabbitIPv6: true
+ # Enable IPv6 environment for Memcached.
+ MemcachedIPv6: true
diff --git a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
index 7fd26945..1f420b32 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
+++ b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
@@ -12,3 +12,28 @@
# The migration functions should be idempotent. If the migration has
# been already applied, it should be possible to call the function
# again without damaging the deployment or failing the upgrade.
+
+function remove_ceilometer_alarm {
+ if pcs status | grep openstack-ceilometer-alarm; then
+ # Disable pacemaker resources for ceilometer-alarms
+ pcs resource disable openstack-ceilometer-alarm-evaluator
+ check_resource openstack-ceilometer-alarm-evaluator stopped 600
+ pcs resource delete openstack-ceilometer-alarm-evaluator
+ pcs resource disable openstack-ceilometer-alarm-notifier
+ check_resource openstack-ceilometer-alarm-notifier stopped 600
+ pcs resource delete openstack-ceilometer-alarm-notifier
+
+ # remove constraints
+ pcs constraint remove ceilometer-delay-then-ceilometer-alarm-evaluator-constraint
+ pcs constraint remove ceilometer-alarm-evaluator-with-ceilometer-delay-colocation
+ pcs constraint remove ceilometer-alarm-evaluator-then-ceilometer-alarm-notifier-constraint
+ pcs constraint remove ceilometer-alarm-notifier-with-ceilometer-alarm-evaluator-colocation
+ pcs constraint remove ceilometer-alarm-notifier-then-ceilometer-notification-constraint
+ pcs constraint remove ceilometer-notification-with-ceilometer-alarm-notifier-colocation
+
+ fi
+
+ # uninstall openstack-ceilometer-alarm package
+ yum -y remove openstack-ceilometer-alarm
+
+}
diff --git a/extraconfig/tasks/pacemaker_resource_restart.sh b/extraconfig/tasks/pacemaker_resource_restart.sh
index b2bdc55a..09a452e6 100755
--- a/extraconfig/tasks/pacemaker_resource_restart.sh
+++ b/extraconfig/tasks/pacemaker_resource_restart.sh
@@ -36,3 +36,9 @@ if [ "$pacemaker_status" = "active" -a \
check_resource httpd started 800
fi
+
+if [ "$pacemaker_status" = "active" ]; then
+ # TODO(marios): remove this once +bug/1561012
+ # need this on all controllers:
+ systemctl reload haproxy
+fi
diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh
index 59e4be45..66efc5c5 100755
--- a/extraconfig/tasks/yum_update.sh
+++ b/extraconfig/tasks/yum_update.sh
@@ -53,12 +53,13 @@ neutron-l3-agent
neutron-metadata-agent
neutron-openvswitch-agent
neutron-server
-openstack-ceilometer-alarm-evaluator
-openstack-ceilometer-alarm-notifier
openstack-ceilometer-api
openstack-ceilometer-central
openstack-ceilometer-collector
openstack-ceilometer-notification
+openstack-aodh-evaluator
+openstack-aodh-notifier
+openstack-aodh-listener
openstack-cinder-api
openstack-cinder-scheduler
openstack-cinder-volume
@@ -107,6 +108,9 @@ openstack-nova-scheduler"
pcs -f $pacemaker_dumpfile constraint order promote redis-master then start openstack-ceilometer-central-clone require-all=false
fi
+ if ! pcs constraint order show | grep "promote redis-master then start openstack-aodh-evaluator-clone"; then
+ pcs -f $pacemaker_dumpfile constraint order promote redis-master then start openstack-aodh-evaluator-clone require-all=false
+ fi
# ensure neutron constraints https://review.openstack.org/#/c/229466
# remove ovs-cleanup after server and add openvswitch-agent instead
if pcs constraint order show | grep "start neutron-server-clone then start neutron-ovs-cleanup-clone"; then
diff --git a/net-config-bond.yaml b/net-config-bond.yaml
index b624563f..0a162e77 100644
--- a/net-config-bond.yaml
+++ b/net-config-bond.yaml
@@ -4,6 +4,11 @@ description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge.
parameters:
+ BondInterfaceOvsOptions:
+ default: ''
+ description: The ovs_options string for the bond interface. Set things like
+ lacp=active and/or bond_mode=balance-slb using this option.
+ type: string
ControlPlaneIp:
default: ''
description: IP address/subnet on the ctlplane network
@@ -58,6 +63,7 @@ resources:
type: ovs_bond
name: bond1
use_dhcp: true
+ ovs_options: {get_param: BondInterfaceOvsOptions}
members:
# os-net-config translates nic1 => em1 (for example)
-
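Note: the new BondInterfaceOvsOptions parameter can be overridden from an environment file like any other template parameter. A minimal sketch with an illustrative value only (the specific bond mode is an assumption, not part of this change):

  parameter_defaults:
    # Any valid ovs_options string is passed straight through to os-net-config.
    BondInterfaceOvsOptions: "bond_mode=balance-slb"
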
diff --git a/network/endpoints/endpoint_data.yaml b/network/endpoints/endpoint_data.yaml
index 46148dfb..a74d75da 100644
--- a/network/endpoints/endpoint_data.yaml
+++ b/network/endpoints/endpoint_data.yaml
@@ -1,6 +1,15 @@
# Data in this file is used to generate the endpoint_map.yaml template.
# Run the script build_endpoint_map.py to regenerate the file.
+Aodh:
+ Internal:
+ vip_param: AodhApi
+ Public:
+ vip_param: Public
+ Admin:
+ vip_param: AodhApi
+ port: 8042
+
Ceilometer:
Internal:
vip_param: CeilometerApi
diff --git a/network/endpoints/endpoint_map.yaml b/network/endpoints/endpoint_map.yaml
index 8f0f587a..956fb0ba 100644
--- a/network/endpoints/endpoint_map.yaml
+++ b/network/endpoints/endpoint_map.yaml
@@ -8,6 +8,7 @@ description: A map of OpenStack endpoints. Since the endpoints are URLs,
parameters come from net_ip_uri_map, which will include these brackets
in IPv6 addresses.
parameters:
+ AodhApiVirtualIP: {type: string, default: ''}
CeilometerApiVirtualIP: {type: string, default: ''}
CinderApiVirtualIP: {type: string, default: ''}
GlanceApiVirtualIP: {type: string, default: ''}
@@ -23,6 +24,9 @@ parameters:
EndpointMap:
type: json
default:
+ AodhAdmin: {protocol: http, port: '8042', host: IP_ADDRESS}
+ AodhInternal: {protocol: http, port: '8042', host: IP_ADDRESS}
+ AodhPublic: {protocol: http, port: '8042', host: IP_ADDRESS}
CeilometerAdmin: {protocol: http, port: '8777', host: IP_ADDRESS}
CeilometerInternal: {protocol: http, port: '8777', host: IP_ADDRESS}
CeilometerPublic: {protocol: http, port: '8777', host: IP_ADDRESS}
@@ -67,6 +71,120 @@ parameters:
outputs:
endpoint_map:
value:
+ AodhAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, AodhAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: AodhApiVirtualIP}
+ port:
+ get_param: [EndpointMap, AodhAdmin, port]
+ protocol:
+ get_param: [EndpointMap, AodhAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, AodhAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, AodhAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: AodhApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, AodhAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, AodhAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, AodhAdmin, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: AodhApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, AodhAdmin, port]
+ AodhInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, AodhInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: AodhApiVirtualIP}
+ port:
+ get_param: [EndpointMap, AodhInternal, port]
+ protocol:
+ get_param: [EndpointMap, AodhInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, AodhInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, AodhInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: AodhApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, AodhInternal, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, AodhInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, AodhInternal, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: AodhApiVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, AodhInternal, port]
+ AodhPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, AodhPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ port:
+ get_param: [EndpointMap, AodhPublic, port]
+ protocol:
+ get_param: [EndpointMap, AodhPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, AodhPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, AodhPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, AodhPublic, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, AodhPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, AodhPublic, host]
+ params:
+ CLOUDNAME: {get_param: CloudName}
+ IP_ADDRESS: {get_param: PublicVirtualIP}
+ - ':'
+ - get_param: [EndpointMap, AodhPublic, port]
CeilometerAdmin:
host:
str_replace:
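Note: each generated endpoint entry resolves host, port and protocol, plus the assembled URIs. A sketch of what the AodhInternal output looks like once rendered, using a hypothetical internal API VIP of 192.0.2.10 (the address is illustrative, not from this change):

  AodhInternal:
    host: 192.0.2.10
    port: '8042'
    protocol: http
    uri: http://192.0.2.10:8042
    uri_no_suffix: http://192.0.2.10:8042
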
diff --git a/overcloud.yaml b/overcloud.yaml
index 476e82ed..cdd7af66 100644
--- a/overcloud.yaml
+++ b/overcloud.yaml
@@ -16,6 +16,11 @@ parameters:
description: The password for the keystone admin account, used for monitoring, querying neutron etc.
type: string
hidden: true
+ AodhPassword:
+ default: unset
+ description: The password for the aodh services.
+ type: string
+ hidden: true
CeilometerBackend:
default: 'mongodb'
description: The ceilometer backend type.
@@ -104,6 +109,10 @@ parameters:
type: string
constraints:
- custom_constraint: nova.keypair
+ MemcachedIPv6:
+ default: false
+ description: Enable IPv6 features in Memcached.
+ type: boolean
NeutronExternalNetworkBridge:
description: Name of bridge used for external network traffic.
type: string
@@ -671,6 +680,7 @@ parameters:
default:
NeutronTenantNetwork: tenant
CeilometerApiNetwork: internal_api
+ AodhApiNetwork: internal_api
MongoDbNetwork: internal_api
CinderApiNetwork: internal_api
CinderIscsiNetwork: storage
@@ -870,6 +880,7 @@ resources:
properties:
CloudName: {get_param: CloudName}
CeilometerApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
+ AodhApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, AodhApiNetwork]}]}
CinderApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]}
GlanceApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
HeatApiVirtualIP: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]}
@@ -893,6 +904,7 @@ resources:
properties:
AdminPassword: {get_param: AdminPassword}
AdminToken: {get_param: AdminToken}
+ AodhPassword: {get_param: AodhPassword}
CeilometerBackend: {get_param: CeilometerBackend}
CeilometerMeteringSecret: {get_param: CeilometerMeteringSecret}
CeilometerPassword: {get_param: CeilometerPassword}
@@ -939,6 +951,7 @@ resources:
KeystoneSSLCertificateKey: {get_param: KeystoneSSLCertificateKey}
KeystoneNotificationDriver: {get_param: KeystoneNotificationDriver}
KeystoneNotificationFormat: {get_param: KeystoneNotificationFormat}
+ MemcachedIPv6: {get_param: MemcachedIPv6}
MysqlClusterUniquePart: {get_attr: [MysqlClusterUniquePart, value]}
MysqlInnodbBufferPoolSize: {get_param: MysqlInnodbBufferPoolSize}
MysqlMaxConnections: {get_param: MysqlMaxConnections}
@@ -1007,6 +1020,7 @@ resources:
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
CeilometerApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
+ AodhApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, AodhApiNetwork]}]}
CinderApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]}
HeatApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]}
HeatApiVirtualIPUri: {get_attr: [VipMap, net_ip_uri_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]}
@@ -1245,6 +1259,7 @@ resources:
heat_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]}
swift_proxy_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]}
ceilometer_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
+ aodh_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, AodhApiNetwork]}]}
nova_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
nova_metadata_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, NovaMetadataNetwork]}]}
glance_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
@@ -1365,6 +1380,7 @@ resources:
nova_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
nova_metadata_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaMetadataNetwork]}]}
ceilometer_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
+ aodh_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, AodhApiNetwork]}]}
heat_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HeatApiNetwork]}]}
horizon_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, HorizonNetwork]}]}
redis_vip: {get_attr: [RedisVirtualIP, ip_address]}
@@ -1647,6 +1663,9 @@ outputs:
PublicVip:
description: Controller VIP for public API endpoints
value: {get_attr: [VipMap, net_ip_map, external]}
+ AodhInternalVip:
+ description: VIP for Aodh API internal endpoint
+ value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, AodhApiNetwork]}]}
CeilometerInternalVip:
description: VIP for Ceilometer API internal endpoint
value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
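Note: the new top-level parameters are set through parameter_defaults like the other overcloud passwords and flags. A sketch of an environment file override (values are placeholders, not from this change):

  parameter_defaults:
    AodhPassword: ReplaceMeWithASecret   # placeholder; do not rely on the 'unset' default
    MemcachedIPv6: true                  # only meaningful on IPv6 deployments
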
diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml
index 2f2a1e9d..fed9dd31 100644
--- a/puppet/all-nodes-config.yaml
+++ b/puppet/all-nodes-config.yaml
@@ -34,6 +34,8 @@ parameters:
type: comma_delimited_list
ceilometer_api_node_ips:
type: comma_delimited_list
+ aodh_api_node_ips:
+ type: comma_delimited_list
nova_api_node_ips:
type: comma_delimited_list
nova_metadata_node_ips:
@@ -187,6 +189,14 @@ resources:
list_join:
- "','"
- {get_param: ceilometer_api_node_ips}
+ aodh_api_node_ips:
+ str_replace:
+ template: "['SERVERS_LIST']"
+ params:
+ SERVERS_LIST:
+ list_join:
+ - "','"
+ - {get_param: aodh_api_node_ips}
nova_api_node_ips:
str_replace:
template: "['SERVERS_LIST']"
@@ -272,6 +282,7 @@ resources:
# NOTE(gfidente): interpolation with %{} in the
# hieradata file can't be used as it returns string
ceilometer::rabbit_hosts: *rabbit_nodes_array
+ aodh::rabbit_hosts: *rabbit_nodes_array
cinder::rabbit_hosts: *rabbit_nodes_array
glance::notify::rabbitmq::rabbit_hosts: *rabbit_nodes_array
heat::rabbit_hosts: *rabbit_nodes_array
diff --git a/puppet/controller.yaml b/puppet/controller.yaml
index bdb124c6..4086fe4e 100644
--- a/puppet/controller.yaml
+++ b/puppet/controller.yaml
@@ -17,6 +17,14 @@ parameters:
description: The keystone auth secret and db password.
type: string
hidden: true
+ AodhApiVirtualIP:
+ type: string
+ default: ''
+ AodhPassword:
+ default: unset
+ description: The password for the aodh services.
+ type: string
+ hidden: true
CeilometerApiVirtualIP:
type: string
default: ''
@@ -333,6 +341,10 @@ parameters:
default: false
description: Whether to manage IPtables rules.
type: boolean
+ MemcachedIPv6:
+ default: false
+ description: Enable IPv6 features in Memcached.
+ type: boolean
PurgeFirewallRules:
default: false
description: Whether IPtables rules should be purged before setting up the new ones.
@@ -1160,13 +1172,15 @@ resources:
ceilometer_metering_secret: {get_param: CeilometerMeteringSecret}
ceilometer_password: {get_param: CeilometerPassword}
ceilometer_store_events: {get_param: CeilometerStoreEvents}
+ aodh_password: {get_param: AodhPassword}
ceilometer_coordination_url:
list_join:
- ''
- - - 'redis://'
- - {get_param: RedisVirtualIPUri}
- - ':6379/?password='
+ - - 'redis://:'
- {get_param: RedisPassword}
+ - '@'
+ - {get_param: RedisVirtualIPUri}
+ - ':6379/'
ceilometer_dsn:
list_join:
- ''
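Note: the coordination URL change above moves the Redis password from a query argument into the userinfo part of the URL. With hypothetical values (VIP 192.0.2.20, password s3cr3t) the rendered hieradata would look roughly like:

  # before: redis://192.0.2.20:6379/?password=s3cr3t
  # after:  redis://:s3cr3t@192.0.2.20:6379/
  ceilometer_coordination_url: 'redis://:s3cr3t@192.0.2.20:6379/'
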
@@ -1180,6 +1194,7 @@ resources:
nova_enable_db_purge: {get_param: NovaEnableDBPurge}
nova_ipv6: {get_param: NovaIPv6}
corosync_ipv6: {get_param: CorosyncIPv6}
+ memcached_ipv6: {get_param: MemcachedIPv6}
nova_password: {get_param: NovaPassword}
nova_dsn:
list_join:
@@ -1234,7 +1249,7 @@ resources:
- '/sahara'
swift_proxy_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]}
swift_management_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]}
- cinder_iscsi_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CinderIscsiNetwork]}]}
+ cinder_iscsi_network: {get_attr: [NetIpMap, net_ip_uri_map, {get_param: [ServiceNetMap, CinderIscsiNetwork]}]}
cinder_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CinderApiNetwork]}]}
glance_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, GlanceApiNetwork]}]}
glance_registry_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, GlanceRegistryNetwork]}]}
@@ -1248,6 +1263,7 @@ resources:
neutron_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]}
neutron_local_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronTenantNetwork]}]}
ceilometer_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CeilometerApiNetwork]}]}
+ aodh_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, AodhApiNetwork]}]}
nova_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
nova_metadata_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NovaMetadataNetwork]}]}
horizon_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, HorizonNetwork]}]}
@@ -1552,6 +1568,24 @@ resources:
snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name}
snmpd_readonly_user_password: {get_input: snmpd_readonly_user_password}
+ # Aodh
+ aodh::rabbit_userid: {get_input: rabbit_username}
+ aodh::rabbit_password: {get_input: rabbit_password}
+ aodh::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
+ aodh::rabbit_port: {get_input: rabbit_client_port}
+ aodh::debug: {get_input: debug}
+ aodh::wsgi::apache::ssl: false
+ aodh::wsgi::apache::bind_host: {get_input: aodh_api_network}
+ aodh::api::service_name: 'httpd'
+ aodh::api::host: {get_input: aodh_api_network}
+ aodh::api::keystone_password: {get_input: aodh_password}
+ aodh::api::keystone_auth_uri: {get_input: keystone_auth_uri}
+ aodh::api::keystone_identity_uri: {get_input: keystone_identity_uri}
+ aodh::auth::auth_password: {get_input: aodh_password}
+ aodh::db::mysql::password: {get_input: aodh_password}
+ # for a migration path from ceilometer-alarm to aodh, we use the same database & coordination
+ aodh::evaluator::coordination_url: {get_input: ceilometer_coordination_url}
+
# Nova
nova::rabbit_userid: {get_input: rabbit_username}
nova::rabbit_password: {get_input: rabbit_password}
@@ -1630,6 +1664,7 @@ resources:
tripleo::firewall::manage_firewall: {get_input: manage_firewall}
tripleo::firewall::purge_firewall_rules: {get_input: purge_firewall_rules}
# Misc
+ memcached_ipv6: {get_input: memcached_ipv6}
memcached::listen_ip: {get_input: memcached_network}
neutron_public_interface_ip: {get_input: neutron_public_interface_ip}
ntp::servers: {get_input: ntp_servers}
@@ -1642,6 +1677,7 @@ resources:
tripleo::loadbalancer::service_certificate: {get_attr: [NodeTLSData, deployed_ssl_certificate_path]}
tripleo::loadbalancer::haproxy_stats_user: {get_input: haproxy_stats_user}
tripleo::loadbalancer::haproxy_stats_password: {get_input: haproxy_stats_password}
+ tripleo::loadbalancer::redis_password: {get_input: redis_password}
tripleo::packages::enable_install: {get_input: enable_package_install}
tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
diff --git a/puppet/hieradata/common.yaml b/puppet/hieradata/common.yaml
index 98cec364..46471c3b 100644
--- a/puppet/hieradata/common.yaml
+++ b/puppet/hieradata/common.yaml
@@ -5,6 +5,9 @@ ssh::server::storeconfigs_enabled: false
ceilometer::agent::auth::auth_region: 'regionOne'
ceilometer::agent::auth::auth_tenant_name: 'service'
+aodh::auth::auth_region: 'regionOne'
+aodh::auth::auth_tenant_name: 'service'
+
nova::api::admin_tenant_name: 'service'
nova::network::neutron::neutron_project_name: 'service'
nova::network::neutron::neutron_username: 'neutron'
diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml
index 288d224f..3e523f1b 100644
--- a/puppet/hieradata/controller.yaml
+++ b/puppet/hieradata/controller.yaml
@@ -35,6 +35,7 @@ keystone::roles::admin::email: 'root@localhost'
# service tenant
glance::api::keystone_tenant: 'service'
+aodh::api::keystone_tenant: 'service'
glance::registry::keystone_tenant: 'service'
neutron::server::auth_tenant: 'service'
neutron::agents::metadata::auth_tenant: 'service'
@@ -80,7 +81,6 @@ glance::api::pipeline: 'keystone'
glance::api::show_image_direct_url: true
glance::registry::pipeline: 'keystone'
glance::backend::swift::swift_store_create_container_on_put: true
-glance::backend::rbd::rbd_store_user: 'openstack'
glance_file_pcmk_directory: '/var/lib/glance/images'
# neutron
@@ -150,6 +150,7 @@ tripleo::loadbalancer::redis: true
tripleo::loadbalancer::sahara: true
tripleo::loadbalancer::swift_proxy_server: true
tripleo::loadbalancer::ceilometer: true
+tripleo::loadbalancer::aodh: true
tripleo::loadbalancer::heat_api: true
tripleo::loadbalancer::heat_cloudwatch: true
tripleo::loadbalancer::heat_cfn: true
@@ -262,3 +263,7 @@ tripleo::firewall::firewall_rules:
'127 snmp':
port: 161
proto: 'udp'
+ '128 aodh':
+ port:
+ - 8042
+ - 13042
diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp
index 5556a40c..5b3e8f77 100644
--- a/puppet/manifests/overcloud_controller.pp
+++ b/puppet/manifests/overcloud_controller.pp
@@ -123,6 +123,7 @@ if hiera('step') >= 2 {
include ::sahara::db::mysql
if downcase(hiera('ceilometer_backend')) == 'mysql' {
include ::ceilometer::db::mysql
+ include ::aodh::db::mysql
}
$rabbit_nodes = hiera('rabbit_node_ips')
@@ -267,8 +268,15 @@ if hiera('step') >= 3 {
include ::glance::notify::rabbitmq
include join(['::glance::backend::', $glance_backend])
+ $nova_ipv6 = hiera('nova::use_ipv6', false)
+ if $nova_ipv6 {
+ $memcached_servers = suffix(hiera('memcache_node_ips_v6'), ':11211')
+ } else {
+ $memcached_servers = suffix(hiera('memcache_node_ips'), ':11211')
+ }
+
class { '::nova' :
- memcached_servers => suffix(hiera('memcache_node_ips'), ':11211'),
+ memcached_servers => $memcached_servers
}
include ::nova::config
include ::nova::api
@@ -603,6 +611,21 @@ if hiera('step') >= 3 {
Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
+ # Aodh
+ class { '::aodh' :
+ database_connection => $ceilometer_database_connection,
+ }
+ include ::aodh::db::sync
+ # To manage the upgrade:
+ Exec['ceilometer-dbsync'] -> Exec['aodh-db-sync']
+ include ::aodh::auth
+ include ::aodh::api
+ include ::aodh::wsgi::apache
+ include ::aodh::evaluator
+ include ::aodh::notifier
+ include ::aodh::listener
+ include ::aodh::client
+
# Heat
class { '::heat' :
notification_driver => 'messaging',
@@ -626,8 +649,15 @@ if hiera('step') >= 3 {
}
$neutron_options = {'profile_support' => $_profile_support }
+ $memcached_ipv6 = hiera('memcached_ipv6', false)
+ if $memcached_ipv6 {
+ $horizon_memcached_servers = hiera('memcache_node_ips_v6', '[::1]')
+ } else {
+ $horizon_memcached_servers = hiera('memcache_node_ips', '127.0.0.1')
+ }
+
class { '::horizon':
- cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'),
+ cache_server_ip => $horizon_memcached_servers,
neutron_options => $neutron_options,
}
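Note: the IPv6 branches above are driven entirely by hiera lookups. A sketch of the hieradata they expect, with hypothetical keys and addresses; the real memcache_node_ips_v6 list is presumably generated alongside memcache_node_ips and its exact element format may differ:

  memcached_ipv6: true
  nova::use_ipv6: true
  memcache_node_ips_v6:
    - '[fd00:fd00:fd00:2000::10]'   # hypothetical address
    - '[fd00:fd00:fd00:2000::11]'
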
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
index db3d8652..7c5fd6bd 100644
--- a/puppet/manifests/overcloud_controller_pacemaker.pp
+++ b/puppet/manifests/overcloud_controller_pacemaker.pp
@@ -68,9 +68,6 @@ if hiera('step') >= 1 {
} else {
$cluster_setup_extras = {}
}
- user { 'hacluster':
- ensure => present,
- } ->
class { '::pacemaker':
hacluster_pwd => hiera('hacluster_pwd'),
} ->
@@ -1121,11 +1118,45 @@ if hiera('step') >= 3 {
$_profile_support = 'None'
}
$neutron_options = {'profile_support' => $_profile_support }
+
+ $memcached_ipv6 = hiera('memcached_ipv6', false)
+ if $memcached_ipv6 {
+ $horizon_memcached_servers = hiera('memcache_node_ips_v6', '[::1]')
+ } else {
+ $horizon_memcached_servers = hiera('memcache_node_ips', '127.0.0.1')
+ }
+
class { '::horizon':
- cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'),
+ cache_server_ip => $horizon_memcached_servers,
neutron_options => $neutron_options,
}
+ # Aodh
+ class { '::aodh' :
+ database_connection => $ceilometer_database_connection,
+ }
+ include ::aodh::config
+ include ::aodh::auth
+ include ::aodh::client
+ include ::aodh::wsgi::apache
+ class { '::aodh::api':
+ manage_service => false,
+ enabled => false,
+ service_name => 'httpd',
+ }
+ class { '::aodh::evaluator':
+ manage_service => false,
+ enabled => false,
+ }
+ class { '::aodh::notifier':
+ manage_service => false,
+ enabled => false,
+ }
+ class { '::aodh::listener':
+ manage_service => false,
+ enabled => false,
+ }
+
$snmpd_user = hiera('snmpd_readonly_user_name')
snmp::snmpv3_user { $snmpd_user:
authtype => 'MD5',
@@ -1616,7 +1647,7 @@ if hiera('step') >= 4 {
Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
}
- # Ceilometer
+ # Ceilometer and Aodh
case downcase(hiera('ceilometer_backend')) {
/mysql/: {
pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
@@ -1649,8 +1680,10 @@ if hiera('step') >= 4 {
# Fedora doesn't know `require-all` parameter for constraints yet
if $::operatingsystem == 'Fedora' {
$redis_ceilometer_constraint_params = undef
+ $redis_aodh_constraint_params = undef
} else {
$redis_ceilometer_constraint_params = 'require-all=false'
+ $redis_aodh_constraint_params = 'require-all=false'
}
pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint':
constraint_type => 'order',
@@ -1662,6 +1695,16 @@ if hiera('step') >= 4 {
require => [Pacemaker::Resource::Ocf['redis'],
Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name]],
}
+ pacemaker::constraint::base { 'redis-then-aodh-evaluator-constraint':
+ constraint_type => 'order',
+ first_resource => 'redis-master',
+ second_resource => "${::aodh::params::evaluator_service_name}-clone",
+ first_action => 'promote',
+ second_action => 'start',
+ constraint_params => $redis_aodh_constraint_params,
+ require => [Pacemaker::Resource::Ocf['redis'],
+ Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name]],
+ }
pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
constraint_type => 'order',
first_resource => 'openstack-core-clone',
@@ -1712,6 +1755,55 @@ if hiera('step') >= 4 {
require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
Pacemaker::Resource::Ocf['delay']],
}
+ # Aodh
+ pacemaker::resource::service { $::aodh::params::evaluator_service_name :
+ clone_params => 'interleave=true',
+ }
+ pacemaker::resource::service { $::aodh::params::notifier_service_name :
+ clone_params => 'interleave=true',
+ }
+ pacemaker::resource::service { $::aodh::params::listener_service_name :
+ clone_params => 'interleave=true',
+ }
+ pacemaker::constraint::base { 'aodh-delay-then-aodh-evaluator-constraint':
+ constraint_type => 'order',
+ first_resource => 'delay-clone',
+ second_resource => "${::aodh::params::evaluator_service_name}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
+ Pacemaker::Resource::Ocf['delay']],
+ }
+ pacemaker::constraint::colocation { 'aodh-evaluator-with-aodh-delay-colocation':
+ source => "${::aodh::params::evaluator_service_name}-clone",
+ target => 'delay-clone',
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
+ Pacemaker::Resource::Ocf['delay']],
+ }
+ pacemaker::constraint::base { 'aodh-evaluator-then-aodh-notifier-constraint':
+ constraint_type => 'order',
+ first_resource => "${::aodh::params::evaluator_service_name}-clone",
+ second_resource => "${::aodh::params::notifier_service_name}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
+ Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]],
+ }
+ pacemaker::constraint::colocation { 'aodh-notifier-with-aodh-evaluator-colocation':
+ source => "${::aodh::params::notifier_service_name}-clone",
+ target => "${::aodh::params::evaluator_service_name}-clone",
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
+ Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]],
+ }
+ pacemaker::constraint::colocation { 'aodh-listener-with-aodh-evaluator-colocation':
+ source => "${::aodh::params::listener_service_name}-clone",
+ target => "${::aodh::params::evaluator_service_name}-clone",
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
+ Pacemaker::Resource::Service[$::aodh::params::listener_service_name]],
+ }
if downcase(hiera('ceilometer_backend')) == 'mongodb' {
pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint':
constraint_type => 'order',
diff --git a/puppet/vip-config.yaml b/puppet/vip-config.yaml
index 5e2f698f..ebecd0cb 100644
--- a/puppet/vip-config.yaml
+++ b/puppet/vip-config.yaml
@@ -26,6 +26,7 @@ resources:
nova_api_vip: {get_input: nova_api_vip}
nova_metadata_vip: {get_input: nova_metadata_vip}
ceilometer_api_vip: {get_input: ceilometer_api_vip}
+ aodh_api_vip: {get_input: aodh_api_vip}
heat_api_vip: {get_input: heat_api_vip}
horizon_vip: {get_input: horizon_vip}
redis_vip: {get_input: redis_vip}
diff --git a/tools/yaml-validate.py b/tools/yaml-validate.py
index fe690d8c..2da873d0 100755
--- a/tools/yaml-validate.py
+++ b/tools/yaml-validate.py
@@ -24,10 +24,19 @@ def exit_usage():
def validate(filename):
print('Validating %s' % filename)
try:
- yaml.load(open(filename).read())
+ tpl = yaml.load(open(filename).read())
except Exception:
print(traceback.format_exc())
return 1
+ # yaml is OK, now walk the parameters and output a warning for unused ones
+ for p in tpl.get('parameters', {}):
+ str_p = '\'%s\'' % p
+ in_resources = str_p in str(tpl.get('resources', {}))
+ in_outputs = str_p in str(tpl.get('outputs', {}))
+ if not in_resources and not in_outputs:
+ print('Warning: parameter %s in template %s appears to be unused'
+ % (p, filename))
+
return 0
if len(sys.argv) < 2:
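
Note: with the new check, any template parameter that never appears in the resources or outputs sections is reported. A hypothetical template (not part of this change) that would now trigger the warning:

  heat_template_version: 2015-04-30
  parameters:
    UnusedParam:        # referenced nowhere below, so validate() prints a warning
      type: string
      default: ''
  resources: {}
  outputs: {}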