Diffstat (limited to 'puppet/services')
-rw-r--r--   puppet/services/README.rst              | 22
-rw-r--r--   puppet/services/ceph-mon.yaml           | 21
-rw-r--r--   puppet/services/ceph-osd.yaml           | 41
-rw-r--r--   puppet/services/cinder-api.yaml         |  3
-rw-r--r--   puppet/services/gnocchi-metricd.yaml    |  2
-rw-r--r--   puppet/services/horizon.yaml            | 10
-rw-r--r--   puppet/services/ironic-api.yaml         |  4
-rw-r--r--   puppet/services/ironic-conductor.yaml   |  7
-rw-r--r--   puppet/services/services.yaml           |  5
-rw-r--r--   puppet/services/snmp.yaml               |  4
-rw-r--r--   puppet/services/swift-ringbuilder.yaml  | 12
-rw-r--r--   puppet/services/swift-storage.yaml      | 13
12 files changed, 141 insertions(+), 3 deletions(-)
diff --git a/puppet/services/README.rst b/puppet/services/README.rst
index 6e4e9c1d..34cb350b 100644
--- a/puppet/services/README.rst
+++ b/puppet/services/README.rst
@@ -49,6 +49,28 @@ are re-asserted when applying latter ones.
5) Service activation (Pacemaker)
+Batch Upgrade Steps
+-------------------
+
+Each service template may optionally define an `upgrade_batch_tasks` key, which
+is a list of Ansible tasks to be performed during the upgrade process.
+
+Similar to the step_config, we allow a series of steps for the per-service
+upgrade sequence, defined as Ansible tasks with a tag, e.g. "step1" for the
+first step, "step2" for the second, etc. Each step is performed in batches
+before we move on to the next step, which is also performed in batches; we
+do not run every step on one node before moving to the next node, so you can
+sequence rolling upgrades of dependent services via the step value.
+
+The tasks performed at each step are service specific, but note that all batch
+upgrade steps are performed before the `upgrade_tasks` described below. This
+means that all services that support rolling upgrades can be upgraded without
+downtime during `upgrade_batch_tasks`, then any remaining services are stopped
+and upgraded during `upgrade_tasks`.
+
+The default batch size is 1, but this can be overridden for each role via the
+`upgrade_batch_size` option in roles_data.yaml.
+
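+For example, a service template might carry a minimal entry like the
+following (a sketch only; the service name and task are illustrative)::
+
+  upgrade_batch_tasks:
+    - name: Stop example service before updating its package
+      tags: step1
+      service: name=example-service state=stopped
+
+and a role could opt in to larger batches in roles_data.yaml with::
+
+  - name: Compute
+    upgrade_batch_size: 3
+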
Upgrade Steps
-------------
diff --git a/puppet/services/ceph-mon.yaml b/puppet/services/ceph-mon.yaml
index 68ad69b7..0c61305d 100644
--- a/puppet/services/ceph-mon.yaml
+++ b/puppet/services/ceph-mon.yaml
@@ -113,3 +113,24 @@ outputs:
get_attr: [CephBase, role_data, service_config_settings]
step_config: |
include ::tripleo::profile::base::ceph::mon
+ upgrade_batch_tasks:
+ # Note we perform these tasks in list order, but they are all step0 so
+ # we can perform a rolling upgrade of all mon nodes in step0, then a
+ # rolling upgrade of all osd nodes in step1
+ # FIXME(shardy) I suspect we can use heat or ansible facts here instead?
+ - name: Get hostname
+ tags: step0
+ shell: hostname -s
+ register: mon_id
+ - name: Stop Ceph Mon
+ tags: step0
+ service: name=ceph-mon@{{mon_id.stdout}} pattern=ceph-mon state=stopped
+ - name: Update ceph packages
+ tags: step0
+ yum: name=ceph-mon,ceph state=latest
+ - name: Start ceph-mon service
+ tags: step0
+ service: name=ceph-mon@{{mon_id.stdout}} state=started
+ - name: ceph osd crush tunables default
+ tags: step0
+ shell: ceph osd crush tunables default
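As the FIXME above notes, the `hostname -s` shell task could probably be
replaced by an Ansible fact. A minimal sketch, assuming fact gathering is
enabled for the upgrade play so that `ansible_hostname` (the short hostname)
is available::

  - name: Stop Ceph Mon
    tags: step0
    service: name=ceph-mon@{{ ansible_hostname }} pattern=ceph-mon state=stopped

This would remove the need for the registered `mon_id` variable entirely.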
diff --git a/puppet/services/ceph-osd.yaml b/puppet/services/ceph-osd.yaml
index df0ee6c3..e9ed6c29 100644
--- a/puppet/services/ceph-osd.yaml
+++ b/puppet/services/ceph-osd.yaml
@@ -45,3 +45,44 @@ outputs:
- '6800-7300'
step_config: |
include ::tripleo::profile::base::ceph::osd
+ upgrade_batch_tasks:
+ - name: Get OSD IDs
+ tags: step1
+ shell: ls /var/lib/ceph/osd | awk 'BEGIN { FS = "-" } ; { print $2 }'
+ register: osd_ids
+ # "so that mirrors aren't rebalanced as if the OSD died" - gfidente / leseb
+ - name: ceph osd set noout
+ tags: step1
+ command: ceph osd set noout
+ - name: ceph osd set norebalance
+ tags: step1
+ command: ceph osd set norebalance
+ - name: ceph osd set nodeep-scrub
+ tags: step1
+ command: ceph osd set nodeep-scrub
+ - name: ceph osd set noscrub
+ tags: step1
+ command: ceph osd set noscrub
+ - name: Stop Ceph OSD
+ tags: step1
+ service: name=ceph-osd@{{ item }} state=stopped
+ with_items: "{{osd_ids.stdout.strip().split()}}"
+ - name: Update ceph OSD packages
+ tags: step1
+ yum: name=ceph-osd state=latest
+ - name: Start ceph-osd service
+ tags: step1
+ service: name=ceph-osd@{{ item }} state=started
+ with_items: "{{osd_ids.stdout.strip().split()}}"
+ - name: ceph osd unset noout
+ tags: step1
+ command: ceph osd unset noout
+ - name: ceph osd unset norebalance
+ tags: step1
+ command: ceph osd unset norebalance
+ - name: ceph osd unset nodeep-scrub
+ tags: step1
+ command: ceph osd unset nodeep-scrub
+ - name: ceph osd unset noscrub
+ tags: step1
+ command: ceph osd unset noscrub
diff --git a/puppet/services/cinder-api.yaml b/puppet/services/cinder-api.yaml
index a5c912ed..e3c96325 100644
--- a/puppet/services/cinder-api.yaml
+++ b/puppet/services/cinder-api.yaml
@@ -149,6 +149,9 @@ outputs:
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
+ - name: "PreUpgrade step0: Check service openstack-cinder-api is running"
+ shell: /usr/bin/systemctl show 'openstack-cinder-api' --property ActiveState | grep '\bactive\b'
+ tags: step0,validation
- name: check for cinder running under apache (post upgrade)
tags: step2
shell: "apachectl -t -D DUMP_VHOSTS | grep -q cinder"
diff --git a/puppet/services/gnocchi-metricd.yaml b/puppet/services/gnocchi-metricd.yaml
index e5f9a8e7..27700606 100644
--- a/puppet/services/gnocchi-metricd.yaml
+++ b/puppet/services/gnocchi-metricd.yaml
@@ -22,7 +22,7 @@ parameters:
default: 'overcloud-gnocchi-metricd'
type: string
GnocchiMetricdWorkers:
- default: ''
+ default: '%{::os_workers}'
description: Number of workers for Gnocchi MetricD
type: string
diff --git a/puppet/services/horizon.yaml b/puppet/services/horizon.yaml
index cf35d202..2111021b 100644
--- a/puppet/services/horizon.yaml
+++ b/puppet/services/horizon.yaml
@@ -27,6 +27,14 @@ parameters:
description: A list of IP/Hostname for the server Horizon is running on.
Used for header checks.
type: comma_delimited_list
+ HorizonPasswordValidator:
+ description: Regex for password validation
+ type: string
+ default: ''
+ HorizonPasswordValidatorHelp:
+ description: Help text for password validation
+ type: string
+ default: ''
HorizonSecret:
description: Secret key for Django
type: string
@@ -71,6 +79,8 @@ outputs:
options: ['FollowSymLinks','MultiViews']
horizon::bind_address: {get_param: [ServiceNetMap, HorizonNetwork]}
horizon::keystone_url: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ horizon::password_validator: {get_param: [HorizonPasswordValidator]}
+ horizon::password_validator_help: {get_param: [HorizonPasswordValidatorHelp]}
horizon::secret_key:
yaql:
expression: $.data.passwords.where($ != '').first()
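The two new Horizon parameters can be wired up from a standard Heat
environment file; a minimal sketch (the regex and help text are illustrative
only)::

  parameter_defaults:
    HorizonPasswordValidator: '^.{8,}$'
    HorizonPasswordValidatorHelp: 'Password must be at least 8 characters long.'

Both default to empty strings, so existing deployments keep their current
behaviour unless a validator is explicitly configured.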
diff --git a/puppet/services/ironic-api.yaml b/puppet/services/ironic-api.yaml
index aebb37b2..ff91eb63 100644
--- a/puppet/services/ironic-api.yaml
+++ b/puppet/services/ironic-api.yaml
@@ -81,3 +81,7 @@ outputs:
ironic::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
+ upgrade_tasks:
+ - name: Stop ironic_api service
+ tags: step2
+ service: name=openstack-ironic-api state=stopped
diff --git a/puppet/services/ironic-conductor.yaml b/puppet/services/ironic-conductor.yaml
index 194afec7..a10c03a5 100644
--- a/puppet/services/ironic-conductor.yaml
+++ b/puppet/services/ironic-conductor.yaml
@@ -98,3 +98,10 @@ outputs:
step_config: |
include ::tripleo::profile::base::ironic::conductor
+ upgrade_tasks:
+ - name: Stop ironic_conductor service
+ tags: step2
+ service: name=openstack-ironic-conductor state=stopped
+ - name: Sync ironic_conductor DB
+ tags: step5
+ command: ironic-dbsync
diff --git a/puppet/services/services.yaml b/puppet/services/services.yaml
index 90268c78..80da5352 100644
--- a/puppet/services/services.yaml
+++ b/puppet/services/services.yaml
@@ -118,4 +118,9 @@ outputs:
# Note we use distinct() here to filter any identical tasks, e.g. yum update for all services
expression: $.data.where($ != null).select($.get('upgrade_tasks')).where($ != null).flatten().distinct()
data: {get_attr: [ServiceChain, role_data]}
+ upgrade_batch_tasks:
+ yaql:
+ # Note we use distinct() here to filter any identical tasks, e.g. yum update for all services
+ expression: $.data.where($ != null).select($.get('upgrade_batch_tasks')).where($ != null).flatten().distinct()
+ data: {get_attr: [ServiceChain, role_data]}
service_metadata_settings: {get_attr: [ServiceServerMetadataHook, metadata]}
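To illustrate the aggregation (an illustrative sketch, not output from a real
deployment): if two services both emit the same package-update task, the
per-service lists are flattened and de-duplicated::

  # per-service lists collected from ServiceChain
  - [{name: Update all packages, tags: step3}]
  - [{name: Update all packages, tags: step3}]
  # after flatten().distinct()
  - {name: Update all packages, tags: step3}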
diff --git a/puppet/services/snmp.yaml b/puppet/services/snmp.yaml
index be9d143e..fd6ed818 100644
--- a/puppet/services/snmp.yaml
+++ b/puppet/services/snmp.yaml
@@ -43,3 +43,7 @@ outputs:
proto: 'udp'
step_config: |
include ::tripleo::profile::base::snmp
+ upgrade_tasks:
+ - name: Stop snmp service
+ tags: step2
+ service: name=snmpd state=stopped
diff --git a/puppet/services/swift-ringbuilder.yaml b/puppet/services/swift-ringbuilder.yaml
index a7ba7bad..2e3c818f 100644
--- a/puppet/services/swift-ringbuilder.yaml
+++ b/puppet/services/swift-ringbuilder.yaml
@@ -43,6 +43,16 @@ parameters:
description: 'Use a local directory for Swift storage services when building rings'
type: boolean
+conditions:
+ swift_use_local_dir:
+ and:
+ - equals:
+ - get_param: SwiftUseLocalDir
+ - true
+ - equals:
+ - get_param: SwiftRawDisks
+ - {}
+
outputs:
role_data:
description: Role data for Swift Ringbuilder configuration.
@@ -59,7 +69,7 @@ outputs:
expression: $.data.raw_disk_lists.flatten()
data:
raw_disk_lists:
- - {if: [{get_param: SwiftUseLocalDir}, [':%PORT%/d1'], []]}
+ - {if: [swift_use_local_dir, [':%PORT%/d1'], []]}
- repeat:
template: ':%PORT%/DEVICE'
for_each:
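With the new condition, the local `:%PORT%/d1` device is only added to the
ring when `SwiftUseLocalDir` is true and `SwiftRawDisks` is empty, so defining
any raw disks is enough to drop the loopback device. A minimal environment-file
sketch (the device name is illustrative)::

  parameter_defaults:
    SwiftRawDisks:
      sdb: {}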
diff --git a/puppet/services/swift-storage.yaml b/puppet/services/swift-storage.yaml
index 08df928d..247b23ff 100644
--- a/puppet/services/swift-storage.yaml
+++ b/puppet/services/swift-storage.yaml
@@ -56,6 +56,17 @@ resources:
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+conditions:
+ swift_mount_check:
+ or:
+ - equals:
+ - get_param: SwiftMountCheck
+ - true
+ - not:
+ equals:
+ - get_param: SwiftRawDisks
+ - {}
+
outputs:
role_data:
description: Role data for the Swift Proxy role.
@@ -65,7 +76,7 @@ outputs:
config_settings:
map_merge:
- get_attr: [SwiftBase, role_data, config_settings]
- - swift::storage::all::mount_check: {get_param: SwiftMountCheck}
+ - swift::storage::all::mount_check: {if: [swift_mount_check, true, false]}
tripleo::profile::base::swift::storage::enable_swift_storage: {get_param: ControllerEnableSwiftStorage}
tripleo.swift_storage.firewall_rules:
'123 swift storage':
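The matching `swift_mount_check` condition resolves mount_check to true when
either `SwiftMountCheck` is set or any entries exist in `SwiftRawDisks`, so
deployments using real block devices get mount checking without setting the
flag. To force it on anyway, a minimal sketch::

  parameter_defaults:
    SwiftMountCheck: true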