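# Teuthology test fragment (intent inferred from the tasks below): run a
# cache-tiering rados workload in parallel with an upgrade sequence that
# installs the jewel branch on osd.0 and osd.2 and restarts the daemons.
# The names under "parallel" refer to the top-level keys defined further down.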
tasks:
- parallel:
  - workload
  - upgrade-sequence
- print: "**** done parallel"
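# Client workload run while the upgrade proceeds: a mixed rados load (reads,
# writes, deletes, copy_from) plus cache_flush/cache_try_flush/cache_evict
# operations against base-pool, exercising the cache tier during restarts.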
workload:
  sequential:
  - rados:
      clients: [client.0]
      pools: [base-pool]
      ops: 4000
      objects: 500
      op_weights:
        read: 100
        write: 100
        delete: 50
        copy_from: 50
        cache_flush: 50
        cache_try_flush: 50
        cache_evict: 50
  - print: "**** done rados"
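# Upgrade sequence: install jewel packages on osd.0 and osd.2, restart all
# OSDs and then the monitors without waiting for HEALTH_OK, set the
# post-upgrade flags, and only then wait for the cluster to become healthy.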
upgrade-sequence:
  sequential:
  - install.upgrade:
      # ceph-mgr and the libcephfs2 packages do not exist in jewel, so they
      # must be excluded for the jewel install to succeed on these nodes.
      exclude_packages:
      - ceph-mgr
      - libcephfs2
      - libcephfs-devel
      - libcephfs-dev
      osd.0:
        branch: jewel
      osd.2:
        branch: jewel
  - print: "**** done install.upgrade osd.0 and osd.2"
  - ceph.restart:
      daemons: [osd.0, osd.1, osd.2, osd.3]
      wait-for-healthy: false
      wait-for-osds-up: true
  - ceph.restart:
      daemons: [mon.a, mon.b, mon.c]
      wait-for-healthy: false
      wait-for-osds-up: true
  - print: "**** done ceph.restart do not wait for healthy"
  - exec:
      mon.a:
      - sleep 300 # http://tracker.ceph.com/issues/17808
      - ceph osd set sortbitwise
      - ceph osd set require_jewel_osds
  - ceph.healthy:
  - print: "**** done ceph.healthy"
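# A fragment like this is normally scheduled through teuthology rather than
# run directly; a hypothetical invocation (suite name, branch, and machine
# type are placeholders, not taken from this file):
#   teuthology-suite --suite <suite-name> --ceph <branch> --machine-type <type>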