---
# Teuthology suite: ceph-deploy cluster install on jewel, upgrade to
# luminous, then validate with workload and systemd tasks.
meta:
- desc: |
    Setup 4 node ceph cluster using ceph-deploy, use latest
    stable jewel as initial release, upgrade to luminous and
    also setup mgr nodes along after upgrade, check for
    cluster to reach healthy state, After upgrade run kernel tar/untar
    task and systemd task. This test will detect any
    ceph upgrade issue and systemd issues.
overrides:
  ceph-deploy:
    fs: xfs
    conf:
      global:
        mon pg warn min per osd: 2
      osd:
        osd pool default size: 2
        osd objectstore: filestore
        osd sloppy crc: true
      client:
        rbd default features: 5
openstack:
- machine:
    disk: 100
- volumes:
    count: 3
    size: 30
# reluctantly :( hard-coded machine type
# it will override command line args with teuthology-suite
machine_type: vps
roles:
- - mon.a
  - mds.a
  - osd.0
  - osd.1
  - osd.2
  - mgr.x
- - mon.b
  - mgr.y
- - mon.c
  - osd.3
  - osd.4
  - osd.5
- - osd.6
  - osd.7
  - osd.8
  - client.0
tasks:
- ssh-keys:
- print: "**** done ssh-keys"
# Initial install: stable jewel, no mgr daemons (jewel has none).
- ceph-deploy:
    branch:
      stable: jewel
    skip-mgr: true
- print: "**** done initial ceph-deploy"
# Upgrade mons and one osd host to luminous; mgr nodes are created
# during the upgrade, then wait for HEALTH_OK.
- ceph-deploy.upgrade:
    branch:
      dev: luminous
    setup-mgr-node: true
    check-for-healthy: true
    roles:
    - mon.a
    - mon.b
    - mon.c
    - osd.6
- print: "**** done ceph-deploy upgrade"
# Finalize the upgrade: forbid pre-luminous osds and clients.
- exec:
    osd.0:
    - ceph osd require-osd-release luminous
    - ceph osd set-require-min-compat-client luminous
- print: "**** done `ceph osd require-osd-release luminous`"
- workunit:
    clients:
      all:
      - kernel_untar_build.sh
- print: "**** done kernel_untar_build.sh"
- systemd:
- print: "**** done systemd"
- workunit:
    clients:
      all:
      - rados/load-gen-mix.sh
- print: "**** done rados/load-gen-mix.sh"