path: root/src/ceph/qa/tasks/resolve_stuck_peering.py
"""
Resolve stuck peering
"""
import logging
import time

from teuthology import misc as teuthology
from util.rados import rados

log = logging.getLogger(__name__)

def task(ctx, config):
    """
    Test recovery from stuck PG peering by marking the failed primary
    OSD as lost.

    Requires 3 OSDs on a single test node.
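
    For example (illustrative fragment; assumes the standard install and
    ceph tasks are configured earlier in the job)::

        tasks:
        - install:
        - ceph:
        - resolve_stuck_peering: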
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'Resolve stuck peering only accepts a dict for config'

    manager = ctx.managers['ceph']

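    # wait until all 3 OSDs report up before starting the test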
    while len(manager.get_osd_status()['up']) < 3:
        time.sleep(10)

    manager.wait_for_clean()

    # arbitrary local files used as object payloads
    dummyfile = '/etc/fstab'
    dummyfile1 = '/etc/resolv.conf'

    # create a pool with a single PG so that all objects share one PG
    pool = 'foo'
    log.info('creating pool foo')
    manager.raw_cluster_cmd('osd', 'pool', 'create', pool, '1')

    # set min_size of the pool to 1 so that I/O can continue
    # when 2 of the 3 OSDs are down
    manager.set_pool_property(pool, "min_size", 1)

    osds = [0, 1, 2]

    primary = manager.get_pg_primary(pool, 0)
    log.info("primary osd is %d", primary)

    others = list(osds)
    others.remove(primary)

    log.info('writing initial objects')
    first_mon = teuthology.get_first_mon(ctx, config)
    (mon,) = ctx.cluster.only(first_mon).remotes.keys()
    # create a few objects
    for i in range(100):
        rados(ctx, mon, ['-p', pool, 'put', 'existing_%d' % i, dummyfile])

    manager.wait_for_clean()

    # kill and mark down all OSDs except the primary
    log.info('killing all osds except primary')
    for i in others:
        manager.kill_osd(i)
    for i in others:
        manager.mark_down_osd(i)

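    # with min_size=1 these writes succeed on the primary alone,
    # giving it history the other OSDs never see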
    for i in range(100):
        rados(ctx, mon, ['-p', 'foo', 'put', 'new_%d' % i, dummyfile1])

    # kill the primary, now the only OSD holding the new objects
    manager.kill_osd(primary)
    manager.mark_down_osd(primary)

    # revive the other 2 OSDs
    for i in others:
        manager.revive_osd(i)

    # make sure the pg goes to the 'down' state: the revived OSDs know
    # about the newer writes on the dead primary but cannot recover them
    # (pg numbering for a single-pg pool starts from 0)
    pgnum = 0
    pgstr = manager.get_pgid(pool, pgnum)
    stats = manager.get_single_pg_stats(pgstr)
    log.info(stats['state'])

    timeout = 60
    start = time.time()

    while 'down' not in stats['state']:
        assert time.time() - start < timeout, \
            'failed to reach down state before timeout expired'
        time.sleep(2)  # avoid hammering the mon with stat requests
        stats = manager.get_single_pg_stats(pgstr)

    # mark the primary as lost: this tells the cluster to give up on the
    # writes only it held, allowing the pg to peer again
    manager.raw_cluster_cmd('osd', 'lost', '%d' % primary,
                            '--yes-i-really-mean-it')

    # expect the pg to become active+undersized+degraded, then recover
    # to active+clean within the timeout
    stats = manager.get_single_pg_stats(pgstr)
    log.info(stats['state'])

    timeout = 10
    start = time.time()

    while manager.get_num_down():
        assert time.time() - start < timeout, \
            'failed to recover before timeout expired'
        time.sleep(1)  # brief pause between polls

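    # finally revive the lost primary so the cluster ends with all OSDs up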
    manager.revive_osd(primary)