#ifndef __LINUX_SPINLOCK_RT_H
#define __LINUX_SPINLOCK_RT_H

#ifndef __LINUX_SPINLOCK_H
#error Do not include directly. Use spinlock.h
#endif
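
/*
 * On PREEMPT_RT, spinlock_t is substituted by a sleeping lock built on
 * top of rt_mutex: contended lock operations block and are subject to
 * priority inheritance instead of spinning, and critical sections
 * become preemptible.  This header provides the spin_*() API on top of
 * the rt_spin_*() primitives.
 */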

#include <linux/bug.h>

extern void
__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);

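/*
 * Initialize the underlying rt_mutex and register the lock with
 * lockdep.  The static lock_class_key gives every spin_lock_init()
 * call site its own lockdep class.
 */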
#define spin_lock_init(slock)				\
do {							\
	static struct lock_class_key __key;		\
							\
	rt_mutex_init(&(slock)->lock);			\
	__rt_spin_lock_init(slock, #slock, &__key);	\
} while (0)
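
/*
 * Usage stays source-compatible with !RT kernels; a minimal sketch
 * (my_lock is just an illustrative name):
 *
 *	spinlock_t my_lock;
 *
 *	spin_lock_init(&my_lock);
 *	spin_lock(&my_lock);
 *	... critical section, preemptible on RT ...
 *	spin_unlock(&my_lock);
 */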

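/*
 * The rt_spin_*() primitives below implement the actual lock
 * operations on the rt_mutex; on contention they block rather than
 * spin.
 */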
extern void __lockfunc rt_spin_lock(spinlock_t *lock);
extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);

/*
 * lockdep-less calls, for derived types like rwlock:
 * (for trylock they can use rt_mutex_trylock() directly.)
 */
extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock);

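/*
 * On RT the lock operation pairs with migrate_disable() instead of
 * preempt_disable(): the task may block while holding the lock, but
 * it must stay on its CPU so that per-CPU data accessed under the
 * lock remains consistent.
 */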
#define spin_lock(lock)				\
	do {					\
		migrate_disable();		\
		rt_spin_lock(lock);		\
	} while (0)

#define spin_lock_bh(lock)			\
	do {					\
		local_bh_disable();		\
		migrate_disable();		\
		rt_spin_lock(lock);		\
	} while (0)

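/*
 * Interrupt handlers are force-threaded on RT, so the _irq and
 * _irqsave variants do not actually disable interrupts; they map to
 * the plain lock operation.
 */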
#define spin_lock_irq(lock)		spin_lock(lock)

#define spin_do_trylock(lock)		__cond_lock(lock, rt_spin_trylock(lock))

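/*
 * Trylock must only keep migration disabled when the lock was
 * actually acquired; on failure migrate_enable() restores the
 * previous state.
 */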
#define spin_trylock(lock)			\
({						\
	int __locked;				\
	migrate_disable();			\
	__locked = spin_do_trylock(lock);	\
	if (!__locked)				\
		migrate_enable();		\
	__locked;				\
})

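/*
 * The _nested variants pass a subclass to lockdep to annotate
 * legitimate nesting of locks of the same class; without lockdep
 * they fall back to the plain variants.
 */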
#ifdef CONFIG_LOCKDEP
# define spin_lock_nested(lock, subclass)		\
	do {						\
		migrate_disable();			\
		rt_spin_lock_nested(lock, subclass);	\
	} while (0)

# define spin_lock_bh_nested(lock, subclass)		\
	do {						\
		local_bh_disable();			\
		migrate_disable();			\
		rt_spin_lock_nested(lock, subclass);	\
	} while (0)

# define spin_lock_irqsave_nested(lock, flags, subclass) \
	do {						 \
		typecheck(unsigned long, flags);	 \
		flags = 0;				 \
		migrate_disable();			 \
		rt_spin_lock_nested(lock, subclass);	 \
	} while (0)
#else
# define spin_lock_nested(lock, subclass)	spin_lock(lock)
# define spin_lock_bh_nested(lock, subclass)	spin_lock_bh(lock)

# define spin_lock_irqsave_nested(lock, flags, subclass) \
	do {						 \
		typecheck(unsigned long, flags);	 \
		flags = 0;				 \
		spin_lock(lock);			 \
	} while (0)
#endif

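/*
 * Interrupts stay enabled; flags is only typechecked and zeroed so
 * that existing callers keep compiling unchanged.
 */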
#define spin_lock_irqsave(lock, flags)			 \
	do {						 \
		typecheck(unsigned long, flags);	 \
		flags = 0;				 \
		spin_lock(lock);			 \
	} while (0)

static inline unsigned long spin_lock_trace_flags(spinlock_t *lock)
{
	unsigned long flags = 0;
#ifdef CONFIG_TRACE_IRQFLAGS
	flags = rt_spin_lock_trace_flags(lock);
#else
	spin_lock(lock); /* no irqflags tracing, flags stays 0 */
#endif
	return flags;
}

/* FIXME: we need rt_spin_lock_nest_lock */
#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)

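/*
 * Unlock in the reverse order of spin_lock(): drop the lock first,
 * then re-enable migration.
 */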
#define spin_unlock(lock)				\
	do {						\
		rt_spin_unlock(lock);			\
		migrate_enable();			\
	} while (0)

#define spin_unlock_bh(lock)				\
	do {						\
		rt_spin_unlock(lock);			\
		migrate_enable();			\
		local_bh_enable();			\
	} while (0)

#define spin_unlock_irq(lock)		spin_unlock(lock)

#define spin_unlock_irqrestore(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		(void) flags;				\
		spin_unlock(lock);			\
	} while (0)

#define spin_trylock_bh(lock)	__cond_lock(lock, rt_spin_trylock_bh(lock))
#define spin_trylock_irq(lock)	spin_trylock(lock)

#define spin_trylock_irqsave(lock, flags)	\
	rt_spin_trylock_irqsave(lock, &(flags))

#define spin_unlock_wait(lock)		rt_spin_unlock_wait(lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
# define spin_is_contended(lock)	((lock)->break_lock)
#else
# define spin_is_contended(lock)	(((void)(lock), 0))
#endif

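/*
 * These query the state of the underlying rt_mutex.  The answer is
 * inherently racy unless the caller otherwise serializes against the
 * lock, so treat it as advisory.
 */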
static inline int spin_can_lock(spinlock_t *lock)
{
	return !rt_mutex_is_locked(&lock->lock);
}

static inline int spin_is_locked(spinlock_t *lock)
{
	return rt_mutex_is_locked(&lock->lock);
}

static inline void assert_spin_locked(spinlock_t *lock)
{
	BUG_ON(!spin_is_locked(lock));
}

#define atomic_dec_and_lock(atomic, lock) \
	atomic_dec_and_spin_lock(atomic, lock)

#endif /* __LINUX_SPINLOCK_RT_H */