path: root/kernel/include/asm-generic/irqflags.h
#ifndef __ASM_GENERIC_IRQFLAGS_H
#define __ASM_GENERIC_IRQFLAGS_H

/*
 * All architectures should implement at least the first two functions below,
 * arch_local_save_flags() and arch_local_irq_restore(); usually inline
 * assembly is the best way to do so.
 */
#ifndef ARCH_IRQ_DISABLED
#define ARCH_IRQ_DISABLED 0
#define ARCH_IRQ_ENABLED 1
#endif

/* read interrupt enabled status */
#ifndef arch_local_save_flags
unsigned long arch_local_save_flags(void);
#endif

/* set interrupt enabled status */
#ifndef arch_local_irq_restore
void arch_local_irq_restore(unsigned long flags);
#endif

/* get status and disable interrupts */
#ifndef arch_local_irq_save
static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags;
	flags = arch_local_save_flags();
	arch_local_irq_restore(ARCH_IRQ_DISABLED);
	return flags;
}
#endif

/* test flags */
#ifndef arch_irqs_disabled_flags
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return flags == ARCH_IRQ_DISABLED;
}
#endif

/* unconditionally enable interrupts */
#ifndef arch_local_irq_enable
static inline void arch_local_irq_enable(void)
{
	arch_local_irq_restore(ARCH_IRQ_ENABLED);
}
#endif

/* unconditionally disable interrupts */
#ifndef arch_local_irq_disable
static inline void arch_local_irq_disable(void)
{
	arch_local_irq_restore(ARCH_IRQ_DISABLED);
}
#endif

/* test hardware interrupt enable bit */
#ifndef arch_irqs_disabled
static inline int arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}
#endif

#endif /* __ASM_GENERIC_IRQFLAGS_H */
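As the comment at the top notes, each architecture is expected to provide at least arch_local_save_flags() and arch_local_irq_restore(), usually as inline assembly. Purely as a hedged illustration (not copied from any real arch header), an x86-flavoured pair might look roughly like the sketch below; an architecture like this would also override the generic helpers above, since its flags value is the raw flags register rather than ARCH_IRQ_ENABLED/ARCH_IRQ_DISABLED.

/*
 * Illustrative sketch only: real architectures define these in their own
 * asm/irqflags.h and may use a different flags encoding.
 */
static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;

	/* Read the flags register; the IF bit reports whether IRQs are on. */
	asm volatile("pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");
	return flags;
}

static inline void arch_local_irq_restore(unsigned long flags)
{
	/* Write the saved flags back, re-enabling or disabling IRQs. */
	asm volatile("push %0 ; popf"
		     : /* no output */
		     : "g" (flags)
		     : "memory", "cc");
}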
/* Excerpt from asm-generic/qrwlock.h (the start of the file is not shown). */

extern void queued_write_lock_slowpath(struct qrwlock *lock);

/**
 * queued_read_can_lock- would read_trylock() succeed?
 * @lock: Pointer to queue rwlock structure
 */
static inline int queued_read_can_lock(struct qrwlock *lock)
{
	return !(atomic_read(&lock->cnts) & _QW_WMASK);
}

/**
 * queued_write_can_lock- would write_trylock() succeed?
 * @lock: Pointer to queue rwlock structure
 */
static inline int queued_write_can_lock(struct qrwlock *lock)
{
	return !atomic_read(&lock->cnts);
}

/**
 * queued_read_trylock - try to acquire read lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_read_trylock(struct qrwlock *lock)
{
	u32 cnts;

	cnts = atomic_read(&lock->cnts);
	if (likely(!(cnts & _QW_WMASK))) {
		cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
		if (likely(!(cnts & _QW_WMASK)))
			return 1;
		atomic_sub(_QR_BIAS, &lock->cnts);
	}
	return 0;
}

/**
 * queued_write_trylock - try to acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_write_trylock(struct qrwlock *lock)
{
	u32 cnts;

	cnts = atomic_read(&lock->cnts);
	if (unlikely(cnts))
		return 0;

	return likely(atomic_cmpxchg_acquire(&lock->cnts,
					     cnts, cnts | _QW_LOCKED) == cnts);
}

/**
 * queued_read_lock - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
static inline void queued_read_lock(struct qrwlock *lock)
{
	u32 cnts;

	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
	if (likely(!(cnts & _QW_WMASK)))
		return;

	/* The slowpath will decrement the reader count, if necessary. */
	queued_read_lock_slowpath(lock, cnts);
}

/**
 * queued_write_lock - acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_write_lock(struct qrwlock *lock)
{
	/* Optimize for the unfair lock case where the fair flag is 0. */
	if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
		return;

	queued_write_lock_slowpath(lock);
}

/**
 * queued_read_unlock - release read lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_read_unlock(struct qrwlock *lock)
{
	/*
	 * Atomically decrement the reader count
	 */
	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
}

/**
 * queued_write_unlock - release write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_write_unlock(struct qrwlock *lock)
{
	smp_store_release((u8 *)&lock->cnts, 0);
}

/*
 * Remapping rwlock architecture specific functions to the corresponding
 * queue rwlock functions.
 */
#define arch_read_can_lock(l)		queued_read_can_lock(l)
#define arch_write_can_lock(l)		queued_write_can_lock(l)
#define arch_read_lock(l)		queued_read_lock(l)
#define arch_write_lock(l)		queued_write_lock(l)
#define arch_read_trylock(l)		queued_read_trylock(l)
#define arch_write_trylock(l)		queued_write_trylock(l)
#define arch_read_unlock(l)		queued_read_unlock(l)
#define arch_write_unlock(l)		queued_write_unlock(l)

#endif	/* __ASM_GENERIC_QRWLOCK_H */
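The read-side fast path above works by unconditionally adding _QR_BIAS to the combined reader/writer word and backing the increment out if a writer turned out to hold the lock. A minimal user-space sketch of that idea using C11 atomics (hypothetical toy_* names and constants, not the kernel API) could look like:

#include <stdatomic.h>
#include <stdint.h>

#define TOY_QW_WMASK	0x1ffU	/* writer + waiting-writer bits (assumed layout) */
#define TOY_QR_BIAS	0x200U	/* each reader adds this to the count */

struct toy_rwlock {
	_Atomic uint32_t cnts;
};

/* Try to take the lock for reading; returns 1 on success, 0 otherwise. */
static int toy_read_trylock(struct toy_rwlock *lock)
{
	uint32_t cnts = atomic_load_explicit(&lock->cnts, memory_order_relaxed);

	if (!(cnts & TOY_QW_WMASK)) {
		/* Optimistically announce ourselves as a reader. */
		cnts = atomic_fetch_add_explicit(&lock->cnts, TOY_QR_BIAS,
						 memory_order_acquire) + TOY_QR_BIAS;
		if (!(cnts & TOY_QW_WMASK))
			return 1;
		/* A writer beat us: undo the reader increment and fail. */
		atomic_fetch_sub_explicit(&lock->cnts, TOY_QR_BIAS,
					  memory_order_relaxed);
	}
	return 0;
}

static void toy_read_unlock(struct toy_rwlock *lock)
{
	atomic_fetch_sub_explicit(&lock->cnts, TOY_QR_BIAS, memory_order_release);
}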