author		Yunhong Jiang <yunhong.jiang@intel.com>	2015-08-04 12:17:53 -0700
committer	Yunhong Jiang <yunhong.jiang@intel.com>	2015-08-04 15:44:42 -0700
commit		9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 (patch)
tree		1c9cafbcd35f783a87880a10f85d1a060db1a563 /kernel/include/linux/bit_spinlock.h
parent		98260f3884f4a202f9ca5eabed40b1354c489b29 (diff)
Add the rt linux 4.1.3-rt3 as base
Import the rt linux 4.1.3-rt3 as OPNFV kvm base.
It's from git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git linux-4.1.y-rt and
the base is:
commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Sat Jul 25 12:13:34 2015 +0200
Prepare v4.1.3-rt3
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Importing this way loses all of the git history, which is not ideal. We
should switch to a dedicated OPNFV project repo in the future.
Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Diffstat (limited to 'kernel/include/linux/bit_spinlock.h')
-rw-r--r--	kernel/include/linux/bit_spinlock.h	100
1 file changed, 100 insertions, 0 deletions
diff --git a/kernel/include/linux/bit_spinlock.h b/kernel/include/linux/bit_spinlock.h
new file mode 100644
index 000000000..3b5bafce4
--- /dev/null
+++ b/kernel/include/linux/bit_spinlock.h
@@ -0,0 +1,100 @@
+#ifndef __LINUX_BIT_SPINLOCK_H
+#define __LINUX_BIT_SPINLOCK_H
+
+#include <linux/kernel.h>
+#include <linux/preempt.h>
+#include <linux/atomic.h>
+#include <linux/bug.h>
+
+/*
+ * bit-based spin_lock()
+ *
+ * Don't use this unless you really need to: spin_lock() and spin_unlock()
+ * are significantly faster.
+ */
+static inline void bit_spin_lock(int bitnum, unsigned long *addr)
+{
+	/*
+	 * Assuming the lock is uncontended, this never enters
+	 * the body of the outer loop. If it is contended, then
+	 * within the inner loop a non-atomic test is used to
+	 * busywait with less bus contention for a good time to
+	 * attempt to acquire the lock bit.
+	 */
+	preempt_disable();
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
+		preempt_enable();
+		do {
+			cpu_relax();
+		} while (test_bit(bitnum, addr));
+		preempt_disable();
+	}
+#endif
+	__acquire(bitlock);
+}
+
+/*
+ * Return true if it was acquired
+ */
+static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
+{
+	preempt_disable();
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	if (unlikely(test_and_set_bit_lock(bitnum, addr))) {
+		preempt_enable();
+		return 0;
+	}
+#endif
+	__acquire(bitlock);
+	return 1;
+}
+
+/*
+ * bit-based spin_unlock()
+ */
+static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+	BUG_ON(!test_bit(bitnum, addr));
+#endif
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	clear_bit_unlock(bitnum, addr);
+#endif
+	preempt_enable();
+	__release(bitlock);
+}
+
+/*
+ * bit-based spin_unlock()
+ * non-atomic version, which can be used eg. if the bit lock itself is
+ * protecting the rest of the flags in the word.
+ */
+static inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+	BUG_ON(!test_bit(bitnum, addr));
+#endif
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	__clear_bit_unlock(bitnum, addr);
+#endif
+	preempt_enable();
+	__release(bitlock);
+}
+
+/*
+ * Return true if the lock is held.
+ */
+static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
+{
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	return test_bit(bitnum, addr);
+#elif defined CONFIG_PREEMPT_COUNT
+	return preempt_count();
+#else
+	return 1;
+#endif
+}
+
+#endif /* __LINUX_BIT_SPINLOCK_H */
+
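For context on how this header is typically used (not part of the commit above): a bit
spinlock packs the lock into a single bit of an existing word, so the remaining bits of
that word stay available as data. The non-atomic __bit_spin_unlock() is legal exactly when
the lock bit protects the rest of the flags word, as its comment states. Below is a minimal
sketch assuming a kernel build context; struct my_obj, MY_OBJ_LOCK_BIT and my_obj_set_flag
are hypothetical names for illustration only, not APIs introduced by this patch.

#include <linux/bit_spinlock.h>
#include <linux/bitops.h>

#define MY_OBJ_LOCK_BIT	0		/* hypothetical: bit 0 of ->flags is the lock */

struct my_obj {				/* hypothetical example structure */
	unsigned long flags;		/* bit 0 = lock, remaining bits = data */
};

static void my_obj_set_flag(struct my_obj *obj, int flag_bit)
{
	/* Take the bit lock; preemption stays disabled while it is held. */
	bit_spin_lock(MY_OBJ_LOCK_BIT, &obj->flags);

	/* A non-atomic update is safe here: the lock bit serializes all writers. */
	__set_bit(flag_bit, &obj->flags);

	/*
	 * The non-atomic unlock is allowed because the same lock bit protects
	 * every other bit in the word (see __bit_spin_unlock() above).
	 */
	__bit_spin_unlock(MY_OBJ_LOCK_BIT, &obj->flags);
}

The trade-off is density versus overhead: a full spinlock per object costs extra storage,
while a bit spinlock reuses one bit of an existing word at the price of busy-waiting with
preemption disabled, which is why the header warns that spin_lock()/spin_unlock() are
significantly faster when you can afford them.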