path: root/kernel/include/linux/spinlock_api_smp.h
author    Yunhong Jiang <yunhong.jiang@intel.com>    2015-08-04 12:17:53 -0700
committer Yunhong Jiang <yunhong.jiang@intel.com>    2015-08-04 15:44:42 -0700
commit    9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 (patch)
tree      1c9cafbcd35f783a87880a10f85d1a060db1a563 /kernel/include/linux/spinlock_api_smp.h
parent    98260f3884f4a202f9ca5eabed40b1354c489b29 (diff)
Add the rt linux 4.1.3-rt3 as base
Import the rt linux 4.1.3-rt3 as OPNFV kvm base. It's from
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
linux-4.1.y-rt and the base is:

    commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
    Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
    Date:   Sat Jul 25 12:13:34 2015 +0200

        Prepare v4.1.3-rt3

        Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

We lose all the git history this way and it's not good. We should apply
another opnfv project repo in future.

Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Diffstat (limited to 'kernel/include/linux/spinlock_api_smp.h')
-rw-r--r--  kernel/include/linux/spinlock_api_smp.h  196
1 file changed, 196 insertions, 0 deletions
diff --git a/kernel/include/linux/spinlock_api_smp.h b/kernel/include/linux/spinlock_api_smp.h
new file mode 100644
index 000000000..043263f30
--- /dev/null
+++ b/kernel/include/linux/spinlock_api_smp.h
@@ -0,0 +1,196 @@
+#ifndef __LINUX_SPINLOCK_API_SMP_H
+#define __LINUX_SPINLOCK_API_SMP_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/spinlock_api_smp.h
+ *
+ * spinlock API declarations on SMP (and debug)
+ * (implemented in kernel/locking/spinlock.c)
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+int in_lock_functions(unsigned long addr);
+
+#define assert_raw_spin_locked(x) BUG_ON(!raw_spin_is_locked(x))
+
+void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
+void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
+	__acquires(lock);
+void __lockfunc _raw_spin_lock_bh_nested(raw_spinlock_t *lock, int subclass)
+	__acquires(lock);
+void __lockfunc
+_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
+	__acquires(lock);
+void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
+void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
+	__acquires(lock);
+
+unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
+	__acquires(lock);
+unsigned long __lockfunc
+_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
+	__acquires(lock);
+int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
+int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
+void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc
+_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
+	__releases(lock);
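+
+/*
+ * The __acquires()/__releases() annotations above are sparse context
+ * hints; sparse uses them to check that lock acquire/release calls
+ * stay balanced across function boundaries.
+ */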
+
+#ifdef CONFIG_INLINE_SPIN_LOCK
+#define _raw_spin_lock(lock) __raw_spin_lock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_SPIN_LOCK_BH
+#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
+#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
+#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
+#endif
+
+#ifdef CONFIG_INLINE_SPIN_TRYLOCK
+#define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
+#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
+#endif
+
+#ifndef CONFIG_UNINLINE_SPIN_UNLOCK
+#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
+#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
+#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
+#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
+#endif
+
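+/*
+ * The __raw_* helpers below pair the arch-level do_raw_spin_* operation
+ * with preempt/irq/bh control and lockdep acquire/release tracking.
+ * They are either substituted for the _raw_* declarations via the
+ * CONFIG_INLINE_* macros above, or called by the out-of-line _raw_*
+ * implementations in kernel/locking/spinlock.c.
+ */
+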
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+	preempt_disable();
+	if (do_raw_spin_trylock(lock)) {
+		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+		return 1;
+	}
+	preempt_enable();
+	return 0;
+}
+
+/*
+ * If lockdep is enabled then we use the non-preemption spin-ops
+ * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
+ * not re-enabled during lock-acquire (which the preempt-spin-ops do):
+ */
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+
+static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	preempt_disable();
+	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	/*
+	 * On lockdep we don't want the hand-coded irq-enable of
+	 * do_raw_spin_lock_flags() code, because lockdep assumes
+	 * that interrupts are not re-enabled during lock-acquire:
+	 */
+#ifdef CONFIG_LOCKDEP
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
+#else
+	do_raw_spin_lock_flags(lock, &flags);
+#endif
+	return flags;
+}
+
+static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
+{
+	local_irq_disable();
+	preempt_disable();
+	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
+}
+
+static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
+{
+	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
+	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
+}
+
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+	preempt_disable();
+	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
+}
+
+#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
+
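+/*
+ * Unlock paths: the lockdep mapping is released first, then the lock
+ * itself, and only then are preemption, irqs or bottom halves
+ * re-enabled, at which point a pending reschedule may run.
+ */
+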
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+	spin_release(&lock->dep_map, 1, _RET_IP_);
+	do_raw_spin_unlock(lock);
+	preempt_enable();
+}
+
+static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
+						unsigned long flags)
+{
+	spin_release(&lock->dep_map, 1, _RET_IP_);
+	do_raw_spin_unlock(lock);
+	local_irq_restore(flags);
+	preempt_enable();
+}
+
+static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
+{
+	spin_release(&lock->dep_map, 1, _RET_IP_);
+	do_raw_spin_unlock(lock);
+	local_irq_enable();
+	preempt_enable();
+}
+
+static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
+{
+	spin_release(&lock->dep_map, 1, _RET_IP_);
+	do_raw_spin_unlock(lock);
+	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
+}
+
+static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
+{
+	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
+	if (do_raw_spin_trylock(lock)) {
+		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+		return 1;
+	}
+	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
+	return 0;
+}
+
+#ifndef CONFIG_PREEMPT_RT_FULL
+# include <linux/rwlock_api_smp.h>
+#endif
+
+#endif /* __LINUX_SPINLOCK_API_SMP_H */
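
For context, kernel code does not call these _raw_* entry points directly; it uses the raw_spin_lock_*() wrappers from linux/spinlock.h, which resolve to the functions declared above on SMP builds. Below is a minimal caller-side sketch of the two common patterns (irqsave and trylock); the demo_dev structure and demo_* function names are hypothetical, used only for illustration:

#include <linux/spinlock.h>

/* Hypothetical driver state guarded by a raw spinlock. */
struct demo_dev {
	raw_spinlock_t lock;
	unsigned int count;
};

static void demo_init(struct demo_dev *dev)
{
	raw_spin_lock_init(&dev->lock);
	dev->count = 0;
}

/* Safe from any context: the irq-flags word produced by
 * _raw_spin_lock_irqsave() is restored on unlock, so the caller's
 * interrupt state is preserved whether or not irqs were enabled. */
static void demo_inc(struct demo_dev *dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&dev->lock, flags);
	dev->count++;
	raw_spin_unlock_irqrestore(&dev->lock, flags);
}

/* Trylock variant: never spins, so it suits paths where waiting on
 * the lock could deadlock; returns 0 if the lock is already held. */
static int demo_inc_try(struct demo_dev *dev)
{
	if (!raw_spin_trylock(&dev->lock))
		return 0;
	dev->count++;
	raw_spin_unlock(&dev->lock);
	return 1;
}

Note that on this RT-patched tree the raw_/non-raw distinction matters: under CONFIG_PREEMPT_RT_FULL, spinlock_t is replaced by a sleeping lock (hence the guarded rwlock_api_smp.h include above), so only raw_spinlock_t users still reach the functions in this header.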