author     José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-11 10:41:07 +0300
committer  José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-13 08:17:18 +0300
commit     e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree       d10dc367189862e7ca5c592f033dc3726e1df4e3  /kernel/include/linux/lglock.h
parent     f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. Kernel sources are taken from kernel.org, and the rt patch from the rt wiki download page. During the rebase, the following patch collided: "Force tick interrupt and get rid of softirq magic" (I70131fb85). The colliding hunks were dropped because their logic was already present in the source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/include/linux/lglock.h')
-rw-r--r--  kernel/include/linux/lglock.h | 34
1 file changed, 20 insertions, 14 deletions
diff --git a/kernel/include/linux/lglock.h b/kernel/include/linux/lglock.h
index 9603a1500..6f035f635 100644
--- a/kernel/include/linux/lglock.h
+++ b/kernel/include/linux/lglock.h
@@ -34,10 +34,10 @@
 #endif
 struct lglock {
-#ifndef CONFIG_PREEMPT_RT_FULL
-        arch_spinlock_t __percpu *lock;
-#else
+#ifdef CONFIG_PREEMPT_RT_FULL
         struct rt_mutex __percpu *lock;
+#else
+        arch_spinlock_t __percpu *lock;
 #endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
         struct lock_class_key lock_key;
@@ -45,34 +45,40 @@ struct lglock {
 #endif
 };
-#ifndef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT_FULL
 # define DEFINE_LGLOCK(name) \
-        static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
-        = __ARCH_SPIN_LOCK_UNLOCKED; \
+        static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \
+        = __RT_MUTEX_INITIALIZER( name ## _lock); \
         struct lglock name = { .lock = &name ## _lock }
 # define DEFINE_STATIC_LGLOCK(name) \
-        static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
-        = __ARCH_SPIN_LOCK_UNLOCKED; \
+        static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \
+        = __RT_MUTEX_INITIALIZER( name ## _lock); \
         static struct lglock name = { .lock = &name ## _lock }
+
 #else
-# define DEFINE_LGLOCK(name) \
-        static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \
-        = __RT_MUTEX_INITIALIZER( name ## _lock); \
+#define DEFINE_LGLOCK(name) \
+        static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
+        = __ARCH_SPIN_LOCK_UNLOCKED; \
         struct lglock name = { .lock = &name ## _lock }
-# define DEFINE_STATIC_LGLOCK(name) \
-        static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \
-        = __RT_MUTEX_INITIALIZER( name ## _lock); \
+#define DEFINE_STATIC_LGLOCK(name) \
+        static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
+        = __ARCH_SPIN_LOCK_UNLOCKED; \
         static struct lglock name = { .lock = &name ## _lock }
 #endif
 void lg_lock_init(struct lglock *lg, char *name);
+
 void lg_local_lock(struct lglock *lg);
 void lg_local_unlock(struct lglock *lg);
 void lg_local_lock_cpu(struct lglock *lg, int cpu);
 void lg_local_unlock_cpu(struct lglock *lg, int cpu);
+
+void lg_double_lock(struct lglock *lg, int cpu1, int cpu2);
+void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2);
+
 void lg_global_lock(struct lglock *lg);
 void lg_global_unlock(struct lglock *lg);
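
The declarations above make up the whole lglock ("local/global lock") API: every CPU owns one per-CPU lock (an arch_spinlock_t, or an rt_mutex when CONFIG_PREEMPT_RT_FULL substitutes sleeping locks), the fast path takes only the current CPU's lock, lg_double_lock() takes the locks of two given CPUs, and lg_global_lock() takes all of them for the rare global path. The sketch below shows how a caller might use that API; it is not part of this patch, and the lock name example_lglock and the example_* functions are hypothetical. Only the DEFINE_STATIC_LGLOCK() and lg_*() calls come from the header shown above.

        /*
         * Hypothetical user of the lglock API declared in <linux/lglock.h>.
         * The lock name and the example_* functions are made up for
         * illustration; the lg_*() calls are the ones prototyped above.
         */
        #include <linux/init.h>
        #include <linux/lglock.h>

        /* One lock per CPU: arch_spinlock_t normally, rt_mutex on PREEMPT_RT_FULL. */
        static DEFINE_STATIC_LGLOCK(example_lglock);

        static int __init example_init(void)
        {
                /* Registers the lock's name and class for lockdep. */
                lg_lock_init(&example_lglock, "example_lglock");
                return 0;
        }

        /* Fast path: serialise only against other users of this CPU's data. */
        static void example_update_local(void)
        {
                lg_local_lock(&example_lglock);
                /* ... modify this CPU's share of the protected state ... */
                lg_local_unlock(&example_lglock);
        }

        /*
         * Two-CPU path: the newly declared lg_double_lock() takes both
         * per-CPU locks in a consistent (CPU-number) order, so concurrent
         * callers cannot deadlock against each other.
         */
        static void example_update_pair(int cpu1, int cpu2)
        {
                lg_double_lock(&example_lglock, cpu1, cpu2);
                /* ... move state between cpu1 and cpu2 ... */
                lg_double_unlock(&example_lglock, cpu1, cpu2);
        }

        /* Slow path: exclude every CPU's fast path by taking all the locks. */
        static void example_rewrite_all(void)
        {
                lg_global_lock(&example_lglock);
                /* ... walk or rewrite every CPU's state ... */
                lg_global_unlock(&example_lglock);
        }

Under CONFIG_PREEMPT_RT_FULL the same calls operate on per-CPU rt_mutexes instead of raw arch spinlocks, which is exactly the substitution the #ifdef blocks in this diff arrange.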