diff options
author | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-11 10:41:07 +0300 |
---|---|---|
committer | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-13 08:17:18 +0300 |
commit | e09b41010ba33a20a87472ee821fa407a5b8da36 (patch) | |
tree | d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/include/linux/spinlock_rt.h | |
parent | f93b97fd65072de626c074dbe099a1fff05ce060 (diff) |
These changes are the raw update to linux-4.4.6-rt14. Kernel sources
are taken from kernel.org, and the rt patch from the rt wiki download page.
During the rebasing, the following patch collided:
Force tick interrupt and get rid of softirq magic (I70131fb85).
The collisions have been removed because their logic was already
present in the source.
Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/include/linux/spinlock_rt.h')
-rw-r--r-- | kernel/include/linux/spinlock_rt.h | 25 |
1 files changed, 7 insertions, 18 deletions
diff --git a/kernel/include/linux/spinlock_rt.h b/kernel/include/linux/spinlock_rt.h index f757096b2..3b2825537 100644 --- a/kernel/include/linux/spinlock_rt.h +++ b/kernel/include/linux/spinlock_rt.h @@ -18,6 +18,10 @@ do { \ __rt_spin_lock_init(slock, #slock, &__key); \ } while (0) +void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock); +void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock); +int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock); + extern void __lockfunc rt_spin_lock(spinlock_t *lock); extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock); extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass); @@ -32,20 +36,16 @@ extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock); * lockdep-less calls, for derived types like rwlock: * (for trylock they can use rt_mutex_trylock() directly. */ +extern void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock); extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock); extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock); extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock); -#define spin_lock(lock) \ - do { \ - migrate_disable(); \ - rt_spin_lock(lock); \ - } while (0) +#define spin_lock(lock) rt_spin_lock(lock) #define spin_lock_bh(lock) \ do { \ local_bh_disable(); \ - migrate_disable(); \ rt_spin_lock(lock); \ } while (0) @@ -56,24 +56,19 @@ extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock); #define spin_trylock(lock) \ ({ \ int __locked; \ - migrate_disable(); \ __locked = spin_do_trylock(lock); \ - if (!__locked) \ - migrate_enable(); \ __locked; \ }) #ifdef CONFIG_LOCKDEP # define spin_lock_nested(lock, subclass) \ do { \ - migrate_disable(); \ rt_spin_lock_nested(lock, subclass); \ } while (0) #define spin_lock_bh_nested(lock, subclass) \ do { \ local_bh_disable(); \ - migrate_disable(); \ rt_spin_lock_nested(lock, subclass); \ } while (0) @@ -81,7 +76,6 @@ extern int __lockfunc __rt_spin_trylock(struct 
rt_mutex *lock); do { \ typecheck(unsigned long, flags); \ flags = 0; \ - migrate_disable(); \ rt_spin_lock_nested(lock, subclass); \ } while (0) #else @@ -117,16 +111,11 @@ static inline unsigned long spin_lock_trace_flags(spinlock_t *lock) /* FIXME: we need rt_spin_lock_nest_lock */ #define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0) -#define spin_unlock(lock) \ - do { \ - rt_spin_unlock(lock); \ - migrate_enable(); \ - } while (0) +#define spin_unlock(lock) rt_spin_unlock(lock) #define spin_unlock_bh(lock) \ do { \ rt_spin_unlock(lock); \ - migrate_enable(); \ local_bh_enable(); \ } while (0) |