author		José Pekkarinen <jose.pekkarinen@nokia.com>	2016-04-11 10:41:07 +0300
committer	José Pekkarinen <jose.pekkarinen@nokia.com>	2016-04-13 08:17:18 +0300
commit		e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree		d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/arch/mips/include/asm/spinlock.h
parent		f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. Kernel sources are taken from kernel.org, and the RT patch from the RT wiki download page. During the rebase, one patch collided: "Force tick interrupt and get rid of softirq magic" (I70131fb85). The colliding hunks were dropped because their logic was already present in the source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/arch/mips/include/asm/spinlock.h')
-rw-r--r--	kernel/arch/mips/include/asm/spinlock.h	9
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/kernel/arch/mips/include/asm/spinlock.h b/kernel/arch/mips/include/asm/spinlock.h
index 1fca2e079..40196bebe 100644
--- a/kernel/arch/mips/include/asm/spinlock.h
+++ b/kernel/arch/mips/include/asm/spinlock.h
@@ -42,6 +42,11 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 	return ((counters >> 16) ^ counters) & 0xffff;
 }
 
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return lock.h.serving_now == lock.h.ticket;
+}
+
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 #define arch_spin_unlock_wait(x) \
 	while (arch_spin_is_locked(x)) { cpu_relax(); }
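
The helper added above reports a lock value as free when serving_now has caught up with ticket. Below is a minimal user-space sketch of that check, using a hypothetical demo_spinlock_t that mirrors the serving_now/ticket split seen in this file; field names, ordering, and values are illustrative only, not taken from the kernel headers.

/* demo_spin_value_unlocked.c - illustrative only, not kernel code */
#include <stdint.h>
#include <stdio.h>

typedef union {
	uint32_t lock;                 /* raw 32-bit lock word */
	struct {
		uint16_t serving_now;  /* ticket currently being served */
		uint16_t ticket;       /* next ticket to hand out */
	} h;
} demo_spinlock_t;

/* Unlocked when nobody is queued: serving_now has caught up with ticket. */
static int demo_spin_value_unlocked(demo_spinlock_t lock)
{
	return lock.h.serving_now == lock.h.ticket;
}

int main(void)
{
	demo_spinlock_t free_lock = { .h = { .serving_now = 7, .ticket = 7 } };
	demo_spinlock_t held_lock = { .h = { .serving_now = 7, .ticket = 9 } };

	printf("free_lock unlocked: %d\n", demo_spin_value_unlocked(free_lock)); /* 1 */
	printf("held_lock unlocked: %d\n", demo_spin_value_unlocked(held_lock)); /* 0 */
	return 0;
}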
@@ -109,7 +114,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
" subu %[ticket], %[my_ticket], %[ticket] \n"
"2: \n"
" .subsection 2 \n"
- "4: andi %[ticket], %[ticket], 0x1fff \n"
+ "4: andi %[ticket], %[ticket], 0xffff \n"
" sll %[ticket], 5 \n"
" \n"
"6: bnez %[ticket], 6b \n"
@@ -317,7 +322,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
-	smp_mb();
+	smp_mb__before_llsc();
 
 	__asm__ __volatile__(
 	"				# arch_write_unlock	\n"