author | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-11 10:41:07 +0300
---|---|---
committer | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-13 08:17:18 +0300
commit | e09b41010ba33a20a87472ee821fa407a5b8da36 (patch) |
tree | d10dc367189862e7ca5c592f033dc3726e1df4e3 | /kernel/arch/tile/lib
parent | f93b97fd65072de626c074dbe099a1fff05ce060 (diff) |
These changes are the raw update to linux-4.4.6-rt14. The kernel sources
are taken from kernel.org, and the RT patch from the RT wiki download page.
During the rebase, the following patch conflicted:
Force tick interrupt and get rid of softirq magic (I70131fb85).
The conflicting hunks were dropped, since their logic was already
present in the source.
Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/arch/tile/lib')
-rw-r--r-- | kernel/arch/tile/lib/atomic_32.c | 23
-rw-r--r-- | kernel/arch/tile/lib/atomic_asm_32.S | 4
-rw-r--r-- | kernel/arch/tile/lib/exports.c | 3
-rw-r--r-- | kernel/arch/tile/lib/memcpy_user_64.c | 4
-rw-r--r-- | kernel/arch/tile/lib/spinlock_32.c | 11
-rw-r--r-- | kernel/arch/tile/lib/spinlock_64.c | 11
-rw-r--r-- | kernel/arch/tile/lib/usercopy_32.S | 46
-rw-r--r-- | kernel/arch/tile/lib/usercopy_64.S | 46
8 files changed, 49 insertions, 99 deletions
diff --git a/kernel/arch/tile/lib/atomic_32.c b/kernel/arch/tile/lib/atomic_32.c
index c89b211fd..298df1e99 100644
--- a/kernel/arch/tile/lib/atomic_32.c
+++ b/kernel/arch/tile/lib/atomic_32.c
@@ -94,6 +94,12 @@ unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask)
 }
 EXPORT_SYMBOL(_atomic_or);
 
+unsigned long _atomic_and(volatile unsigned long *p, unsigned long mask)
+{
+	return __atomic_and((int *)p, __atomic_setup(p), mask).val;
+}
+EXPORT_SYMBOL(_atomic_and);
+
 unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
 {
 	return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
@@ -136,6 +142,23 @@ long long _atomic64_cmpxchg(long long *v, long long o, long long n)
 }
 EXPORT_SYMBOL(_atomic64_cmpxchg);
 
+long long _atomic64_and(long long *v, long long n)
+{
+	return __atomic64_and(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_and);
+
+long long _atomic64_or(long long *v, long long n)
+{
+	return __atomic64_or(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_or);
+
+long long _atomic64_xor(long long *v, long long n)
+{
+	return __atomic64_xor(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_xor);
 
 /*
  * If any of the atomic or futex routines hit a bad address (not in
diff --git a/kernel/arch/tile/lib/atomic_asm_32.S b/kernel/arch/tile/lib/atomic_asm_32.S
index 6bda3132c..f61126563 100644
--- a/kernel/arch/tile/lib/atomic_asm_32.S
+++ b/kernel/arch/tile/lib/atomic_asm_32.S
@@ -178,6 +178,7 @@ atomic_op _xchg_add, 32, "add r24, r22, r2"
 atomic_op _xchg_add_unless, 32, \
 	"sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }"
 atomic_op _or, 32, "or r24, r22, r2"
+atomic_op _and, 32, "and r24, r22, r2"
 atomic_op _andn, 32, "nor r2, r2, zero; and r24, r22, r2"
 atomic_op _xor, 32, "xor r24, r22, r2"
 
@@ -191,6 +192,9 @@ atomic_op 64_xchg_add_unless, 64, \
 	{ bbns r26, 3f; add r24, r22, r4 }; \
 	{ bbns r27, 3f; add r25, r23, r5 }; \
 	slt_u r26, r24, r22; add r25, r25, r26"
+atomic_op 64_or, 64, "{ or r24, r22, r2; or r25, r23, r3 }"
+atomic_op 64_and, 64, "{ and r24, r22, r2; and r25, r23, r3 }"
+atomic_op 64_xor, 64, "{ xor r24, r22, r2; xor r25, r23, r3 }"
 
 	jrp lr		/* happy backtracer */
diff --git a/kernel/arch/tile/lib/exports.c b/kernel/arch/tile/lib/exports.c
index 82733c87d..9d171ca43 100644
--- a/kernel/arch/tile/lib/exports.c
+++ b/kernel/arch/tile/lib/exports.c
@@ -18,8 +18,6 @@
 
 /* arch/tile/lib/usercopy.S */
 #include <linux/uaccess.h>
-EXPORT_SYMBOL(strnlen_user_asm);
-EXPORT_SYMBOL(strncpy_from_user_asm);
 EXPORT_SYMBOL(clear_user_asm);
 EXPORT_SYMBOL(flush_user_asm);
 EXPORT_SYMBOL(finv_user_asm);
@@ -28,7 +26,6 @@ EXPORT_SYMBOL(finv_user_asm);
 #include <linux/kernel.h>
 #include <asm/processor.h>
 EXPORT_SYMBOL(current_text_addr);
-EXPORT_SYMBOL(dump_stack);
 
 /* arch/tile/kernel/head.S */
 EXPORT_SYMBOL(empty_zero_page);
diff --git a/kernel/arch/tile/lib/memcpy_user_64.c b/kernel/arch/tile/lib/memcpy_user_64.c
index 88c701649..97bbb6060 100644
--- a/kernel/arch/tile/lib/memcpy_user_64.c
+++ b/kernel/arch/tile/lib/memcpy_user_64.c
@@ -28,7 +28,7 @@
 #define _ST(p, inst, v) \
 ({ \
 	asm("1: " #inst " %0, %1;" \
-	    ".pushsection .coldtext.memcpy,\"ax\";" \
+	    ".pushsection .coldtext,\"ax\";" \
 	    "2: { move r0, %2; jrp lr };" \
 	    ".section __ex_table,\"a\";" \
 	    ".align 8;" \
@@ -41,7 +41,7 @@
 ({ \
 	unsigned long __v; \
 	asm("1: " #inst " %0, %1;" \
-	    ".pushsection .coldtext.memcpy,\"ax\";" \
+	    ".pushsection .coldtext,\"ax\";" \
 	    "2: { move r0, %2; jrp lr };" \
 	    ".section __ex_table,\"a\";" \
 	    ".align 8;" \
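The atomic hunks above fill in the and/or/xor family for tile, with each stub returning the value the word held before the operation. As a point of reference only (the helper name below is hypothetical; nothing in this tree uses it), the same contract can be sketched in portable C as a compare-exchange loop:

```c
#include <stdint.h>

/*
 * Minimal sketch, not the kernel's implementation: an atomic AND that
 * reports the pre-operation value, the same contract the tile
 * _atomic_and stub provides via its assembly fast path.
 */
static uint32_t atomic_and_return_old(volatile uint32_t *p, uint32_t mask)
{
	uint32_t old, new;

	do {
		old = *p;          /* snapshot the current value */
		new = old & mask;  /* the value we want to install */
		/* retry if another CPU changed *p since the snapshot */
	} while (__sync_val_compare_and_swap(p, old, new) != old);

	return old;
}
```

The asm versions avoid a visible retry loop by serializing callers instead, which is why each new operation is a one-line `atomic_op` entry rather than a loop.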
diff --git a/kernel/arch/tile/lib/spinlock_32.c b/kernel/arch/tile/lib/spinlock_32.c
index b34f79aad..88c2a5336 100644
--- a/kernel/arch/tile/lib/spinlock_32.c
+++ b/kernel/arch/tile/lib/spinlock_32.c
@@ -65,8 +65,17 @@ EXPORT_SYMBOL(arch_spin_trylock);
 void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	u32 iterations = 0;
-	while (arch_spin_is_locked(lock))
+	int curr = READ_ONCE(lock->current_ticket);
+	int next = READ_ONCE(lock->next_ticket);
+
+	/* Return immediately if unlocked. */
+	if (next == curr)
+		return;
+
+	/* Wait until the current locker has released the lock. */
+	do {
 		delay_backoff(iterations++);
+	} while (READ_ONCE(lock->current_ticket) == curr);
 }
 EXPORT_SYMBOL(arch_spin_unlock_wait);
 
diff --git a/kernel/arch/tile/lib/spinlock_64.c b/kernel/arch/tile/lib/spinlock_64.c
index d6fb9581e..c8d1f94ff 100644
--- a/kernel/arch/tile/lib/spinlock_64.c
+++ b/kernel/arch/tile/lib/spinlock_64.c
@@ -65,8 +65,17 @@ EXPORT_SYMBOL(arch_spin_trylock);
 void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	u32 iterations = 0;
-	while (arch_spin_is_locked(lock))
+	u32 val = READ_ONCE(lock->lock);
+	u32 curr = arch_spin_current(val);
+
+	/* Return immediately if unlocked. */
+	if (arch_spin_next(val) == curr)
+		return;
+
+	/* Wait until the current locker has released the lock. */
+	do {
 		delay_backoff(iterations++);
+	} while (arch_spin_current(READ_ONCE(lock->lock)) == curr);
 }
 EXPORT_SYMBOL(arch_spin_unlock_wait);
 
diff --git a/kernel/arch/tile/lib/usercopy_32.S b/kernel/arch/tile/lib/usercopy_32.S
index 1bc162224..db93ad5fa 100644
--- a/kernel/arch/tile/lib/usercopy_32.S
+++ b/kernel/arch/tile/lib/usercopy_32.S
@@ -20,52 +20,6 @@
 /* Access user memory, but use MMU to avoid propagating kernel exceptions. */
 
 /*
- * strnlen_user_asm takes the pointer in r0, and the length bound in r1.
- * It returns the length, including the terminating NUL, or zero on exception.
- * If length is greater than the bound, returns one plus the bound.
- */
-STD_ENTRY(strnlen_user_asm)
-	{ bz r1, 2f; addi r3, r0, -1 }  /* bias down to include NUL */
-1:	{ lb_u r4, r0; addi r1, r1, -1 }
-	bz r4, 2f
-	{ bnzt r1, 1b; addi r0, r0, 1 }
-2:	{ sub r0, r0, r3; jrp lr }
-	STD_ENDPROC(strnlen_user_asm)
-	.pushsection .fixup,"ax"
-strnlen_user_fault:
-	{ move r0, zero; jrp lr }
-	ENDPROC(strnlen_user_fault)
-	.section __ex_table,"a"
-	.align 4
-	.word 1b, strnlen_user_fault
-	.popsection
-
-/*
- * strncpy_from_user_asm takes the kernel target pointer in r0,
- * the userspace source pointer in r1, and the length bound (including
- * the trailing NUL) in r2.  On success, it returns the string length
- * (not including the trailing NUL), or -EFAULT on failure.
- */
-STD_ENTRY(strncpy_from_user_asm)
-	{ bz r2, 2f; move r3, r0 }
-1:	{ lb_u r4, r1; addi r1, r1, 1; addi r2, r2, -1 }
-	{ sb r0, r4; addi r0, r0, 1 }
-	bz r4, 2f
-	bnzt r2, 1b
-	{ sub r0, r0, r3; jrp lr }
-2:	addi r0, r0, -1  /* don't count the trailing NUL */
-	{ sub r0, r0, r3; jrp lr }
-	STD_ENDPROC(strncpy_from_user_asm)
-	.pushsection .fixup,"ax"
-strncpy_from_user_fault:
-	{ movei r0, -EFAULT; jrp lr }
-	ENDPROC(strncpy_from_user_fault)
-	.section __ex_table,"a"
-	.align 4
-	.word 1b, strncpy_from_user_fault
-	.popsection
-
-/*
  * clear_user_asm takes the user target address in r0 and the
  * number of bytes to zero in r1.
  * It returns the number of uncopiable bytes (hopefully zero) in r0.
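The spinlock_32.c and spinlock_64.c hunks above change arch_spin_unlock_wait() from "spin while anyone holds the lock" to "wait until the holder observed on entry releases it". With a ticket lock, that only requires watching the current ticket advance past the value sampled on entry; spinning until the lock looks free could stall indefinitely behind a stream of later acquirers. A minimal user-space sketch of the pattern (stand-in types and field names, no kernel APIs):

```c
#include <stdint.h>

/* Stand-in for the kernel's arch_spinlock_t ticket pair. */
struct ticket_lock {
	volatile uint16_t current_ticket;  /* ticket now being served */
	volatile uint16_t next_ticket;     /* next ticket to hand out */
};

static void ticket_unlock_wait(struct ticket_lock *lock)
{
	uint16_t curr = lock->current_ticket;
	uint16_t next = lock->next_ticket;

	/* Unlocked on entry: nothing to wait for. */
	if (next == curr)
		return;

	/*
	 * Locked on entry: wait only until the holder we observed
	 * advances current_ticket.  Later acquirers bump next_ticket,
	 * not current_ticket, so they cannot keep us spinning here.
	 */
	while (lock->current_ticket == curr)
		;  /* the kernel backs off here via delay_backoff() */
}
```

The point of the change is semantic: unlock_wait only needs to witness one release by the owner it saw, not an unlocked state of the whole lock.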
diff --git a/kernel/arch/tile/lib/usercopy_64.S b/kernel/arch/tile/lib/usercopy_64.S
index b3b31a330..9322dc551 100644
--- a/kernel/arch/tile/lib/usercopy_64.S
+++ b/kernel/arch/tile/lib/usercopy_64.S
@@ -20,52 +20,6 @@
 /* Access user memory, but use MMU to avoid propagating kernel exceptions. */
 
 /*
- * strnlen_user_asm takes the pointer in r0, and the length bound in r1.
- * It returns the length, including the terminating NUL, or zero on exception.
- * If length is greater than the bound, returns one plus the bound.
- */
-STD_ENTRY(strnlen_user_asm)
-	{ beqz r1, 2f; addi r3, r0, -1 }  /* bias down to include NUL */
-1:	{ ld1u r4, r0; addi r1, r1, -1 }
-	beqz r4, 2f
-	{ bnezt r1, 1b; addi r0, r0, 1 }
-2:	{ sub r0, r0, r3; jrp lr }
-	STD_ENDPROC(strnlen_user_asm)
-	.pushsection .fixup,"ax"
-strnlen_user_fault:
-	{ move r0, zero; jrp lr }
-	ENDPROC(strnlen_user_fault)
-	.section __ex_table,"a"
-	.align 8
-	.quad 1b, strnlen_user_fault
-	.popsection
-
-/*
- * strncpy_from_user_asm takes the kernel target pointer in r0,
- * the userspace source pointer in r1, and the length bound (including
- * the trailing NUL) in r2.  On success, it returns the string length
- * (not including the trailing NUL), or -EFAULT on failure.
- */
-STD_ENTRY(strncpy_from_user_asm)
-	{ beqz r2, 2f; move r3, r0 }
-1:	{ ld1u r4, r1; addi r1, r1, 1; addi r2, r2, -1 }
-	{ st1 r0, r4; addi r0, r0, 1 }
-	beqz r4, 2f
-	bnezt r2, 1b
-	{ sub r0, r0, r3; jrp lr }
-2:	addi r0, r0, -1  /* don't count the trailing NUL */
-	{ sub r0, r0, r3; jrp lr }
-	STD_ENDPROC(strncpy_from_user_asm)
-	.pushsection .fixup,"ax"
-strncpy_from_user_fault:
-	{ movei r0, -EFAULT; jrp lr }
-	ENDPROC(strncpy_from_user_fault)
-	.section __ex_table,"a"
-	.align 8
-	.quad 1b, strncpy_from_user_fault
-	.popsection
-
-/*
  * clear_user_asm takes the user target address in r0 and the
  * number of bytes to zero in r1.
  * It returns the number of uncopiable bytes (hopefully zero) in r0.
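The usercopy hunks delete the hand-written byte-at-a-time strnlen_user_asm/strncpy_from_user_asm routines, and their EXPORT_SYMBOL entries go away in exports.c above; around this kernel version tile moved to the generic strnlen_user()/strncpy_from_user() in lib/, which scan a word at a time. The core of that generic code is the classic zero-byte test sketched below; nul_offset() is illustrative only and assumes a little-endian layout, as on TILE-Gx:

```c
#include <stddef.h>
#include <stdint.h>

/* Repeated 0x01 and 0x80 byte masks used by the word-at-a-time trick. */
#define ONES  0x0101010101010101UL
#define HIGHS 0x8080808080808080UL

/* Nonzero iff at least one byte of x is 0x00. */
static inline int has_zero_byte(uint64_t x)
{
	return ((x - ONES) & ~x & HIGHS) != 0;
}

/* Illustrative: offset of the first NUL in an aligned buffer (little-endian). */
static size_t nul_offset(const uint64_t *words, size_t max_words)
{
	for (size_t i = 0; i < max_words; i++) {
		uint64_t w = words[i];
		if (has_zero_byte(w)) {
			size_t off = i * 8;
			while ((w & 0xff) != 0) {  /* locate the zero byte */
				w >>= 8;
				off++;
			}
			return off;
		}
	}
	return max_words * 8;
}
```

Subtracting ONES borrows through a 0x00 byte and sets its high bit; ANDing with ~x rejects bytes whose high bit was already set, so only genuine zero bytes survive the HIGHS mask. This lets the generic routines test eight bytes per iteration instead of one.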