author     José Pekkarinen <jose.pekkarinen@nokia.com>    2016-04-11 10:41:07 +0300
committer  José Pekkarinen <jose.pekkarinen@nokia.com>    2016-04-13 08:17:18 +0300
commit     e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree       d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/arch/arm64/include/asm/atomic.h
parent     f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. Kernel sources
are taken from kernel.org, and the rt patch from the rt wiki download
page. During the rebase, the following patch collided:
Force tick interrupt and get rid of softirq magic (I70131fb85).
The collision has been resolved, as its logic was already present in
the source.
Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/arch/arm64/include/asm/atomic.h')
-rw-r--r--  kernel/arch/arm64/include/asm/atomic.h  320
1 file changed, 107 insertions(+), 213 deletions(-)
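At a high level, the diff below deletes the hand-rolled LL/SC
(load-exclusive/store-exclusive) inline assembly from atomic.h and
instead selects an implementation header at compile time:
asm/atomic_lse.h when CONFIG_ARM64_LSE_ATOMICS and CONFIG_AS_LSE are
both set, asm/atomic_ll_sc.h otherwise. It also adds _relaxed,
_acquire and _release variants of the value-returning operations next
to the fully ordered unsuffixed forms. As a rough user-space sketch of
what those suffixes mean, using C11 atomics rather than the kernel API
(the my_* names are invented here for illustration):

#include <stdatomic.h>

/* Illustration only: C11 analogues of the kernel's ordering-suffixed
 * add-return variants. Each returns the post-add value, as
 * atomic_add_return*() does; only the memory-ordering strength
 * differs. */
static inline int my_add_return_relaxed(atomic_int *v, int i)
{
	return atomic_fetch_add_explicit(v, i, memory_order_relaxed) + i;
}

static inline int my_add_return_acquire(atomic_int *v, int i)
{
	return atomic_fetch_add_explicit(v, i, memory_order_acquire) + i;
}

static inline int my_add_return_release(atomic_int *v, int i)
{
	return atomic_fetch_add_explicit(v, i, memory_order_release) + i;
}

static inline int my_add_return(atomic_int *v, int i)
{
	/* the unsuffixed kernel variant is fully ordered; seq_cst is
	 * the closest C11 equivalent */
	return atomic_fetch_add_explicit(v, i, memory_order_seq_cst) + i;
}

The point of the suffixed forms is that callers that need only weaker
ordering can avoid the cost of the full barriers implied by the
unsuffixed operations.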
diff --git a/kernel/arch/arm64/include/asm/atomic.h b/kernel/arch/arm64/include/asm/atomic.h
index 7047051de..f3a3586a4 100644
--- a/kernel/arch/arm64/include/asm/atomic.h
+++ b/kernel/arch/arm64/include/asm/atomic.h
@@ -24,233 +24,127 @@
 #include <linux/types.h>
 #include <asm/barrier.h>
-#include <asm/cmpxchg.h>
-
-#define ATOMIC_INIT(i)	{ (i) }
+#include <asm/lse.h>
 
 #ifdef __KERNEL__
 
-/*
- * On ARM, ordinary assignment (str instruction) doesn't clear the local
- * strex/ldrex monitor on some implementations. The reason we can use it for
- * atomic_set() is the clrex or dummy strex done on every exception return.
- */
-#define atomic_read(v)	ACCESS_ONCE((v)->counter)
-#define atomic_set(v,i)	(((v)->counter) = (i))
-
-/*
- * AArch64 UP and SMP safe atomic ops. We use load exclusive and
- * store exclusive to ensure that these are atomic. We may loop
- * to ensure that the update happens.
- */
-
-#define ATOMIC_OP(op, asm_op)						\
-static inline void atomic_##op(int i, atomic_t *v)			\
-{									\
-	unsigned long tmp;						\
-	int result;							\
-									\
-	asm volatile("// atomic_" #op "\n"				\
-"1:	ldxr	%w0, %2\n"						\
-"	" #asm_op "	%w0, %w0, %w3\n"				\
-"	stxr	%w1, %w0, %2\n"						\
-"	cbnz	%w1, 1b"						\
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
-	: "Ir" (i));							\
-}									\
+#define __ARM64_IN_ATOMIC_IMPL
 
-#define ATOMIC_OP_RETURN(op, asm_op)					\
-static inline int atomic_##op##_return(int i, atomic_t *v)		\
-{									\
-	unsigned long tmp;						\
-	int result;							\
-									\
-	asm volatile("// atomic_" #op "_return\n"			\
-"1:	ldxr	%w0, %2\n"						\
-"	" #asm_op "	%w0, %w0, %w3\n"				\
-"	stlxr	%w1, %w0, %2\n"						\
-"	cbnz	%w1, 1b"						\
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
-	: "Ir" (i)							\
-	: "memory");							\
-									\
-	smp_mb();							\
-	return result;							\
-}
-
-#define ATOMIC_OPS(op, asm_op)						\
-	ATOMIC_OP(op, asm_op)						\
-	ATOMIC_OP_RETURN(op, asm_op)
-
-ATOMIC_OPS(add, add)
-ATOMIC_OPS(sub, sub)
-
-#undef ATOMIC_OPS
-#undef ATOMIC_OP_RETURN
-#undef ATOMIC_OP
-
-static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
-{
-	unsigned long tmp;
-	int oldval;
-
-	smp_mb();
-
-	asm volatile("// atomic_cmpxchg\n"
-"1:	ldxr	%w1, %2\n"
-"	cmp	%w1, %w3\n"
-"	b.ne	2f\n"
-"	stxr	%w0, %w4, %2\n"
-"	cbnz	%w0, 1b\n"
-"2:"
-	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
-	: "Ir" (old), "r" (new)
-	: "cc");
-
-	smp_mb();
-	return oldval;
-}
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#if defined(CONFIG_ARM64_LSE_ATOMICS) && defined(CONFIG_AS_LSE)
+#include <asm/atomic_lse.h>
+#else
+#include <asm/atomic_ll_sc.h>
+#endif
 
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
-	int c, old;
+#undef __ARM64_IN_ATOMIC_IMPL
 
-	c = atomic_read(v);
-	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
-		c = old;
-	return c;
-}
+#include <asm/cmpxchg.h>
 
-#define atomic_inc(v)		atomic_add(1, v)
-#define atomic_dec(v)		atomic_sub(1, v)
+#define ___atomic_add_unless(v, a, u, sfx)				\
+({									\
+	typeof((v)->counter) c, old;					\
+									\
+	c = atomic##sfx##_read(v);					\
+	while (c != (u) &&						\
+	      (old = atomic##sfx##_cmpxchg((v), c, c + (a))) != c)	\
+		c = old;						\
+	c;								\
+ })
 
-#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
-#define atomic_inc_return(v)	(atomic_add_return(1, v))
-#define atomic_dec_return(v)	(atomic_sub_return(1, v))
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+#define ATOMIC_INIT(i)	{ (i) }
 
-#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
+#define atomic_read(v)			READ_ONCE((v)->counter)
+#define atomic_set(v, i)		WRITE_ONCE(((v)->counter), (i))
+
+#define atomic_add_return_relaxed	atomic_add_return_relaxed
+#define atomic_add_return_acquire	atomic_add_return_acquire
+#define atomic_add_return_release	atomic_add_return_release
+#define atomic_add_return		atomic_add_return
+
+#define atomic_inc_return_relaxed(v)	atomic_add_return_relaxed(1, (v))
+#define atomic_inc_return_acquire(v)	atomic_add_return_acquire(1, (v))
+#define atomic_inc_return_release(v)	atomic_add_return_release(1, (v))
+#define atomic_inc_return(v)		atomic_add_return(1, (v))
+
+#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
+#define atomic_sub_return_acquire	atomic_sub_return_acquire
+#define atomic_sub_return_release	atomic_sub_return_release
+#define atomic_sub_return		atomic_sub_return
+
+#define atomic_dec_return_relaxed(v)	atomic_sub_return_relaxed(1, (v))
+#define atomic_dec_return_acquire(v)	atomic_sub_return_acquire(1, (v))
+#define atomic_dec_return_release(v)	atomic_sub_return_release(1, (v))
+#define atomic_dec_return(v)		atomic_sub_return(1, (v))
+
+#define atomic_xchg_relaxed(v, new)	xchg_relaxed(&((v)->counter), (new))
+#define atomic_xchg_acquire(v, new)	xchg_acquire(&((v)->counter), (new))
+#define atomic_xchg_release(v, new)	xchg_release(&((v)->counter), (new))
+#define atomic_xchg(v, new)		xchg(&((v)->counter), (new))
+
+#define atomic_cmpxchg_relaxed(v, old, new)				\
+	cmpxchg_relaxed(&((v)->counter), (old), (new))
+#define atomic_cmpxchg_acquire(v, old, new)				\
+	cmpxchg_acquire(&((v)->counter), (old), (new))
+#define atomic_cmpxchg_release(v, old, new)				\
+	cmpxchg_release(&((v)->counter), (old), (new))
+#define atomic_cmpxchg(v, old, new)	cmpxchg(&((v)->counter), (old), (new))
+
+#define atomic_inc(v)			atomic_add(1, (v))
+#define atomic_dec(v)			atomic_sub(1, (v))
+#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)
+#define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)
+#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
+#define atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
+#define __atomic_add_unless(v, a, u)	___atomic_add_unless(v, a, u,)
+#define atomic_andnot			atomic_andnot
 
 /*
  * 64-bit atomic operations.
  */
-#define ATOMIC64_INIT(i) { (i) }
-
-#define atomic64_read(v)	ACCESS_ONCE((v)->counter)
-#define atomic64_set(v,i)	(((v)->counter) = (i))
-
-#define ATOMIC64_OP(op, asm_op)						\
-static inline void atomic64_##op(long i, atomic64_t *v)		\
-{									\
-	long result;							\
-	unsigned long tmp;						\
-									\
-	asm volatile("// atomic64_" #op "\n"				\
-"1:	ldxr	%0, %2\n"						\
-"	" #asm_op "	%0, %0, %3\n"					\
-"	stxr	%w1, %0, %2\n"						\
-"	cbnz	%w1, 1b"						\
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
-	: "Ir" (i));							\
-}									\
-
-#define ATOMIC64_OP_RETURN(op, asm_op)					\
-static inline long atomic64_##op##_return(long i, atomic64_t *v)	\
-{									\
-	long result;							\
-	unsigned long tmp;						\
-									\
-	asm volatile("// atomic64_" #op "_return\n"			\
-"1:	ldxr	%0, %2\n"						\
-"	" #asm_op "	%0, %0, %3\n"					\
-"	stlxr	%w1, %0, %2\n"						\
-"	cbnz	%w1, 1b"						\
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
-	: "Ir" (i)							\
-	: "memory");							\
-									\
-	smp_mb();							\
-	return result;							\
-}
-
-#define ATOMIC64_OPS(op, asm_op)					\
-	ATOMIC64_OP(op, asm_op)						\
-	ATOMIC64_OP_RETURN(op, asm_op)
-
-ATOMIC64_OPS(add, add)
-ATOMIC64_OPS(sub, sub)
-
-#undef ATOMIC64_OPS
-#undef ATOMIC64_OP_RETURN
-#undef ATOMIC64_OP
-
-static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
-{
-	long oldval;
-	unsigned long res;
-
-	smp_mb();
-
-	asm volatile("// atomic64_cmpxchg\n"
-"1:	ldxr	%1, %2\n"
-"	cmp	%1, %3\n"
-"	b.ne	2f\n"
-"	stxr	%w0, %4, %2\n"
-"	cbnz	%w0, 1b\n"
-"2:"
-	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
-	: "Ir" (old), "r" (new)
-	: "cc");
-
-	smp_mb();
-	return oldval;
-}
-
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-
-static inline long atomic64_dec_if_positive(atomic64_t *v)
-{
-	long result;
-	unsigned long tmp;
-
-	asm volatile("// atomic64_dec_if_positive\n"
-"1:	ldxr	%0, %2\n"
-"	subs	%0, %0, #1\n"
-"	b.mi	2f\n"
-"	stlxr	%w1, %0, %2\n"
-"	cbnz	%w1, 1b\n"
-"	dmb	ish\n"
-"2:"
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	:
-	: "cc", "memory");
-
-	return result;
-}
-
-static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
-{
-	long c, old;
-
-	c = atomic64_read(v);
-	while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c)
-		c = old;
-
-	return c != u;
-}
-
-#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v)			atomic64_add(1LL, (v))
-#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
+#define ATOMIC64_INIT			ATOMIC_INIT
+#define atomic64_read			atomic_read
+#define atomic64_set			atomic_set
+
+#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
+#define atomic64_add_return_acquire	atomic64_add_return_acquire
+#define atomic64_add_return_release	atomic64_add_return_release
+#define atomic64_add_return		atomic64_add_return
+
+#define atomic64_inc_return_relaxed(v)	atomic64_add_return_relaxed(1, (v))
+#define atomic64_inc_return_acquire(v)	atomic64_add_return_acquire(1, (v))
+#define atomic64_inc_return_release(v)	atomic64_add_return_release(1, (v))
+#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
+
+#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
+#define atomic64_sub_return_acquire	atomic64_sub_return_acquire
+#define atomic64_sub_return_release	atomic64_sub_return_release
+#define atomic64_sub_return		atomic64_sub_return
+
+#define atomic64_dec_return_relaxed(v)	atomic64_sub_return_relaxed(1, (v))
+#define atomic64_dec_return_acquire(v)	atomic64_sub_return_acquire(1, (v))
+#define atomic64_dec_return_release(v)	atomic64_sub_return_release(1, (v))
+#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
+
+#define atomic64_xchg_relaxed		atomic_xchg_relaxed
+#define atomic64_xchg_acquire		atomic_xchg_acquire
+#define atomic64_xchg_release		atomic_xchg_release
+#define atomic64_xchg			atomic_xchg
+
+#define atomic64_cmpxchg_relaxed	atomic_cmpxchg_relaxed
+#define atomic64_cmpxchg_acquire	atomic_cmpxchg_acquire
+#define atomic64_cmpxchg_release	atomic_cmpxchg_release
+#define atomic64_cmpxchg		atomic_cmpxchg
+
+#define atomic64_inc(v)			atomic64_add(1, (v))
+#define atomic64_dec(v)			atomic64_sub(1, (v))
 #define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec(v)			atomic64_sub(1LL, (v))
-#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
-#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
+#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
+#define atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
+#define atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)
+#define atomic64_add_unless(v, a, u)	(___atomic_add_unless(v, a, u, 64) != u)
+#define atomic64_andnot			atomic64_andnot
+
+#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
 
 #endif
 #endif
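The one piece of generic logic kept in this header is the cmpxchg
retry loop behind ___atomic_add_unless(). A minimal user-space
analogue, assuming C11 atomics (the add_unless name and the demo below
are invented for illustration, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

/* Add 'a' to *v unless *v == u; return the value observed before the
 * (possible) addition. On CAS failure, compare_exchange_weak reloads
 * 'c' with the current value and the loop retries, mirroring the
 * "old = cmpxchg(...); c = old" step in the macro above. */
static int add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	while (c != u && !atomic_compare_exchange_weak(v, &c, c + a))
		;
	return c;
}

int main(void)
{
	atomic_int refcount = 1;

	/* Same shape as atomic64_inc_not_zero(): take a reference only
	 * if the count has not already dropped to zero. */
	if (add_unless(&refcount, 1, 0) != 0)
		printf("got reference, count is now %d\n",
		       atomic_load(&refcount));
	return 0;
}

Note the return-value convention in the diff: __atomic_add_unless()
hands back the old value, so callers compare it against u themselves,
while atomic64_add_unless() folds the != u test in and returns a
boolean.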