author    | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-11 10:41:07 +0300
committer | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-13 08:17:18 +0300
commit    | e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree      | d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/arch/alpha/include/asm/atomic.h
parent    | f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. The kernel sources
are taken from kernel.org, and the rt patch from the rt wiki download page.
During the rebase, the following patch collided:
Force tick interrupt and get rid of softirq magic (I70131fb85).
The collision has been resolved because its logic was already present
in the source.
Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/arch/alpha/include/asm/atomic.h')
-rw-r--r-- | kernel/arch/alpha/include/asm/atomic.h | 50
1 file changed, 31 insertions(+), 19 deletions(-)
diff --git a/kernel/arch/alpha/include/asm/atomic.h b/kernel/arch/alpha/include/asm/atomic.h
index 8f8eafbed..572b228c4 100644
--- a/kernel/arch/alpha/include/asm/atomic.h
+++ b/kernel/arch/alpha/include/asm/atomic.h
@@ -17,11 +17,11 @@
 #define ATOMIC_INIT(i)		{ (i) }
 #define ATOMIC64_INIT(i)	{ (i) }
 
-#define atomic_read(v)		ACCESS_ONCE((v)->counter)
-#define atomic64_read(v)	ACCESS_ONCE((v)->counter)
+#define atomic_read(v)		READ_ONCE((v)->counter)
+#define atomic64_read(v)	READ_ONCE((v)->counter)
 
-#define atomic_set(v,i)		((v)->counter = (i))
-#define atomic64_set(v,i)	((v)->counter = (i))
+#define atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
+#define atomic64_set(v,i)	WRITE_ONCE((v)->counter, (i))
 
 /*
  * To get proper branch prediction for the main line, we must branch
@@ -29,13 +29,13 @@
  * branch back to restart the operation.
  */
 
-#define ATOMIC_OP(op)							\
+#define ATOMIC_OP(op, asm_op)						\
 static __inline__ void atomic_##op(int i, atomic_t * v)		\
 {									\
 	unsigned long temp;						\
 	__asm__ __volatile__(						\
 	"1:	ldl_l %0,%1\n"						\
-	"	" #op "l %0,%2,%0\n"					\
+	"	" #asm_op " %0,%2,%0\n"					\
 	"	stl_c %0,%1\n"						\
 	"	beq %0,2f\n"						\
 	".subsection 2\n"						\
@@ -45,15 +45,15 @@ static __inline__ void atomic_##op(int i, atomic_t * v)	\
 	:"Ir" (i), "m" (v->counter));					\
 }									\
 
-#define ATOMIC_OP_RETURN(op)						\
+#define ATOMIC_OP_RETURN(op, asm_op)					\
 static inline int atomic_##op##_return(int i, atomic_t *v)		\
 {									\
 	long temp, result;						\
 	smp_mb();							\
 	__asm__ __volatile__(						\
 	"1:	ldl_l %0,%1\n"						\
-	"	" #op "l %0,%3,%2\n"					\
-	"	" #op "l %0,%3,%0\n"					\
+	"	" #asm_op " %0,%3,%2\n"					\
+	"	" #asm_op " %0,%3,%0\n"					\
 	"	stl_c %0,%1\n"						\
 	"	beq %0,2f\n"						\
 	".subsection 2\n"						\
@@ -65,13 +65,13 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\
 	return result;							\
 }
 
-#define ATOMIC64_OP(op)							\
+#define ATOMIC64_OP(op, asm_op)						\
 static __inline__ void atomic64_##op(long i, atomic64_t * v)		\
 {									\
 	unsigned long temp;						\
 	__asm__ __volatile__(						\
 	"1:	ldq_l %0,%1\n"						\
-	"	" #op "q %0,%2,%0\n"					\
+	"	" #asm_op " %0,%2,%0\n"					\
 	"	stq_c %0,%1\n"						\
 	"	beq %0,2f\n"						\
 	".subsection 2\n"						\
@@ -81,15 +81,15 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v)	\
 	:"Ir" (i), "m" (v->counter));					\
 }									\
 
-#define ATOMIC64_OP_RETURN(op)						\
+#define ATOMIC64_OP_RETURN(op, asm_op)					\
 static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)	\
 {									\
 	long temp, result;						\
 	smp_mb();							\
 	__asm__ __volatile__(						\
 	"1:	ldq_l %0,%1\n"						\
-	"	" #op "q %0,%3,%2\n"					\
-	"	" #op "q %0,%3,%0\n"					\
+	"	" #asm_op " %0,%3,%2\n"					\
+	"	" #asm_op " %0,%3,%0\n"					\
 	"	stq_c %0,%1\n"						\
 	"	beq %0,2f\n"						\
 	".subsection 2\n"						\
@@ -101,15 +101,27 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)	\
 	return result;							\
 }
 
-#define ATOMIC_OPS(opg)							\
-	ATOMIC_OP(opg)							\
-	ATOMIC_OP_RETURN(opg)						\
-	ATOMIC64_OP(opg)						\
-	ATOMIC64_OP_RETURN(opg)
+#define ATOMIC_OPS(op)							\
+	ATOMIC_OP(op, op##l)						\
+	ATOMIC_OP_RETURN(op, op##l)					\
+	ATOMIC64_OP(op, op##q)						\
+	ATOMIC64_OP_RETURN(op, op##q)
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+#define atomic_andnot atomic_andnot
+#define atomic64_andnot atomic64_andnot
+
+ATOMIC_OP(and, and)
+ATOMIC_OP(andnot, bic)
+ATOMIC_OP(or, bis)
+ATOMIC_OP(xor, xor)
+ATOMIC64_OP(and, and)
+ATOMIC64_OP(andnot, bic)
+ATOMIC64_OP(or, bis)
+ATOMIC64_OP(xor, xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
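For context on the macro change above: the generator macros gain a second asm_op parameter because the C-level operation name no longer always matches the Alpha mnemonic that implements it (andnot is built from bic, or from bis), so the token used to name the generated function and the instruction emitted into the asm template must be passed separately. The sketch below is a hypothetical, user-space illustration of that same pattern; fake_atomic_t and FAKE_ATOMIC_OP are invented for the example, plain C expressions stand in for the ldl_l/stl_c retry loop, and nothing here is actually atomic.

#include <stdio.h>

/* Illustration only: a plain counter standing in for atomic_t. */
typedef struct { int counter; } fake_atomic_t;

/*
 * Same shape as the patched ATOMIC_OP(op, asm_op): the first argument
 * names the generated function via token pasting (fake_atomic_##op),
 * the second supplies the operation itself -- here a C expression,
 * in the kernel an Alpha mnemonic such as bic or bis -- so that ops
 * whose name and instruction differ can share one template.
 */
#define FAKE_ATOMIC_OP(op, expr)				\
static void fake_atomic_##op(int i, fake_atomic_t *v)		\
{								\
	v->counter = (expr);	/* kernel: ldl_l/stl_c loop */	\
}

FAKE_ATOMIC_OP(add,    v->counter + i)
FAKE_ATOMIC_OP(sub,    v->counter - i)
FAKE_ATOMIC_OP(and,    v->counter & i)
FAKE_ATOMIC_OP(andnot, v->counter & ~i)	/* Alpha uses bic */
FAKE_ATOMIC_OP(or,     v->counter | i)	/* Alpha uses bis */
FAKE_ATOMIC_OP(xor,    v->counter ^ i)

int main(void)
{
	fake_atomic_t v = { .counter = 0x0f };

	fake_atomic_add(0xf0, &v);		/* 0x0f + 0xf0 = 0xff */
	fake_atomic_andnot(0x0f, &v);		/* 0xff & ~0x0f = 0xf0 */
	fake_atomic_or(0x01, &v);		/* 0xf0 | 0x01 = 0xf1 */
	printf("counter = 0x%x\n", v.counter);	/* prints counter = 0xf1 */
	return 0;
}

The old ATOMIC_OPS(opg) body only handled add and sub, whose mnemonics can be derived by appending "l" or "q" to the name; the new ATOMIC_OPS(op) keeps that derivation with op##l and op##q, while the bitwise ops pass their mnemonics explicitly, as the hunk at lines 101-127 shows.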