author		José Pekkarinen <jose.pekkarinen@nokia.com>	2015-10-09 08:42:44 +0300
committer	José Pekkarinen <jose.pekkarinen@nokia.com>	2015-10-09 08:52:35 +0300
commit		fdb8b20906f3546ba6c2f9f0686d8a5189516ba3 (patch)
tree		6bb43dc8a42d6e9403763bc749f706939dd2bc60 /kernel/arch/arc
parent		cc84a1f21026270463b580f2564f9d71912b20db (diff)
Kernel bump from 4.1.3-rt to 4.1.7-rt.
These changes bring in a vanilla kernel from kernel.org; the rt patch applied is patch-4.1.7-rt8.patch. No further changes needed.

Change-Id: Id8dd03c2ddd971e4d1d69b905f3069737053b700
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/arch/arc')
-rw-r--r--	kernel/arch/arc/Makefile		|   3
-rw-r--r--	kernel/arch/arc/include/asm/bitops.h	| 468
-rw-r--r--	kernel/arch/arc/include/asm/ptrace.h	|   2
3 files changed, 134 insertions(+), 339 deletions(-)
diff --git a/kernel/arch/arc/Makefile b/kernel/arch/arc/Makefile
index db72fec0e..2f21e1e0e 100644
--- a/kernel/arch/arc/Makefile
+++ b/kernel/arch/arc/Makefile
@@ -43,7 +43,8 @@ endif
ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
# Generic build system uses -O2, we want -O3
-cflags-y += -O3
+# Note: No need to add to cflags-y as that happens anyway
+ARCH_CFLAGS += -O3
endif
# small data is default for elf32 tool-chain. If not usable, disable it
diff --git a/kernel/arch/arc/include/asm/bitops.h b/kernel/arch/arc/include/asm/bitops.h
index 624a9d048..dae03e66f 100644
--- a/kernel/arch/arc/include/asm/bitops.h
+++ b/kernel/arch/arc/include/asm/bitops.h
@@ -18,83 +18,49 @@
#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/barrier.h>
+#ifndef CONFIG_ARC_HAS_LLSC
+#include <asm/smp.h>
+#endif
-/*
- * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns.
- * The Kconfig glue ensures that in SMP, this is only set if the container
- * SoC/platform has cross-core coherent LLOCK/SCOND
- */
#if defined(CONFIG_ARC_HAS_LLSC)
-static inline void set_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned int temp;
-
- m += nr >> 5;
-
- /*
- * ARC ISA micro-optimization:
- *
- * Instructions dealing with bitpos only consider lower 5 bits (0-31)
- * e.g (x << 33) is handled like (x << 1) by ASL instruction
- * (mem pointer still needs adjustment to point to next word)
- *
- * Hence the masking to clamp @nr arg can be elided in general.
- *
- * However if @nr is a constant (above assumed it in a register),
- * and greater than 31, gcc can optimize away (x << 33) to 0,
- * as overflow, given the 32-bit ISA. Thus masking needs to be done
- * for constant @nr, but no code is generated due to const prop.
- */
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- __asm__ __volatile__(
- "1: llock %0, [%1] \n"
- " bset %0, %0, %2 \n"
- " scond %0, [%1] \n"
- " bnz 1b \n"
- : "=&r"(temp)
- : "r"(m), "ir"(nr)
- : "cc");
-}
-
-static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned int temp;
-
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- __asm__ __volatile__(
- "1: llock %0, [%1] \n"
- " bclr %0, %0, %2 \n"
- " scond %0, [%1] \n"
- " bnz 1b \n"
- : "=&r"(temp)
- : "r"(m), "ir"(nr)
- : "cc");
-}
-
-static inline void change_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned int temp;
-
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
+/*
+ * Hardware assisted Atomic-R-M-W
+ */
- __asm__ __volatile__(
- "1: llock %0, [%1] \n"
- " bxor %0, %0, %2 \n"
- " scond %0, [%1] \n"
- " bnz 1b \n"
- : "=&r"(temp)
- : "r"(m), "ir"(nr)
- : "cc");
+#define BIT_OP(op, c_op, asm_op) \
+static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
+{ \
+ unsigned int temp; \
+ \
+ m += nr >> 5; \
+ \
+ /* \
+ * ARC ISA micro-optimization: \
+ * \
+ * Instructions dealing with bitpos only consider lower 5 bits \
+ * e.g. (x << 33) is handled like (x << 1) by ASL instruction \
+ * (mem pointer still needs adjustment to point to next word) \
+ * \
+ * Hence the masking to clamp @nr arg can be elided in general. \
+ * \
+ * However if @nr is a constant (above assumed in a register), \
+ * and greater than 31, gcc can optimize away (x << 33) to 0, \
+ * as overflow, given the 32-bit ISA. Thus masking needs to be \
+ * done for const @nr, but no code is generated due to gcc \
+ * const prop. \
+ */ \
+ nr &= 0x1f; \
+ \
+ __asm__ __volatile__( \
+ "1: llock %0, [%1] \n" \
+ " " #asm_op " %0, %0, %2 \n" \
+ " scond %0, [%1] \n" \
+ " bnz 1b \n" \
+ : "=&r"(temp) /* Early clobber, to prevent reg reuse */ \
+ : "r"(m), /* Not "m": llock only supports reg direct addr mode */ \
+ "ir"(nr) \
+ : "cc"); \
}
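
To make the generator easier to follow: with the BIT_OPS(set, |, bset) instantiation further down, the preprocessor stamps out a function equivalent to this hand-expanded sketch (for illustration only, not text from the patch):

    static inline void set_bit(unsigned long nr, volatile unsigned long *m)
    {
    	unsigned int temp;

    	m += nr >> 5;		/* advance to the word holding the bit */
    	nr &= 0x1f;		/* bit position within that word */

    	__asm__ __volatile__(
    	"1:	llock   %0, [%1]	\n"	/* load-locked the word */
    	"	bset    %0, %0, %2	\n"	/* set bit 'nr' in it */
    	"	scond   %0, [%1]	\n"	/* store-conditional back */
    	"	bnz     1b		\n"	/* retry if reservation was lost */
    	: "=&r"(temp)
    	: "r"(m), "ir"(nr)
    	: "cc");
    }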
/*
@@ -108,91 +74,37 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *m)
* Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
* and the old value of bit is returned
*/
-static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned long old, temp;
-
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- /*
- * Explicit full memory barrier needed before/after as
- * LLOCK/SCOND themselves don't provide any such semantics
- */
- smp_mb();
-
- __asm__ __volatile__(
- "1: llock %0, [%2] \n"
- " bset %1, %0, %3 \n"
- " scond %1, [%2] \n"
- " bnz 1b \n"
- : "=&r"(old), "=&r"(temp)
- : "r"(m), "ir"(nr)
- : "cc");
-
- smp_mb();
-
- return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned int old, temp;
-
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- smp_mb();
-
- __asm__ __volatile__(
- "1: llock %0, [%2] \n"
- " bclr %1, %0, %3 \n"
- " scond %1, [%2] \n"
- " bnz 1b \n"
- : "=&r"(old), "=&r"(temp)
- : "r"(m), "ir"(nr)
- : "cc");
-
- smp_mb();
-
- return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_change_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned int old, temp;
-
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- smp_mb();
-
- __asm__ __volatile__(
- "1: llock %0, [%2] \n"
- " bxor %1, %0, %3 \n"
- " scond %1, [%2] \n"
- " bnz 1b \n"
- : "=&r"(old), "=&r"(temp)
- : "r"(m), "ir"(nr)
- : "cc");
-
- smp_mb();
-
- return (old & (1 << nr)) != 0;
+#define TEST_N_BIT_OP(op, c_op, asm_op) \
+static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{ \
+ unsigned long old, temp; \
+ \
+ m += nr >> 5; \
+ \
+ nr &= 0x1f; \
+ \
+ /* \
+ * Explicit full memory barrier needed before/after as \
+ * LLOCK/SCOND themselves don't provide any such semantics \
+ */ \
+ smp_mb(); \
+ \
+ __asm__ __volatile__( \
+ "1: llock %0, [%2] \n" \
+ " " #asm_op " %1, %0, %3 \n" \
+ " scond %1, [%2] \n" \
+ " bnz 1b \n" \
+ : "=&r"(old), "=&r"(temp) \
+ : "r"(m), "ir"(nr) \
+ : "cc"); \
+ \
+ smp_mb(); \
+ \
+ return (old & (1 << nr)) != 0; \
}
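
The returned old bit value is what makes the test_and_*_bit() routines usable as primitive locks. A minimal usage sketch (the flag word, bit index, and function names here are hypothetical, not from the patch):

    #include <linux/bitops.h>
    #include <linux/errno.h>

    static unsigned long dev_flags;		/* hypothetical state word */
    #define DEV_BUSY	0			/* hypothetical bit index */

    static int try_claim_device(void)
    {
    	/* Atomically set DEV_BUSY; the old value says if it was taken */
    	if (test_and_set_bit(DEV_BUSY, &dev_flags))
    		return -EBUSY;			/* already claimed */
    	return 0;				/* we own it now */
    }

    static void release_device(void)
    {
    	clear_bit(DEV_BUSY, &dev_flags);	/* atomic release */
    }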
#else /* !CONFIG_ARC_HAS_LLSC */
-#include <asm/smp.h>
-
/*
* Non hardware assisted Atomic-R-M-W
* Locking would change to irq-disabling only (UP) and spinlocks (SMP)
@@ -209,111 +121,37 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m)
* at compile time)
*/
-static inline void set_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned long temp, flags;
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- bitops_lock(flags);
-
- temp = *m;
- *m = temp | (1UL << nr);
-
- bitops_unlock(flags);
+#define BIT_OP(op, c_op, asm_op) \
+static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
+{ \
+ unsigned long temp, flags; \
+ m += nr >> 5; \
+ \
+ /* \
+ * spin lock/unlock provide the needed smp_mb() before/after \
+ */ \
+ bitops_lock(flags); \
+ \
+ temp = *m; \
+ *m = temp c_op (1UL << (nr & 0x1f)); \
+ \
+ bitops_unlock(flags); \
}
-static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned long temp, flags;
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- bitops_lock(flags);
-
- temp = *m;
- *m = temp & ~(1UL << nr);
-
- bitops_unlock(flags);
-}
-
-static inline void change_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned long temp, flags;
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- bitops_lock(flags);
-
- temp = *m;
- *m = temp ^ (1UL << nr);
-
- bitops_unlock(flags);
-}
-
-static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned long old, flags;
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- /*
- * spin lock/unlock provide the needed smp_mb() before/after
- */
- bitops_lock(flags);
-
- old = *m;
- *m = old | (1 << nr);
-
- bitops_unlock(flags);
-
- return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned long old, flags;
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- bitops_lock(flags);
-
- old = *m;
- *m = old & ~(1 << nr);
-
- bitops_unlock(flags);
-
- return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_change_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned long old, flags;
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- bitops_lock(flags);
-
- old = *m;
- *m = old ^ (1 << nr);
-
- bitops_unlock(flags);
-
- return (old & (1 << nr)) != 0;
+#define TEST_N_BIT_OP(op, c_op, asm_op) \
+static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{ \
+ unsigned long old, flags; \
+ m += nr >> 5; \
+ \
+ bitops_lock(flags); \
+ \
+ old = *m; \
+ *m = old c_op (1UL << (nr & 0x1f)); \
+ \
+ bitops_unlock(flags); \
+ \
+ return (old & (1UL << (nr & 0x1f))) != 0; \
}
#endif /* CONFIG_ARC_HAS_LLSC */
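
For reference, the bitops_lock()/bitops_unlock() helpers pulled in from <asm/smp.h> reduce to IRQ disabling on UP, and a global spinlock plus IRQ disabling on SMP; roughly (a simplified sketch of that header, not part of this diff):

    #ifndef CONFIG_SMP
    #define bitops_lock(flags)	local_irq_save(flags)
    #define bitops_unlock(flags)	local_irq_restore(flags)
    #else
    extern arch_spinlock_t smp_bitops_lock;

    #define bitops_lock(flags)			\
    do {					\
    	local_irq_save(flags);			\
    	arch_spin_lock(&smp_bitops_lock);	\
    } while (0)

    #define bitops_unlock(flags)		\
    do {					\
    	arch_spin_unlock(&smp_bitops_lock);	\
    	local_irq_restore(flags);		\
    } while (0)
    #endif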
@@ -322,86 +160,45 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m)
* Non atomic variants
**************************************/
-static inline void __set_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned long temp;
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- temp = *m;
- *m = temp | (1UL << nr);
+#define __BIT_OP(op, c_op, asm_op) \
+static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m) \
+{ \
+ unsigned long temp; \
+ m += nr >> 5; \
+ \
+ temp = *m; \
+ *m = temp c_op (1UL << (nr & 0x1f)); \
}
-static inline void __clear_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned long temp;
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- temp = *m;
- *m = temp & ~(1UL << nr);
+#define __TEST_N_BIT_OP(op, c_op, asm_op) \
+static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{ \
+ unsigned long old; \
+ m += nr >> 5; \
+ \
+ old = *m; \
+ *m = old c_op (1UL << (nr & 0x1f)); \
+ \
+ return (old & (1UL << (nr & 0x1f))) != 0; \
}
-static inline void __change_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned long temp;
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- temp = *m;
- *m = temp ^ (1UL << nr);
-}
-
-static inline int
-__test_and_set_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned long old;
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- old = *m;
- *m = old | (1 << nr);
-
- return (old & (1 << nr)) != 0;
-}
-
-static inline int
-__test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned long old;
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- old = *m;
- *m = old & ~(1 << nr);
-
- return (old & (1 << nr)) != 0;
-}
-
-static inline int
-__test_and_change_bit(unsigned long nr, volatile unsigned long *m)
-{
- unsigned long old;
- m += nr >> 5;
-
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- old = *m;
- *m = old ^ (1 << nr);
-
- return (old & (1 << nr)) != 0;
-}
+#define BIT_OPS(op, c_op, asm_op) \
+ \
+ /* set_bit(), clear_bit(), change_bit() */ \
+ BIT_OP(op, c_op, asm_op) \
+ \
+ /* test_and_set_bit(), test_and_clear_bit(), test_and_change_bit() */\
+ TEST_N_BIT_OP(op, c_op, asm_op) \
+ \
+ /* __set_bit(), __clear_bit(), __change_bit() */ \
+ __BIT_OP(op, c_op, asm_op) \
+ \
+ /* __test_and_set_bit(), __test_and_clear_bit(), __test_and_change_bit() */\
+ __TEST_N_BIT_OP(op, c_op, asm_op)
+
+BIT_OPS(set, |, bset)
+BIT_OPS(clear, & ~, bclr)
+BIT_OPS(change, ^, bxor)
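
Each BIT_OPS() line expands all four generator macros, so the three instantiations above replace the twelve open-coded functions deleted in this patch:

    /* Generated by BIT_OPS(set, |, bset):
     *	set_bit(), test_and_set_bit(), __set_bit(), __test_and_set_bit()
     * Generated by BIT_OPS(clear, & ~, bclr):
     *	clear_bit(), test_and_clear_bit(), __clear_bit(), __test_and_clear_bit()
     * Generated by BIT_OPS(change, ^, bxor):
     *	change_bit(), test_and_change_bit(), __change_bit(), __test_and_change_bit()
     */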
/*
* This routine doesn't need to be atomic.
@@ -413,10 +210,7 @@ test_bit(unsigned int nr, const volatile unsigned long *addr)
addr += nr >> 5;
- if (__builtin_constant_p(nr))
- nr &= 0x1f;
-
- mask = 1 << nr;
+ mask = 1UL << (nr & 0x1f);
return ((mask & *addr) != 0);
}
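
A quick worked example of the indexing (illustrative values only): for nr = 33, the word offset is 33 >> 5 = 1 and the in-word position is 33 & 0x1f = 1:

    unsigned long bits[2] = { 0x0, 0x2 };	/* only bit 33 of the array is set */

    /* test_bit(33, bits):
     *	addr += 33 >> 5             ->  addr == &bits[1]
     *	mask  = 1UL << (33 & 0x1f)  ==  1UL << 1  ==  0x2
     *	mask & *addr is nonzero     ->  returns 1
     */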
diff --git a/kernel/arch/arc/include/asm/ptrace.h b/kernel/arch/arc/include/asm/ptrace.h
index 1bfeec2c0..2a58af7a2 100644
--- a/kernel/arch/arc/include/asm/ptrace.h
+++ b/kernel/arch/arc/include/asm/ptrace.h
@@ -63,7 +63,7 @@ struct callee_regs {
long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
};
-#define instruction_pointer(regs) ((regs)->ret)
+#define instruction_pointer(regs) (unsigned long)((regs)->ret)
#define profile_pc(regs) instruction_pointer(regs)
/* return 1 if user mode or 0 if kernel mode */
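
Why the cast matters (an illustrative sketch, not from the patch): regs->ret is a signed long, and 32-bit ARC kernel addresses have the top bit set, so storing the raw value into a wider unsigned type, as perf and tracing code do, would sign-extend it; the unsigned long cast makes it zero-extend instead:

    #include <linux/types.h>

    long ret = (long)0x80001234;	/* kernel text address, top bit set */

    u64 bad  = ret;			/* sign-extends to 0xffffffff80001234 */
    u64 good = (unsigned long)ret;	/* zero-extends to 0x0000000080001234 */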