author     José Pekkarinen <jose.pekkarinen@nokia.com>  2015-10-09 08:42:44 +0300
committer  José Pekkarinen <jose.pekkarinen@nokia.com>  2015-10-09 08:52:35 +0300
commit     fdb8b20906f3546ba6c2f9f0686d8a5189516ba3 (patch)
tree       6bb43dc8a42d6e9403763bc749f706939dd2bc60 /kernel/arch/mips/include/asm
parent     cc84a1f21026270463b580f2564f9d71912b20db (diff)
Kernel bump from 4.1.3-rt to 4.1.7-rt.
These changes bring in a vanilla kernel from kernel.org; the patch applied for rt is patch-4.1.7-rt8.patch. No further changes were needed.
Change-Id: Id8dd03c2ddd971e4d1d69b905f3069737053b700
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/arch/mips/include/asm')
-rw-r--r--  kernel/arch/mips/include/asm/fpu.h                         |  2
-rw-r--r--  kernel/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h  | 10
-rw-r--r--  kernel/arch/mips/include/asm/pgtable.h                     | 31
-rw-r--r--  kernel/arch/mips/include/asm/smp.h                         |  1
-rw-r--r--  kernel/arch/mips/include/asm/stackframe.h                  | 25
5 files changed, 58 insertions(+), 11 deletions(-)
diff --git a/kernel/arch/mips/include/asm/fpu.h b/kernel/arch/mips/include/asm/fpu.h
index 084780b35..1b0625189 100644
--- a/kernel/arch/mips/include/asm/fpu.h
+++ b/kernel/arch/mips/include/asm/fpu.h
@@ -74,7 +74,7 @@ static inline int __enable_fpu(enum fpu_mode mode)
goto fr_common;
case FPU_64BIT:
-#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) \
+#if !(defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) \
|| defined(CONFIG_64BIT))
/* we only have a 32-bit FPU */
return SIGFPE;
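
Note: the widened guard reads as a simple predicate: FPU_64BIT can only be enabled when the kernel targets an R2/R6 ISA (32- or 64-bit) or a 64-bit CPU; otherwise only a 32-bit FPU exists and __enable_fpu() fails with SIGFPE. A minimal restatement of that condition, for clarity only (can_enable_fr1_fpu is an illustrative name, not a kernel symbol):

    /* Illustrative restatement of the preprocessor guard above; not kernel code. */
    static inline int can_enable_fr1_fpu(void)
    {
    #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) || defined(CONFIG_64BIT)
            return 1;       /* a 64-bit FPU register file may be available */
    #else
            return 0;       /* only a 32-bit FPU: FPU_64BIT mode gets SIGFPE */
    #endif
    }
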
diff --git a/kernel/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h b/kernel/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
deleted file mode 100644
index 11d3b572b..000000000
--- a/kernel/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-
-#include <asm/bmips.h>
-
-#define plat_post_dma_flush bmips_post_dma_flush
-
-#include <asm/mach-generic/dma-coherence.h>
-
-#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */
diff --git a/kernel/arch/mips/include/asm/pgtable.h b/kernel/arch/mips/include/asm/pgtable.h
index 819af9d05..70f6e7f07 100644
--- a/kernel/arch/mips/include/asm/pgtable.h
+++ b/kernel/arch/mips/include/asm/pgtable.h
@@ -182,8 +182,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
* Make sure the buddy is global too (if it's !none,
* it better already be global)
*/
+#ifdef CONFIG_SMP
+ /*
+ * For SMP, multiple CPUs can race, so we need to do
+ * this atomically.
+ */
+#ifdef CONFIG_64BIT
+#define LL_INSN "lld"
+#define SC_INSN "scd"
+#else /* CONFIG_32BIT */
+#define LL_INSN "ll"
+#define SC_INSN "sc"
+#endif
+ unsigned long page_global = _PAGE_GLOBAL;
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+ " .set push\n"
+ " .set noreorder\n"
+ "1: " LL_INSN " %[tmp], %[buddy]\n"
+ " bnez %[tmp], 2f\n"
+ " or %[tmp], %[tmp], %[global]\n"
+ " " SC_INSN " %[tmp], %[buddy]\n"
+ " beqz %[tmp], 1b\n"
+ " nop\n"
+ "2:\n"
+ " .set pop"
+ : [buddy] "+m" (buddy->pte),
+ [tmp] "=&r" (tmp)
+ : [global] "r" (page_global));
+#else /* !CONFIG_SMP */
if (pte_none(*buddy))
pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
+#endif /* CONFIG_SMP */
}
#endif
}
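
Note: for readers less familiar with MIPS ll/sc, the added loop performs an atomic "mark the buddy PTE global only if it is still none": reload the entry, OR in _PAGE_GLOBAL if it is zero, and retry if another CPU raced in between. A rough sketch of the same idea using GCC/Clang __atomic builtins (illustrative only; set_pte_buddy_global and PAGE_GLOBAL_BIT are made-up names, and the kernel itself uses the ll/sc assembly shown above):

    /* Sketch, not kernel code: the net effect of the ll/sc loop above. */
    #define PAGE_GLOBAL_BIT 0x1UL               /* stand-in for _PAGE_GLOBAL */

    static void set_pte_buddy_global(unsigned long *buddy)
    {
            unsigned long expected = 0;

            /*
             * If the buddy PTE is still "none" (zero), mark it global in a
             * single atomic step; if another CPU already populated it, do
             * nothing (it must then already be global, per the comment in
             * set_pte()).
             */
            __atomic_compare_exchange_n(buddy, &expected, PAGE_GLOBAL_BIT,
                                        0 /* strong CAS */,
                                        __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    }
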
diff --git a/kernel/arch/mips/include/asm/smp.h b/kernel/arch/mips/include/asm/smp.h
index 2b25d1ba1..16f1ea9ab 100644
--- a/kernel/arch/mips/include/asm/smp.h
+++ b/kernel/arch/mips/include/asm/smp.h
@@ -23,6 +23,7 @@
extern int smp_num_siblings;
extern cpumask_t cpu_sibling_map[];
extern cpumask_t cpu_core_map[];
+extern cpumask_t cpu_foreign_map;
#define raw_smp_processor_id() (current_thread_info()->cpu)
diff --git a/kernel/arch/mips/include/asm/stackframe.h b/kernel/arch/mips/include/asm/stackframe.h
index 28d6d9364..a71da5768 100644
--- a/kernel/arch/mips/include/asm/stackframe.h
+++ b/kernel/arch/mips/include/asm/stackframe.h
@@ -152,6 +152,31 @@
.set noreorder
bltz k0, 8f
move k1, sp
+#ifdef CONFIG_EVA
+ /*
+ * Flush interAptiv's Return Prediction Stack (RPS) by writing
+ * EntryHi. Toggling Config7.RPS is slower and less portable.
+ *
+ * The RPS isn't automatically flushed when exceptions are
+ * taken, which can result in kernel mode speculative accesses
+ * to user addresses if the RPS mispredicts. That's harmless
+ * when user and kernel share the same address space, but with
+ * EVA the same user segments may be unmapped to kernel mode,
+ * even containing sensitive MMIO regions or invalid memory.
+ *
+ * This can happen when the kernel sets the return address to
+ * ret_from_* and jr's to the exception handler, which looks
+ * more like a tail call than a function call. If nested calls
+ * don't evict the last user address in the RPS, it will
+ * mispredict the return and fetch from a user controlled
+ * address into the icache.
+ *
+ * More recent EVA-capable cores with MAAR to restrict
+ * speculative accesses aren't affected.
+ */
+ MFC0 k0, CP0_ENTRYHI
+ MTC0 k0, CP0_ENTRYHI
+#endif
.set reorder
/* Called from user mode, new stack. */
get_saved_sp
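
Note: at C level the added two-instruction sequence is simply "write CP0 EntryHi back with its current value", which leaves the TLB state unchanged but flushes the interAptiv Return Prediction Stack. A minimal sketch using the kernel's CP0 accessors from <asm/mipsregs.h> (the wrapper name flush_rps_via_entryhi is hypothetical; the exception prologue does this in assembly because no stack or saved registers are available at that point):

    #include <asm/mipsregs.h>   /* read_c0_entryhi() / write_c0_entryhi() */

    /*
     * Illustrative equivalent of the MFC0/MTC0 pair above: rewriting
     * EntryHi with its own value is a TLB no-op while discarding any
     * stale user return addresses held in the RPS.
     */
    static inline void flush_rps_via_entryhi(void)
    {
            write_c0_entryhi(read_c0_entryhi());
    }
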