Diffstat (limited to 'kernel/arch/mips/include/asm')
-rw-r--r--  kernel/arch/mips/include/asm/fpu.h                        |  2 +-
-rw-r--r--  kernel/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h | 10 ----------
-rw-r--r--  kernel/arch/mips/include/asm/pgtable.h                    | 31 +++++++++++++++++++++++++++++++
-rw-r--r--  kernel/arch/mips/include/asm/smp.h                        |  1 +
-rw-r--r--  kernel/arch/mips/include/asm/stackframe.h                 | 25 +++++++++++++++++++++++++
5 files changed, 58 insertions(+), 11 deletions(-)
diff --git a/kernel/arch/mips/include/asm/fpu.h b/kernel/arch/mips/include/asm/fpu.h
index 084780b35..1b0625189 100644
--- a/kernel/arch/mips/include/asm/fpu.h
+++ b/kernel/arch/mips/include/asm/fpu.h
@@ -74,7 +74,7 @@ static inline int __enable_fpu(enum fpu_mode mode)
 		goto fr_common;
 
 	case FPU_64BIT:
-#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) \
+#if !(defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) \
       || defined(CONFIG_64BIT))
 		/* we only have a 32-bit FPU */
 		return SIGFPE;
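
The change generalizes the guard: FR=1 (the 64-bit FPU register mode) is available on any MIPS r2/r6 core, not only the MIPS32 variants that the old CONFIG_CPU_MIPS32_R2/_R6 symbols covered, and every other configuration must still refuse it with SIGFPE. A minimal C sketch of the same check, with hypothetical runtime flags standing in for the Kconfig symbols (illustration only, not the kernel's code):

    #include <signal.h>

    /* Hypothetical feature flags standing in for CONFIG_CPU_MIPSR2,
     * CONFIG_CPU_MIPSR6 and CONFIG_64BIT. */
    struct cpu_caps {
        int has_mips_r2;
        int has_mips_r6;
        int is_64bit;
    };

    static int check_fpu_64bit(const struct cpu_caps *c)
    {
        /* Without an r2/r6 or 64-bit core there is only a 32-bit FPU,
         * so a request for FR=1 has to fail. */
        if (!(c->has_mips_r2 || c->has_mips_r6 || c->is_64bit))
            return SIGFPE;
        return 0;   /* caller may proceed to enable FR=1 */
    }
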
diff --git a/kernel/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h b/kernel/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
deleted file mode 100644
index 11d3b572b..000000000
--- a/kernel/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-
-#include <asm/bmips.h>
-
-#define plat_post_dma_flush bmips_post_dma_flush
-
-#include <asm/mach-generic/dma-coherence.h>
-
-#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */
diff --git a/kernel/arch/mips/include/asm/pgtable.h b/kernel/arch/mips/include/asm/pgtable.h
index 819af9d05..70f6e7f07 100644
--- a/kernel/arch/mips/include/asm/pgtable.h
+++ b/kernel/arch/mips/include/asm/pgtable.h
@@ -182,8 +182,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 		 * Make sure the buddy is global too (if it's !none,
 		 * it better already be global)
 		 */
+#ifdef CONFIG_SMP
+		/*
+		 * For SMP, multiple CPUs can race, so we need to do
+		 * this atomically.
+		 */
+#ifdef CONFIG_64BIT
+#define LL_INSN "lld"
+#define SC_INSN "scd"
+#else /* CONFIG_32BIT */
+#define LL_INSN "ll"
+#define SC_INSN "sc"
+#endif
+		unsigned long page_global = _PAGE_GLOBAL;
+		unsigned long tmp;
+
+		__asm__ __volatile__ (
+		"	.set	push\n"
+		"	.set	noreorder\n"
+		"1:	" LL_INSN "	%[tmp], %[buddy]\n"
+		"	bnez	%[tmp], 2f\n"
+		"	 or	%[tmp], %[tmp], %[global]\n"
+		"	" SC_INSN "	%[tmp], %[buddy]\n"
+		"	beqz	%[tmp], 1b\n"
+		"	 nop\n"
+		"2:\n"
+		"	.set	pop"
+		: [buddy] "+m" (buddy->pte),
+		  [tmp] "=&r" (tmp)
+		: [global] "r" (page_global));
+#else /* !CONFIG_SMP */
 		if (pte_none(*buddy))
 			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
+#endif /* CONFIG_SMP */
 	}
 #endif
 }
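
The ll/sc loop is effectively a compare-and-swap: it publishes _PAGE_GLOBAL into the buddy PTE only if the buddy is still none (zero), retries when the store-conditional fails, and bails out as soon as another CPU has populated the entry. Note that the "or" sits in the "bnez" delay slot (the block is .set noreorder), so it also executes on the taken branch; that is harmless because %[tmp] is dead at label 2. A hedged sketch of the same operation in plain C, using a GCC atomic builtin instead of raw ll/sc (the function name is illustrative, not the kernel's):

    /* Sketch only -- the kernel uses the inline asm above. */
    static inline void buddy_set_global(unsigned long *buddy_pte,
                                        unsigned long page_global)
    {
        unsigned long expected = 0;

        /* CAS(*buddy_pte: 0 -> page_global). A strong CAS retries
         * internally on spurious LL/SC failure, mirroring the
         * "beqz %[tmp], 1b" retry; if the buddy is already non-zero,
         * nothing is written, mirroring the "bnez %[tmp], 2f" exit. */
        __atomic_compare_exchange_n(buddy_pte, &expected, page_global,
                                    false, __ATOMIC_RELAXED,
                                    __ATOMIC_RELAXED);
    }
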
diff --git a/kernel/arch/mips/include/asm/smp.h b/kernel/arch/mips/include/asm/smp.h
index 2b25d1ba1..16f1ea9ab 100644
--- a/kernel/arch/mips/include/asm/smp.h
+++ b/kernel/arch/mips/include/asm/smp.h
@@ -23,6 +23,7 @@
 extern int smp_num_siblings;
 extern cpumask_t cpu_sibling_map[];
 extern cpumask_t cpu_core_map[];
+extern cpumask_t cpu_foreign_map;
 
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
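
cpu_foreign_map complements cpu_sibling_map and cpu_core_map: it is the mask of online CPUs that do not share a core with the current CPU, which lets cache maintenance skip hardware threads that already see the same caches. A hedged sketch of how such a mask can be derived (illustrative; not necessarily how arch/mips/kernel/smp.c computes it, and the function name is made up):

    #include <linux/cpumask.h>
    #include <linux/smp.h>

    /* Sketch: every online CPU minus the current CPU's core siblings. */
    static void recompute_foreign_map(void)
    {
        int cpu = smp_processor_id();

        cpumask_andnot(&cpu_foreign_map, cpu_online_mask,
                       &cpu_sibling_map[cpu]);
    }
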
diff --git a/kernel/arch/mips/include/asm/stackframe.h b/kernel/arch/mips/include/asm/stackframe.h
index 28d6d9364..a71da5768 100644
--- a/kernel/arch/mips/include/asm/stackframe.h
+++ b/kernel/arch/mips/include/asm/stackframe.h
@@ -152,6 +152,31 @@
 		.set	noreorder
 		bltz	k0, 8f
 		 move	k1, sp
+#ifdef CONFIG_EVA
+		/*
+		 * Flush interAptiv's Return Prediction Stack (RPS) by writing
+		 * EntryHi. Toggling Config7.RPS is slower and less portable.
+		 *
+		 * The RPS isn't automatically flushed when exceptions are
+		 * taken, which can result in kernel mode speculative accesses
+		 * to user addresses if the RPS mispredicts. That's harmless
+		 * when user and kernel share the same address space, but with
+		 * EVA the same user segments may be unmapped to kernel mode,
+		 * even containing sensitive MMIO regions or invalid memory.
+		 *
+		 * This can happen when the kernel sets the return address to
+		 * ret_from_* and jr's to the exception handler, which looks
+		 * more like a tail call than a function call. If nested calls
+		 * don't evict the last user address in the RPS, it will
+		 * mispredict the return and fetch from a user controlled
+		 * address into the icache.
+		 *
+		 * More recent EVA-capable cores with MAAR to restrict
+		 * speculative accesses aren't affected.
+		 */
+		MFC0	k0, CP0_ENTRYHI
+		MTC0	k0, CP0_ENTRYHI
+#endif
 		.set	reorder
 		/* Called from user mode, new stack. */
 		get_saved_sp
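
The flush itself is just a read and write-back of CP0 EntryHi with its current value; on interAptiv the EntryHi write has the side effect of clearing the RPS. From C code the same sequence could be written with the coprocessor-0 accessors from <asm/mipsregs.h> (a sketch; the helper name is made up, and it assumes the rewrite alone is sufficient):

    #include <asm/mipsregs.h>

    /* Rewriting EntryHi with its current value flushes interAptiv's
     * Return Prediction Stack; the register value itself is unchanged. */
    static inline void rps_flush(void)
    {
        write_c0_entryhi(read_c0_entryhi());
    }
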