Diffstat (limited to 'kernel/arch/arm64/include/asm/tlbflush.h')
-rw-r--r--  kernel/arch/arm64/include/asm/tlbflush.h   96
1 file changed, 46 insertions(+), 50 deletions(-)
diff --git a/kernel/arch/arm64/include/asm/tlbflush.h b/kernel/arch/arm64/include/asm/tlbflush.h
index c3bb05b98..b460ae28e 100644
--- a/kernel/arch/arm64/include/asm/tlbflush.h
+++ b/kernel/arch/arm64/include/asm/tlbflush.h
@@ -28,8 +28,6 @@
* TLB Management
* ==============
*
- * The arch/arm64/mm/tlb.S files implement these methods.
- *
* The TLB specific code is expected to perform whatever tests it needs
* to determine if it should invalidate the TLB for each call. Start
* addresses are inclusive and end addresses are exclusive; it is safe to
@@ -65,6 +63,14 @@
* only require the D-TLB to be invalidated.
* - kaddr - Kernel virtual memory address
*/
+static inline void local_flush_tlb_all(void)
+{
+ dsb(nshst);
+ asm("tlbi vmalle1");
+ dsb(nsh);
+ isb();
+}
+
static inline void flush_tlb_all(void)
{
dsb(ishst);
@@ -75,7 +81,7 @@ static inline void flush_tlb_all(void)
static inline void flush_tlb_mm(struct mm_struct *mm)
{
- unsigned long asid = (unsigned long)ASID(mm) << 48;
+ unsigned long asid = ASID(mm) << 48;
dsb(ishst);
asm("tlbi aside1is, %0" : : "r" (asid));
@@ -85,31 +91,59 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long uaddr)
{
- unsigned long addr = uaddr >> 12 |
- ((unsigned long)ASID(vma->vm_mm) << 48);
+ unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48);
dsb(ishst);
- asm("tlbi vae1is, %0" : : "r" (addr));
+ asm("tlbi vale1is, %0" : : "r" (addr));
dsb(ish);
}
+/*
+ * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
+ * necessarily a performance improvement.
+ */
+#define MAX_TLB_RANGE (1024UL << PAGE_SHIFT)
+
static inline void __flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
+ unsigned long start, unsigned long end,
+ bool last_level)
{
- unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
+ unsigned long asid = ASID(vma->vm_mm) << 48;
unsigned long addr;
+
+ if ((end - start) > MAX_TLB_RANGE) {
+ flush_tlb_mm(vma->vm_mm);
+ return;
+ }
+
start = asid | (start >> 12);
end = asid | (end >> 12);
dsb(ishst);
- for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
- asm("tlbi vae1is, %0" : : "r"(addr));
+ for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
+ if (last_level)
+ asm("tlbi vale1is, %0" : : "r"(addr));
+ else
+ asm("tlbi vae1is, %0" : : "r"(addr));
+ }
dsb(ish);
}
-static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long end)
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ __flush_tlb_range(vma, start, end, false);
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
unsigned long addr;
+
+ if ((end - start) > MAX_TLB_RANGE) {
+ flush_tlb_all();
+ return;
+ }
+
start >>= 12;
end >>= 12;
@@ -121,55 +155,17 @@ static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long end)
}
/*
- * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
- * necessarily a performance improvement.
- */
-#define MAX_TLB_RANGE (1024UL << PAGE_SHIFT)
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
- if ((end - start) <= MAX_TLB_RANGE)
- __flush_tlb_range(vma, start, end);
- else
- flush_tlb_mm(vma->vm_mm);
-}
-
-static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
- if ((end - start) <= MAX_TLB_RANGE)
- __flush_tlb_kernel_range(start, end);
- else
- flush_tlb_all();
-}
-
-/*
* Used to invalidate the TLB (walk caches) corresponding to intermediate page
* table levels (pgd/pud/pmd).
*/
static inline void __flush_tlb_pgtable(struct mm_struct *mm,
unsigned long uaddr)
{
- unsigned long addr = uaddr >> 12 | ((unsigned long)ASID(mm) << 48);
+ unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);
- dsb(ishst);
asm("tlbi vae1is, %0" : : "r" (addr));
dsb(ish);
}
-/*
- * On AArch64, the cache coherency is handled via the set_pte_at() function.
- */
-static inline void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
-{
- /*
- * set_pte() does not have a DSB for user mappings, so make sure that
- * the page table write is visible.
- */
- dsb(ishst);
-}
-
-#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
#endif
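
For reference, the standalone sketch below is not part of the patch; it is a minimal
user-space C illustration of two things visible in the hunks above: how the operand
passed to "tlbi vae1is"/"tlbi vale1is" packs the page number of the virtual address
(uaddr >> 12) together with the ASID in bits [63:48], and what the MAX_TLB_RANGE
cut-off works out to assuming 4 KiB pages. All names below are illustrative only.

    /*
     * Standalone illustration, not kernel code.
     * Assumes PAGE_SHIFT = 12 (4 KiB pages), as in the defconfig case.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define EXAMPLE_PAGE_SHIFT    12
    /* 1024 pages of 4 KiB = 4 MiB; larger ranges fall back to a full flush. */
    #define EXAMPLE_MAX_TLB_RANGE (1024UL << EXAMPLE_PAGE_SHIFT)

    /* Build a TLBI operand the same way the header does: VA page number in
     * the low bits, 16-bit ASID in bits [63:48]. */
    static uint64_t tlbi_operand(uint64_t uaddr, uint16_t asid)
    {
            return (uaddr >> 12) | ((uint64_t)asid << 48);
    }

    int main(void)
    {
            printf("MAX_TLB_RANGE = %lu bytes\n", EXAMPLE_MAX_TLB_RANGE);
            /* Example: a user address in an address space whose ASID is 0x42. */
            printf("tlbi operand  = 0x%016llx\n",
                   (unsigned long long)tlbi_operand(0x0000007fb7e00000ULL, 0x42));
            return 0;
    }

The same encoding explains the loop step in __flush_tlb_range: with 4 KiB pages,
1 << (PAGE_SHIFT - 12) is 1, so the operand advances by one page per TLBI.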