author | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-11 10:41:07 +0300
---|---|---
committer | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-13 08:17:18 +0300
commit | e09b41010ba33a20a87472ee821fa407a5b8da36 | (patch)
tree | d10dc367189862e7ca5c592f033dc3726e1df4e3 | /kernel/arch/mips/mm
parent | f93b97fd65072de626c074dbe099a1fff05ce060 | (diff)
These changes are the raw update to linux-4.4.6-rt14. The kernel sources
are taken from kernel.org, and the rt patch from the rt wiki download page.
During the rebase, the following patch conflicted:
Force tick interrupt and get rid of softirq magic (I70131fb85).
The conflicting changes were dropped, since their logic was already
present in the source.
Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/arch/mips/mm')
-rw-r--r-- | kernel/arch/mips/mm/Makefile | 1
-rw-r--r-- | kernel/arch/mips/mm/c-r4k.c | 7
-rw-r--r-- | kernel/arch/mips/mm/c-tx39.c | 4
-rw-r--r-- | kernel/arch/mips/mm/cache.c | 8
-rw-r--r-- | kernel/arch/mips/mm/dma-default.c | 90
-rw-r--r-- | kernel/arch/mips/mm/fault.c | 11
-rw-r--r-- | kernel/arch/mips/mm/highmem.c | 13
-rw-r--r-- | kernel/arch/mips/mm/hugetlbpage.c | 5
-rw-r--r-- | kernel/arch/mips/mm/init.c | 147
-rw-r--r-- | kernel/arch/mips/mm/sc-debugfs.c | 81
-rw-r--r-- | kernel/arch/mips/mm/sc-mips.c | 112
-rw-r--r-- | kernel/arch/mips/mm/tlb-r3k.c | 37
-rw-r--r-- | kernel/arch/mips/mm/tlb-r4k.c | 2
-rw-r--r-- | kernel/arch/mips/mm/tlbex.c | 132
14 files changed, 501 insertions, 149 deletions
diff --git a/kernel/arch/mips/mm/Makefile b/kernel/arch/mips/mm/Makefile
index 67ede4ef9..b4c64bd3f 100644
--- a/kernel/arch/mips/mm/Makefile
+++ b/kernel/arch/mips/mm/Makefile
@@ -28,3 +28,4 @@ obj-$(CONFIG_IP22_CPU_SCACHE)	+= sc-ip22.o
 obj-$(CONFIG_R5000_CPU_SCACHE)	+= sc-r5k.o
 obj-$(CONFIG_RM7000_CPU_SCACHE)	+= sc-rm7k.o
 obj-$(CONFIG_MIPS_CPU_SCACHE)	+= sc-mips.o
+obj-$(CONFIG_SCACHE_DEBUGFS)	+= sc-debugfs.o
diff --git a/kernel/arch/mips/mm/c-r4k.c b/kernel/arch/mips/mm/c-r4k.c
index dca0efc07..5d3a25e1c 100644
--- a/kernel/arch/mips/mm/c-r4k.c
+++ b/kernel/arch/mips/mm/c-r4k.c
@@ -303,7 +303,7 @@ static void r4k_blast_icache_page_setup(void)
 
 static void (*r4k_blast_icache_user_page)(unsigned long addr);
 
-static void __cpuinit r4k_blast_icache_user_page_setup(void)
+static void r4k_blast_icache_user_page_setup(void)
 {
 	unsigned long ic_lsize = cpu_icache_line_size();
 
@@ -945,7 +945,9 @@ static void b5k_instruction_hazard(void)
 }
 
 static char *way_string[] = { NULL, "direct mapped", "2-way",
-	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
+	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way",
+	"9-way", "10-way", "11-way", "12-way",
+	"13-way", "14-way", "15-way", "16-way",
 };
 
 static void probe_pcache(void)
@@ -1274,6 +1276,7 @@ static void probe_pcache(void)
 	case CPU_PROAPTIV:
 	case CPU_M5150:
 	case CPU_QEMU_GENERIC:
+	case CPU_I6400:
 		if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
 		    (c->icache.waysize > PAGE_SIZE))
 			c->icache.flags |= MIPS_CACHE_ALIASES;
diff --git a/kernel/arch/mips/mm/c-tx39.c b/kernel/arch/mips/mm/c-tx39.c
index 8d909dbbf..596e18458 100644
--- a/kernel/arch/mips/mm/c-tx39.c
+++ b/kernel/arch/mips/mm/c-tx39.c
@@ -28,8 +28,6 @@ static unsigned long icache_size, dcache_size;	/* Size in bytes */
 
 #include <asm/r4kcache.h>
 
-extern int r3k_have_wired_reg;	/* in r3k-tlb.c */
-
 /* This sequence is required to ensure icache is disabled immediately */
 #define TX39_STOP_STREAMING() \
 __asm__ __volatile__( \
@@ -383,8 +381,6 @@ void tx39_cache_init(void)
 	case CPU_TX3927:
 	default:
 		/* TX39/H2,H3 core (writeback 2way-set-associative cache) */
-		r3k_have_wired_reg = 1;
-		write_c0_wired(0);	/* set 8 on reset... */
 		/* board-dependent init code may set WBON */
 
 		__flush_cache_vmap = tx39__flush_cache_vmap;
diff --git a/kernel/arch/mips/mm/cache.c b/kernel/arch/mips/mm/cache.c
index 77d96db82..aab218c36 100644
--- a/kernel/arch/mips/mm/cache.c
+++ b/kernel/arch/mips/mm/cache.c
@@ -160,18 +160,18 @@ static inline void setup_protection_map(void)
 		protection_map[1]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
 		protection_map[2]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
 		protection_map[3]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
-		protection_map[4]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+		protection_map[4]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
 		protection_map[5]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
-		protection_map[6]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+		protection_map[6]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
 		protection_map[7]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
 		protection_map[8]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
 		protection_map[9]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
 		protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
 		protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
-		protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+		protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
 		protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
-		protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ);
+		protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
 		protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
 
 	} else {
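The cache.c hunk above drops _PAGE_NO_READ from the exec-only slots of protection_map. For context, the generic mm code picks a slot using the low four vm_flags bits, so entry 4 is VM_EXEC alone and entry 14 is VM_SHARED | VM_WRITE | VM_EXEC. A minimal sketch of that lookup, mirroring (not reproducing) the generic vm_get_page_prot():

/*
 * Sketch only: protection_map is indexed by the low four vm_flags bits
 * (VM_READ = 1, VM_WRITE = 2, VM_EXEC = 4, VM_SHARED = 8), so the
 * entries patched above are the exec-without-read combinations.
 */
static pgprot_t example_page_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags &
			      (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}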
diff --git a/kernel/arch/mips/mm/dma-default.c b/kernel/arch/mips/mm/dma-default.c
index 609d1241b..730d394ce 100644
--- a/kernel/arch/mips/mm/dma-default.c
+++ b/kernel/arch/mips/mm/dma-default.c
@@ -100,7 +100,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
 	else
 #endif
 #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
-	if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+	if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
 		dma_flag = __GFP_DMA;
 	else
 #endif
@@ -112,7 +112,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
 	return gfp | dma_flag;
 }
 
-void *dma_alloc_noncoherent(struct device *dev, size_t size,
+static void *mips_dma_alloc_noncoherent(struct device *dev, size_t size,
 	dma_addr_t * dma_handle, gfp_t gfp)
 {
 	void *ret;
@@ -128,7 +128,6 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
 
 	return ret;
 }
-EXPORT_SYMBOL(dma_alloc_noncoherent);
 
 static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
 	dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
@@ -137,12 +136,16 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
 	struct page *page = NULL;
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
-	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
-		return ret;
+	/*
+	 * XXX: seems like the coherent and non-coherent implementations could
+	 * be consolidated.
+	 */
+	if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
+		return mips_dma_alloc_noncoherent(dev, size, dma_handle, gfp);
 
 	gfp = massage_gfp_flags(dev, gfp);
 
-	if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC))
+	if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
 		page = dma_alloc_from_contiguous(dev, count, get_order(size));
 	if (!page)
@@ -164,24 +167,24 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
 	return ret;
 }
 
-void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
-	dma_addr_t dma_handle)
+static void mips_dma_free_noncoherent(struct device *dev, size_t size,
+		void *vaddr, dma_addr_t dma_handle)
 {
 	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
 	free_pages((unsigned long) vaddr, get_order(size));
 }
-EXPORT_SYMBOL(dma_free_noncoherent);
 
 static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 	dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	unsigned long addr = (unsigned long) vaddr;
-	int order = get_order(size);
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	struct page *page = NULL;
 
-	if (dma_release_from_coherent(dev, order, vaddr))
+	if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
+		mips_dma_free_noncoherent(dev, size, vaddr, dma_handle);
 		return;
+	}
 
 	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
 
@@ -194,6 +197,40 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 	__free_pages(page, get_order(size));
 }
 
+static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+	void *cpu_addr, dma_addr_t dma_addr, size_t size,
+	struct dma_attrs *attrs)
+{
+	unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned long addr = (unsigned long)cpu_addr;
+	unsigned long off = vma->vm_pgoff;
+	unsigned long pfn;
+	int ret = -ENXIO;
+
+	if (!plat_device_is_coherent(dev) && !hw_coherentio)
+		addr = CAC_ADDR(addr);
+
+	pfn = page_to_pfn(virt_to_page((void *)addr));
+
+	if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	else
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+		return ret;
+
+	if (off < count && user_count <= (count - off)) {
+		ret = remap_pfn_range(vma, vma->vm_start,
+				      pfn + off,
+				      user_count << PAGE_SHIFT,
+				      vma->vm_page_prot);
+	}
+
+	return ret;
+}
+
 static inline void __dma_sync_virtual(void *addr, size_t size,
 	enum dma_data_direction direction)
 {
@@ -262,12 +299,13 @@ static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
 	plat_unmap_dma_mem(dev, dma_addr, size, direction);
 }
 
-static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
+static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
 {
 	int i;
+	struct scatterlist *sg;
 
-	for (i = 0; i < nents; i++, sg++) {
+	for_each_sg(sglist, sg, nents, i) {
 		if (!plat_device_is_coherent(dev))
 			__dma_sync(sg_page(sg), sg->offset, sg->length,
 				   direction);
@@ -291,13 +329,14 @@ static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
 	return plat_map_dma_mem_page(dev, page) + offset;
 }
 
-static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	int nhwentries, enum dma_data_direction direction,
 	struct dma_attrs *attrs)
 {
 	int i;
+	struct scatterlist *sg;
 
-	for (i = 0; i < nhwentries; i++, sg++) {
+	for_each_sg(sglist, sg, nhwentries, i) {
 		if (!plat_device_is_coherent(dev) &&
 		    direction != DMA_TO_DEVICE)
 			__dma_sync(sg_page(sg), sg->offset, sg->length,
@@ -324,26 +363,34 @@ static void mips_dma_sync_single_for_device(struct device *dev,
 }
 
 static void mips_dma_sync_sg_for_cpu(struct device *dev,
-	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
+	struct scatterlist *sglist, int nelems,
+	enum dma_data_direction direction)
 {
 	int i;
+	struct scatterlist *sg;
 
-	if (cpu_needs_post_dma_flush(dev))
-		for (i = 0; i < nelems; i++, sg++)
+	if (cpu_needs_post_dma_flush(dev)) {
+		for_each_sg(sglist, sg, nelems, i) {
 			__dma_sync(sg_page(sg), sg->offset, sg->length,
 				   direction);
+		}
+	}
 	plat_post_dma_flush(dev);
 }
 
 static void mips_dma_sync_sg_for_device(struct device *dev,
-	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
+	struct scatterlist *sglist, int nelems,
+	enum dma_data_direction direction)
 {
 	int i;
+	struct scatterlist *sg;
 
-	if (!plat_device_is_coherent(dev))
-		for (i = 0; i < nelems; i++, sg++)
+	if (!plat_device_is_coherent(dev)) {
+		for_each_sg(sglist, sg, nelems, i) {
 			__dma_sync(sg_page(sg), sg->offset, sg->length,
 				   direction);
+		}
+	}
 }
 
 int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -370,6 +417,7 @@ EXPORT_SYMBOL(dma_cache_sync);
 static struct dma_map_ops mips_default_dma_map_ops = {
 	.alloc = mips_dma_alloc_coherent,
 	.free = mips_dma_free_coherent,
+	.mmap = mips_dma_mmap,
 	.map_page = mips_dma_map_page,
 	.unmap_page = mips_dma_unmap_page,
 	.map_sg = mips_dma_map_sg,
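The dma-default.c conversion from open-coded sg++ walks to for_each_sg() is more than cosmetic: a scatterlist may be chained across allocations, and incrementing the pointer walks off the end of a chunk, whereas for_each_sg() follows the chain link. A minimal sketch of the pattern, with a hypothetical function name:

#include <linux/scatterlist.h>

/* Walk a (possibly chained) scatterlist safely; sg++ would not
 * cross chunk boundaries, for_each_sg() does. */
static void example_walk_sg(struct scatterlist *sglist, int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nents, i)
		pr_debug("sg %d: offset %u, length %u\n",
			 i, sg->offset, sg->length);
}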
diff --git a/kernel/arch/mips/mm/fault.c b/kernel/arch/mips/mm/fault.c
index 36c0f26fa..4b88fa031 100644
--- a/kernel/arch/mips/mm/fault.c
+++ b/kernel/arch/mips/mm/fault.c
@@ -57,12 +57,10 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
 
 #ifdef CONFIG_KPROBES
 	/*
-	 * This is to notify the fault handler of the kprobes. The
-	 * exception code is redundant as it is also carried in REGS,
-	 * but we pass it anyhow.
+	 * This is to notify the fault handler of the kprobes.
 	 */
 	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
-		       (regs->cp0_cause >> 2) & 0x1f, SIGSEGV) == NOTIFY_STOP)
+		       current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
 		return;
 #endif
 
@@ -133,7 +131,8 @@ good_area:
 #endif
 			goto bad_area;
 		}
-		if (!(vma->vm_flags & VM_READ)) {
+		if (!(vma->vm_flags & VM_READ) &&
+		    exception_epc(regs) != address) {
 #if 0
 			pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
 				  raw_smp_processor_id(),
@@ -223,6 +222,7 @@ bad_area_nosemaphore:
 			print_vma_addr(" ", regs->regs[31]);
 			pr_info("\n");
 		}
+		current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
 		info.si_signo = SIGSEGV;
 		info.si_errno = 0;
 		/* info.si_code has been set above */
@@ -281,6 +281,7 @@ do_sigbus:
 	       field, (unsigned long) regs->cp0_epc,
 	       field, (unsigned long) regs->regs[31]);
 #endif
+	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
 	tsk->thread.cp0_badvaddr = address;
 	info.si_signo = SIGBUS;
 	info.si_errno = 0;
diff --git a/kernel/arch/mips/mm/highmem.c b/kernel/arch/mips/mm/highmem.c
index 11661cbc1..d7258a103 100644
--- a/kernel/arch/mips/mm/highmem.c
+++ b/kernel/arch/mips/mm/highmem.c
@@ -118,19 +118,6 @@ void *kmap_atomic_pfn(unsigned long pfn)
 	return (void*) vaddr;
 }
 
-struct page *kmap_atomic_to_page(void *ptr)
-{
-	unsigned long idx, vaddr = (unsigned long)ptr;
-	pte_t *pte;
-
-	if (vaddr < FIXADDR_START)
-		return virt_to_page(ptr);
-
-	idx = virt_to_fix(vaddr);
-	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
-	return pte_page(*pte);
-}
-
 void __init kmap_init(void)
 {
 	unsigned long kmap_vstart;
diff --git a/kernel/arch/mips/mm/hugetlbpage.c b/kernel/arch/mips/mm/hugetlbpage.c
index 06e0f421b..74aa6f624 100644
--- a/kernel/arch/mips/mm/hugetlbpage.c
+++ b/kernel/arch/mips/mm/hugetlbpage.c
@@ -51,11 +51,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	return (pte_t *) pmd;
 }
 
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
-{
-	return 0;
-}
-
 /*
  * This function checks for proper alignment of input addr and len parameters.
 */
diff --git a/kernel/arch/mips/mm/init.c b/kernel/arch/mips/mm/init.c
index 198a3147d..8770e6191 100644
--- a/kernel/arch/mips/mm/init.c
+++ b/kernel/arch/mips/mm/init.c
@@ -37,12 +37,14 @@
 #include <asm/cpu.h>
 #include <asm/dma.h>
 #include <asm/kmap_types.h>
+#include <asm/maar.h>
 #include <asm/mmu_context.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
 #include <asm/fixmap.h>
+#include <asm/maar.h>
 
 /*
  * We have up to 8 empty zeroed pages so we can map one of the right colour
@@ -251,6 +253,119 @@ void __init fixrange_init(unsigned long start, unsigned long end,
 #endif
 }
 
+unsigned __weak platform_maar_init(unsigned num_pairs)
+{
+	struct maar_config cfg[BOOT_MEM_MAP_MAX];
+	unsigned i, num_configured, num_cfg = 0;
+	phys_addr_t skip;
+
+	for (i = 0; i < boot_mem_map.nr_map; i++) {
+		switch (boot_mem_map.map[i].type) {
+		case BOOT_MEM_RAM:
+		case BOOT_MEM_INIT_RAM:
+			break;
+		default:
+			continue;
+		}
+
+		skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff);
+
+		cfg[num_cfg].lower = boot_mem_map.map[i].addr;
+		cfg[num_cfg].lower += skip;
+
+		cfg[num_cfg].upper = cfg[num_cfg].lower;
+		cfg[num_cfg].upper += boot_mem_map.map[i].size - 1;
+		cfg[num_cfg].upper -= skip;
+
+		cfg[num_cfg].attrs = MIPS_MAAR_S;
+		num_cfg++;
+	}
+
+	num_configured = maar_config(cfg, num_cfg, num_pairs);
+	if (num_configured < num_cfg)
+		pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n",
+			num_pairs, num_cfg);
+
+	return num_configured;
+}
+
+void maar_init(void)
+{
+	unsigned num_maars, used, i;
+	phys_addr_t lower, upper, attr;
+	static struct {
+		struct maar_config cfgs[3];
+		unsigned used;
+	} recorded = { { { 0 } }, 0 };
+
+	if (!cpu_has_maar)
+		return;
+
+	/* Detect the number of MAARs */
+	write_c0_maari(~0);
+	back_to_back_c0_hazard();
+	num_maars = read_c0_maari() + 1;
+
+	/* MAARs should be in pairs */
+	WARN_ON(num_maars % 2);
+
+	/* Set MAARs using values we recorded already */
+	if (recorded.used) {
+		used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
+		BUG_ON(used != recorded.used);
+	} else {
+		/* Configure the required MAARs */
+		used = platform_maar_init(num_maars / 2);
+	}
+
+	/* Disable any further MAARs */
+	for (i = (used * 2); i < num_maars; i++) {
+		write_c0_maari(i);
+		back_to_back_c0_hazard();
+		write_c0_maar(0);
+		back_to_back_c0_hazard();
+	}
+
+	if (recorded.used)
+		return;
+
+	pr_info("MAAR configuration:\n");
+	for (i = 0; i < num_maars; i += 2) {
+		write_c0_maari(i);
+		back_to_back_c0_hazard();
+		upper = read_c0_maar();
+
+		write_c0_maari(i + 1);
+		back_to_back_c0_hazard();
+		lower = read_c0_maar();
+
+		attr = lower & upper;
+		lower = (lower & MIPS_MAAR_ADDR) << 4;
+		upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;
+
+		pr_info("  [%d]: ", i / 2);
+		if (!(attr & MIPS_MAAR_V)) {
+			pr_cont("disabled\n");
+			continue;
+		}
+
+		pr_cont("%pa-%pa", &lower, &upper);
+
+		if (attr & MIPS_MAAR_S)
+			pr_cont(" speculate");
+
+		pr_cont("\n");
+
+		/* Record the setup for use on secondary CPUs */
+		if (used <= ARRAY_SIZE(recorded.cfgs)) {
+			recorded.cfgs[recorded.used].lower = lower;
+			recorded.cfgs[recorded.used].upper = upper;
+			recorded.cfgs[recorded.used].attrs = attr;
+			recorded.used++;
+		}
+	}
+}
+
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 int page_is_ram(unsigned long pagenr)
 {
@@ -333,38 +448,6 @@ static inline void mem_init_free_highmem(void)
 #endif
 }
 
-unsigned __weak platform_maar_init(unsigned num_maars)
-{
-	return 0;
-}
-
-static void maar_init(void)
-{
-	unsigned num_maars, used, i;
-
-	if (!cpu_has_maar)
-		return;
-
-	/* Detect the number of MAARs */
-	write_c0_maari(~0);
-	back_to_back_c0_hazard();
-	num_maars = read_c0_maari() + 1;
-
-	/* MAARs should be in pairs */
-	WARN_ON(num_maars % 2);
-
-	/* Configure the required MAARs */
-	used = platform_maar_init(num_maars / 2);
-
-	/* Disable any further MAARs */
-	for (i = (used * 2); i < num_maars; i++) {
-		write_c0_maari(i);
-		back_to_back_c0_hazard();
-		write_c0_maar(0);
-		back_to_back_c0_hazard();
-	}
-}
-
 void __init mem_init(void)
 {
 #ifdef CONFIG_HIGHMEM
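For reference, each MAAR in a pair stores its physical address shifted right by four, which is why the pretty-printer above decodes with (maar & MIPS_MAAR_ADDR) << 4 and ORs 0xffff back into the inclusive upper bound (64 KiB granularity). A hedged sketch of that arithmetic; the helper names here are illustrative, not kernel API:

/* Illustrative encode/decode of the MAAR address field, matching the
 * arithmetic in maar_init() above. maar_encode()/maar_decode_upper()
 * are hypothetical names, not functions from this tree. */
static inline unsigned long maar_encode(phys_addr_t pa)
{
	return (pa >> 4) & MIPS_MAAR_ADDR;
}

static inline phys_addr_t maar_decode_upper(unsigned long maar)
{
	return ((maar & MIPS_MAAR_ADDR) << 4) | 0xffff;
}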
diff --git a/kernel/arch/mips/mm/sc-debugfs.c b/kernel/arch/mips/mm/sc-debugfs.c
new file mode 100644
index 000000000..5eefe3281
--- /dev/null
+++ b/kernel/arch/mips/mm/sc-debugfs.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2015 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <asm/bcache.h>
+#include <asm/debug.h>
+#include <asm/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+
+static ssize_t sc_prefetch_read(struct file *file, char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	bool enabled = bc_prefetch_is_enabled();
+	char buf[3];
+
+	buf[0] = enabled ? 'Y' : 'N';
+	buf[1] = '\n';
+	buf[2] = 0;
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t sc_prefetch_write(struct file *file,
+				 const char __user *user_buf,
+				 size_t count, loff_t *ppos)
+{
+	char buf[32];
+	ssize_t buf_size;
+	bool enabled;
+	int err;
+
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = '\0';
+	err = strtobool(buf, &enabled);
+	if (err)
+		return err;
+
+	if (enabled)
+		bc_prefetch_enable();
+	else
+		bc_prefetch_disable();
+
+	return count;
+}
+
+static const struct file_operations sc_prefetch_fops = {
+	.open = simple_open,
+	.llseek = default_llseek,
+	.read = sc_prefetch_read,
+	.write = sc_prefetch_write,
+};
+
+static int __init sc_debugfs_init(void)
+{
+	struct dentry *dir, *file;
+
+	if (!mips_debugfs_dir)
+		return -ENODEV;
+
+	dir = debugfs_create_dir("l2cache", mips_debugfs_dir);
+	if (IS_ERR(dir))
+		return PTR_ERR(dir);
+
+	file = debugfs_create_file("prefetch", S_IRUGO | S_IWUSR, dir,
+				   NULL, &sc_prefetch_fops);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	return 0;
+}
+late_initcall(sc_debugfs_init);
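Assuming mips_debugfs_dir is the arch's usual "mips" directory and debugfs is mounted at /sys/kernel/debug, this new file should surface as /sys/kernel/debug/mips/l2cache/prefetch: reading it reports Y or N, and writing anything strtobool() accepts (Y/N/1/0 and friends) toggles L2 prefetching at runtime.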
diff --git a/kernel/arch/mips/mm/sc-mips.c b/kernel/arch/mips/mm/sc-mips.c
index 4ceafd138..ddb815461 100644
--- a/kernel/arch/mips/mm/sc-mips.c
+++ b/kernel/arch/mips/mm/sc-mips.c
@@ -14,6 +14,7 @@
 #include <asm/pgtable.h>
 #include <asm/mmu_context.h>
 #include <asm/r4kcache.h>
+#include <asm/mips-cm.h>
 
 /*
  * MIPS32/MIPS64 L2 cache handling
@@ -50,11 +51,69 @@ static void mips_sc_disable(void)
 	/* L2 cache is permanently enabled */
 }
 
+static void mips_sc_prefetch_enable(void)
+{
+	unsigned long pftctl;
+
+	if (mips_cm_revision() < CM_REV_CM2_5)
+		return;
+
+	/*
+	 * If there is one or more L2 prefetch unit present then enable
+	 * prefetching for both code & data, for all ports.
+	 */
+	pftctl = read_gcr_l2_pft_control();
+	if (pftctl & CM_GCR_L2_PFT_CONTROL_NPFT_MSK) {
+		pftctl &= ~CM_GCR_L2_PFT_CONTROL_PAGEMASK_MSK;
+		pftctl |= PAGE_MASK & CM_GCR_L2_PFT_CONTROL_PAGEMASK_MSK;
+		pftctl |= CM_GCR_L2_PFT_CONTROL_PFTEN_MSK;
+		write_gcr_l2_pft_control(pftctl);
+
+		pftctl = read_gcr_l2_pft_control_b();
+		pftctl |= CM_GCR_L2_PFT_CONTROL_B_PORTID_MSK;
+		pftctl |= CM_GCR_L2_PFT_CONTROL_B_CEN_MSK;
+		write_gcr_l2_pft_control_b(pftctl);
+	}
+}
+
+static void mips_sc_prefetch_disable(void)
+{
+	unsigned long pftctl;
+
+	if (mips_cm_revision() < CM_REV_CM2_5)
+		return;
+
+	pftctl = read_gcr_l2_pft_control();
+	pftctl &= ~CM_GCR_L2_PFT_CONTROL_PFTEN_MSK;
+	write_gcr_l2_pft_control(pftctl);
+
+	pftctl = read_gcr_l2_pft_control_b();
+	pftctl &= ~CM_GCR_L2_PFT_CONTROL_B_PORTID_MSK;
+	pftctl &= ~CM_GCR_L2_PFT_CONTROL_B_CEN_MSK;
+	write_gcr_l2_pft_control_b(pftctl);
+}
+
+static bool mips_sc_prefetch_is_enabled(void)
+{
+	unsigned long pftctl;
+
+	if (mips_cm_revision() < CM_REV_CM2_5)
+		return false;
+
+	pftctl = read_gcr_l2_pft_control();
+	if (!(pftctl & CM_GCR_L2_PFT_CONTROL_NPFT_MSK))
+		return false;
+	return !!(pftctl & CM_GCR_L2_PFT_CONTROL_PFTEN_MSK);
+}
+
 static struct bcache_ops mips_sc_ops = {
 	.bc_enable = mips_sc_enable,
 	.bc_disable = mips_sc_disable,
 	.bc_wback_inv = mips_sc_wback_inv,
-	.bc_inv = mips_sc_inv
+	.bc_inv = mips_sc_inv,
+	.bc_prefetch_enable = mips_sc_prefetch_enable,
+	.bc_prefetch_disable = mips_sc_prefetch_disable,
+	.bc_prefetch_is_enabled = mips_sc_prefetch_is_enabled,
 };
 
 /*
@@ -94,6 +153,43 @@ static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
 	return 1;
 }
 
+static int __init mips_sc_probe_cm3(void)
+{
+	struct cpuinfo_mips *c = &current_cpu_data;
+	unsigned long cfg = read_gcr_l2_config();
+	unsigned long sets, line_sz, assoc;
+
+	if (cfg & CM_GCR_L2_CONFIG_BYPASS_MSK)
+		return 0;
+
+	sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE_MSK;
+	sets >>= CM_GCR_L2_CONFIG_SET_SIZE_SHF;
+	if (sets)
+		c->scache.sets = 64 << sets;
+
+	line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE_MSK;
+	line_sz >>= CM_GCR_L2_CONFIG_LINE_SIZE_SHF;
+	if (line_sz)
+		c->scache.linesz = 2 << line_sz;
+
+	assoc = cfg & CM_GCR_L2_CONFIG_ASSOC_MSK;
+	assoc >>= CM_GCR_L2_CONFIG_ASSOC_SHF;
+	c->scache.ways = assoc + 1;
+	c->scache.waysize = c->scache.sets * c->scache.linesz;
+	c->scache.waybit = __ffs(c->scache.waysize);
+
+	if (c->scache.linesz) {
+		c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
+		return 1;
+	}
+
+	return 0;
+}
+
+void __weak platform_early_l2_init(void)
+{
+}
+
 static inline int __init mips_sc_probe(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
@@ -103,6 +199,15 @@ static inline int __init mips_sc_probe(void)
 	/* Mark as not present until probe completed */
 	c->scache.flags |= MIPS_CACHE_NOT_PRESENT;
 
+	/*
+	 * Do we need some platform specific probing before
+	 * we configure L2?
+	 */
+	platform_early_l2_init();
+
+	if (mips_cm_revision() >= CM_REV_CM3)
+		return mips_sc_probe_cm3();
+
 	/* Ignore anything but MIPSxx processors */
 	if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
 			      MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
@@ -120,13 +225,13 @@ static inline int __init mips_sc_probe(void)
 		return 0;
 
 	tmp = (config2 >> 8) & 0x0f;
-	if (0 <= tmp && tmp <= 7)
+	if (tmp <= 7)
 		c->scache.sets = 64 << tmp;
 	else
 		return 0;
 
 	tmp = (config2 >> 0) & 0x0f;
-	if (0 <= tmp && tmp <= 7)
+	if (tmp <= 7)
 		c->scache.ways = tmp + 1;
 	else
 		return 0;
@@ -144,6 +249,7 @@ int mips_sc_init(void)
 	int found = mips_sc_probe();
 	if (found) {
 		mips_sc_enable();
+		mips_sc_prefetch_enable();
 		bcops = &mips_sc_ops;
 	}
 	return found;
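The CM3 probe above derives the cache geometry as sets x line size x ways, with sets encoded as 64 << n and line size as 2 << n. A small worked sketch of the arithmetic (the encodings are example inputs, not values read from real hardware):

/* E.g. sets_enc = 8, line_enc = 5, assoc_enc = 7 decodes to
 * 16384 sets * 64-byte lines * 8 ways = 8 MiB of L2. */
static unsigned long example_l2_bytes(unsigned int sets_enc,
				      unsigned int line_enc,
				      unsigned int assoc_enc)
{
	unsigned long sets = 64UL << sets_enc;
	unsigned long linesz = 2UL << line_enc;
	unsigned long ways = assoc_enc + 1;

	return sets * linesz * ways;
}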
diff --git a/kernel/arch/mips/mm/tlb-r3k.c b/kernel/arch/mips/mm/tlb-r3k.c
index 4094bbd42..b4f366f7c 100644
--- a/kernel/arch/mips/mm/tlb-r3k.c
+++ b/kernel/arch/mips/mm/tlb-r3k.c
@@ -36,30 +36,33 @@ extern void build_tlb_refill_handler(void);
 	"nop\n\t" \
 	".set pop\n\t")
 
-int r3k_have_wired_reg;		/* should be in cpu_data? */
+int r3k_have_wired_reg;		/* Should be in cpu_data? */
 
 /* TLB operations. */
-void local_flush_tlb_all(void)
+static void local_flush_tlb_from(int entry)
 {
-	unsigned long flags;
 	unsigned long old_ctx;
-	int entry;
-
-#ifdef DEBUG_TLB
-	printk("[tlball]");
-#endif
-	local_irq_save(flags);
 	old_ctx = read_c0_entryhi() & ASID_MASK;
 	write_c0_entrylo0(0);
-	entry = r3k_have_wired_reg ? read_c0_wired() : 8;
-	for (; entry < current_cpu_data.tlbsize; entry++) {
+	while (entry < current_cpu_data.tlbsize) {
 		write_c0_index(entry << 8);
 		write_c0_entryhi((entry | 0x80000) << 12);
-		BARRIER;
+		entry++;			/* BARRIER */
 		tlb_write_indexed();
 	}
 	write_c0_entryhi(old_ctx);
+}
+
+void local_flush_tlb_all(void)
+{
+	unsigned long flags;
+
+#ifdef DEBUG_TLB
+	printk("[tlball]");
+#endif
+	local_irq_save(flags);
+	local_flush_tlb_from(r3k_have_wired_reg ? read_c0_wired() : 8);
 	local_irq_restore(flags);
 }
 
@@ -277,7 +280,13 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 
 void tlb_init(void)
 {
-	local_flush_tlb_all();
-
+	switch (current_cpu_type()) {
+	case CPU_TX3922:
+	case CPU_TX3927:
+		r3k_have_wired_reg = 1;
+		write_c0_wired(0);	/* Set to 8 on reset... */
+		break;
+	}
+	local_flush_tlb_from(0);
 	build_tlb_refill_handler();
 }
diff --git a/kernel/arch/mips/mm/tlb-r4k.c b/kernel/arch/mips/mm/tlb-r4k.c
index 08318ecb8..5037d5868 100644
--- a/kernel/arch/mips/mm/tlb-r4k.c
+++ b/kernel/arch/mips/mm/tlb-r4k.c
@@ -423,7 +423,7 @@ int __init has_transparent_hugepage(void)
  * lifetime of the system
  */
 
-int temp_tlb_entry __cpuinitdata;
+int temp_tlb_entry;
 
 __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
 			       unsigned long entryhi, unsigned long pagemask)
diff --git a/kernel/arch/mips/mm/tlbex.c b/kernel/arch/mips/mm/tlbex.c
index 97c87027c..29f73e002 100644
--- a/kernel/arch/mips/mm/tlbex.c
+++ b/kernel/arch/mips/mm/tlbex.c
@@ -35,7 +35,7 @@
 #include <asm/uasm.h>
 #include <asm/setup.h>
 
-static int __cpuinitdata mips_xpa_disabled;
+static int mips_xpa_disabled;
 
 static int __init xpa_disable(char *s)
 {
@@ -242,7 +242,7 @@ static void output_pgtable_bits_defines(void)
 	pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
 	pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT);
 #endif
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 	if (cpu_has_rixi) {
 #ifdef _PAGE_NO_EXEC_SHIFT
 		pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
@@ -311,6 +311,7 @@ static struct uasm_label labels[128];
 static struct uasm_reloc relocs[128];
 
 static int check_for_high_segbits;
+static bool fill_includes_sw_bits;
 
 static unsigned int kscratch_used_mask;
 
@@ -630,8 +631,14 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 							unsigned int reg)
 {
-	if (cpu_has_rixi) {
-		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
+	if (cpu_has_rixi && _PAGE_NO_EXEC) {
+		if (fill_includes_sw_bits) {
+			UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
+		} else {
+			UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
+			UASM_i_ROTR(p, reg, reg,
+				    ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+		}
 	} else {
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
 		uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
@@ -1005,21 +1012,7 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
 	 * Kernel is a special case. Only a few CPUs use it.
 	 */
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
-	if (cpu_has_64bits) {
-		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
-		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
-		if (cpu_has_rixi) {
-			UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
-			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-			UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
-		} else {
-			uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
-			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-			uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
-		}
-		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
-	} else {
+	if (config_enabled(CONFIG_PHYS_ADDR_T_64BIT) && !cpu_has_64bits) {
 		int pte_off_even = sizeof(pte_t) / 2;
 		int pte_off_odd = pte_off_even + sizeof(pte_t);
 #ifdef CONFIG_XPA
@@ -1043,31 +1036,23 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 		uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
 		uasm_i_mthc0(p, ptep, C0_ENTRYLO1);
 #endif
+		return;
 	}
-#else
+
 	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
 	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
 	if (r45k_bvahwbug())
 		build_tlb_probe_entry(p);
-	if (cpu_has_rixi) {
-		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
-		if (r4k_250MHZhwbug())
-			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
-		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
-	} else {
-		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
-		if (r4k_250MHZhwbug())
-			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
-		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
-		if (r45k_bvahwbug())
-			uasm_i_mfc0(p, tmp, C0_INDEX);
-	}
+	build_convert_pte_to_entrylo(p, tmp);
+	if (r4k_250MHZhwbug())
+		UASM_i_MTC0(p, 0, C0_ENTRYLO0);
+	UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+	build_convert_pte_to_entrylo(p, ptep);
+	if (r45k_bvahwbug())
+		uasm_i_mfc0(p, tmp, C0_INDEX);
 	if (r4k_250MHZhwbug())
 		UASM_i_MTC0(p, 0, C0_ENTRYLO1);
 	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
-#endif
 }
 
 struct mips_huge_tlb_info {
@@ -1608,23 +1593,32 @@ build_pte_present(u32 **p, struct uasm_reloc **r,
 		  int pte, int ptr, int scratch, enum label_id lid)
 {
 	int t = scratch >= 0 ? scratch : pte;
+	int cur = pte;
 
 	if (cpu_has_rixi) {
 		if (use_bbit_insns()) {
 			uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
 			uasm_i_nop(p);
 		} else {
-			uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT);
-			uasm_i_andi(p, t, t, 1);
+			if (_PAGE_PRESENT_SHIFT) {
+				uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
+				cur = t;
+			}
+			uasm_i_andi(p, t, cur, 1);
 			uasm_il_beqz(p, r, t, lid);
 			if (pte == t)
 				/* You lose the SMP race :-(*/
 				iPTE_LW(p, pte, ptr);
 		}
 	} else {
-		uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT);
-		uasm_i_andi(p, t, t, 3);
-		uasm_i_xori(p, t, t, 3);
+		if (_PAGE_PRESENT_SHIFT) {
+			uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
+			cur = t;
+		}
+		uasm_i_andi(p, t, cur,
+			(_PAGE_PRESENT | _PAGE_READ) >> _PAGE_PRESENT_SHIFT);
+		uasm_i_xori(p, t, t,
+			(_PAGE_PRESENT | _PAGE_READ) >> _PAGE_PRESENT_SHIFT);
 		uasm_il_bnez(p, r, t, lid);
 		if (pte == t)
 			/* You lose the SMP race :-(*/
@@ -1652,10 +1646,16 @@ build_pte_writable(u32 **p, struct uasm_reloc **r,
 		   enum label_id lid)
 {
 	int t = scratch >= 0 ? scratch : pte;
+	int cur = pte;
 
-	uasm_i_srl(p, t, pte, _PAGE_PRESENT_SHIFT);
-	uasm_i_andi(p, t, t, 5);
-	uasm_i_xori(p, t, t, 5);
+	if (_PAGE_PRESENT_SHIFT) {
+		uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
+		cur = t;
+	}
+	uasm_i_andi(p, t, cur,
+		(_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT);
+	uasm_i_xori(p, t, t,
+		(_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT);
 	uasm_il_bnez(p, r, t, lid);
 	if (pte == t)
 		/* You lose the SMP race :-(*/
@@ -2284,6 +2284,10 @@ static void config_htw_params(void)
 	/* re-initialize the PTI field including the even/odd bit */
 	pwfield &= ~MIPS_PWFIELD_PTI_MASK;
 	pwfield |= PAGE_SHIFT << MIPS_PWFIELD_PTI_SHIFT;
+	if (CONFIG_PGTABLE_LEVELS >= 3) {
+		pwfield &= ~MIPS_PWFIELD_MDI_MASK;
+		pwfield |= PMD_SHIFT << MIPS_PWFIELD_MDI_SHIFT;
+	}
 	/* Set the PTEI right shift */
 	ptei = _PAGE_GLOBAL_SHIFT << MIPS_PWFIELD_PTEI_SHIFT;
 	pwfield |= ptei;
@@ -2305,9 +2309,11 @@ static void config_htw_params(void)
 
 	pwsize = ilog2(PTRS_PER_PGD) << MIPS_PWSIZE_GDW_SHIFT;
 	pwsize |= ilog2(PTRS_PER_PTE) << MIPS_PWSIZE_PTW_SHIFT;
+	if (CONFIG_PGTABLE_LEVELS >= 3)
+		pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT;
 
 	/* If XPA has been enabled, PTEs are 64-bit in size. */
-	if (read_c0_pagegrain() & PG_ELPA)
+	if (config_enabled(CONFIG_64BITS) || (read_c0_pagegrain() & PG_ELPA))
 		pwsize |= 1;
 
 	write_c0_pwsize(pwsize);
@@ -2345,6 +2351,41 @@ static void config_xpa_params(void)
 #endif
 }
 
+static void check_pabits(void)
+{
+	unsigned long entry;
+	unsigned pabits, fillbits;
+
+	if (!cpu_has_rixi || !_PAGE_NO_EXEC) {
+		/*
+		 * We'll only be making use of the fact that we can rotate bits
+		 * into the fill if the CPU supports RIXI, so don't bother
+		 * probing this for CPUs which don't.
+		 */
+		return;
+	}
+
+	write_c0_entrylo0(~0ul);
+	back_to_back_c0_hazard();
+	entry = read_c0_entrylo0();
+
+	/* clear all non-PFN bits */
+	entry &= ~((1 << MIPS_ENTRYLO_PFN_SHIFT) - 1);
+	entry &= ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
+
+	/* find a lower bound on PABITS, and upper bound on fill bits */
+	pabits = fls_long(entry) + 6;
+	fillbits = max_t(int, (int)BITS_PER_LONG - pabits, 0);
+
+	/* minus the RI & XI bits */
+	fillbits -= min_t(unsigned, fillbits, 2);
+
+	if (fillbits >= ilog2(_PAGE_NO_EXEC))
+		fill_includes_sw_bits = true;
+
+	pr_debug("Entry* registers contain %u fill bits\n", fillbits);
+}
+
 void build_tlb_refill_handler(void)
 {
 	/*
@@ -2355,6 +2396,7 @@ void build_tlb_refill_handler(void)
 	static int run_once = 0;
 
 	output_pgtable_bits_defines();
+	check_pabits();
 
 #ifdef CONFIG_64BIT
 	check_for_high_segbits = current_cpu_data.vmbits >
 				 (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
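The build_pte_present()/build_pte_writable() rework above emits an andi/xori pair computed from the page-flag definitions instead of the hard-coded masks 3 and 5, so the generated test keeps working when _PAGE_PRESENT_SHIFT is zero and the software bits move. In plain C, the emitted check amounts to the following (bit positions are illustrative stand-ins, not the real MIPS PTE layout):

#define EX_PRESENT	(1u << 0)	/* stand-in for _PAGE_PRESENT */
#define EX_READ		(1u << 1)	/* stand-in for _PAGE_READ */

/* Non-zero iff either required bit is clear: AND with the required
 * mask, then XOR against it, exactly like the emitted andi/xori. */
static int example_pte_bad(unsigned int pte)
{
	unsigned int need = EX_PRESENT | EX_READ;

	return ((pte & need) ^ need) != 0;
}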