author     Yunhong Jiang <yunhong.jiang@intel.com>   2015-08-04 12:17:53 -0700
committer  Yunhong Jiang <yunhong.jiang@intel.com>   2015-08-04 15:44:42 -0700
commit     9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 (patch)
tree       1c9cafbcd35f783a87880a10f85d1a060db1a563 /kernel/arch/parisc/include/asm/cacheflush.h
parent     98260f3884f4a202f9ca5eabed40b1354c489b29 (diff)
Add the rt linux 4.1.3-rt3 as base

Import the rt linux 4.1.3-rt3 as OPNFV kvm base. It's from
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
linux-4.1.y-rt and the base is:

    commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
    Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
    Date:   Sat Jul 25 12:13:34 2015 +0200

        Prepare v4.1.3-rt3

        Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

We lose all the git history this way and it's not good. We should apply
another opnfv project repo in future.

Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Diffstat (limited to 'kernel/arch/parisc/include/asm/cacheflush.h')
-rw-r--r--   kernel/arch/parisc/include/asm/cacheflush.h   162
1 file changed, 162 insertions(+), 0 deletions(-)
diff --git a/kernel/arch/parisc/include/asm/cacheflush.h b/kernel/arch/parisc/include/asm/cacheflush.h
new file mode 100644
index 000000000..ec2df4bab
--- /dev/null
+++ b/kernel/arch/parisc/include/asm/cacheflush.h
@@ -0,0 +1,162 @@
+#ifndef _PARISC_CACHEFLUSH_H
+#define _PARISC_CACHEFLUSH_H
+
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <asm/tlbflush.h>
+
+/* The usual comment is "Caches aren't brain-dead on the <architecture>".
+ * Unfortunately, that doesn't apply to PA-RISC. */
+
+/* Internal implementation */
+void flush_data_cache_local(void *); /* flushes local data-cache only */
+void flush_instruction_cache_local(void *); /* flushes local code-cache only */
+#ifdef CONFIG_SMP
+void flush_data_cache(void); /* flushes data-cache only (all processors) */
+void flush_instruction_cache(void); /* flushes i-cache only (all processors) */
+#else
+#define flush_data_cache() flush_data_cache_local(NULL)
+#define flush_instruction_cache() flush_instruction_cache_local(NULL)
+#endif
+
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
+void flush_user_icache_range_asm(unsigned long, unsigned long);
+void flush_kernel_icache_range_asm(unsigned long, unsigned long);
+void flush_user_dcache_range_asm(unsigned long, unsigned long);
+void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
+void flush_kernel_dcache_page_asm(void *);
+void flush_kernel_icache_page(void *);
+void flush_user_dcache_range(unsigned long, unsigned long);
+void flush_user_icache_range(unsigned long, unsigned long);
+
+/* Cache flush operations */
+
+void flush_cache_all_local(void);
+void flush_cache_all(void);
+void flush_cache_mm(struct mm_struct *mm);
+
+#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+void flush_kernel_dcache_page_addr(void *addr);
+static inline void flush_kernel_dcache_page(struct page *page)
+{
+ flush_kernel_dcache_page_addr(page_address(page));
+}
+
+#define flush_kernel_dcache_range(start,size) \
+ flush_kernel_dcache_range_asm((start), (start)+(size));
+/* vmap range flushes and invalidates. Architecturally, we don't need
+ * the invalidate, because the CPU should refuse to speculate once an
+ * area has been flushed, so invalidate is left empty */
+static inline void flush_kernel_vmap_range(void *vaddr, int size)
+{
+ unsigned long start = (unsigned long)vaddr;
+
+ flush_kernel_dcache_range_asm(start, start + size);
+}
+static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+ unsigned long start = (unsigned long)vaddr;
+ void *cursor = vaddr;
+
+ for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
+ struct page *page = vmalloc_to_page(cursor);
+
+ if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+ flush_kernel_dcache_page(page);
+ }
+ flush_kernel_dcache_range_asm(start, start + size);
+}
+
+#define flush_cache_vmap(start, end) flush_cache_all()
+#define flush_cache_vunmap(start, end) flush_cache_all()
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+extern void flush_dcache_page(struct page *page);
+
+#define flush_dcache_mmap_lock(mapping) \
+ spin_lock_irq(&(mapping)->tree_lock)
+#define flush_dcache_mmap_unlock(mapping) \
+ spin_unlock_irq(&(mapping)->tree_lock)
+
+#define flush_icache_page(vma,page) do { \
+ flush_kernel_dcache_page(page); \
+ flush_kernel_icache_page(page_address(page)); \
+} while (0)
+
+#define flush_icache_range(s,e) do { \
+ flush_kernel_dcache_range_asm(s,e); \
+ flush_kernel_icache_range_asm(s,e); \
+} while (0)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+ flush_cache_page(vma, vaddr, page_to_pfn(page)); \
+ memcpy(dst, src, len); \
+ flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
+} while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+ flush_cache_page(vma, vaddr, page_to_pfn(page)); \
+ memcpy(dst, src, len); \
+} while (0)
+
+void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
+void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+
+/* defined in pacache.S exported in cache.c used by flush_anon_page */
+void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+
+#define ARCH_HAS_FLUSH_ANON_PAGE
+static inline void
+flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
+{
+ if (PageAnon(page)) {
+ flush_tlb_page(vma, vmaddr);
+ preempt_disable();
+ flush_dcache_page_asm(page_to_phys(page), vmaddr);
+ preempt_enable();
+ }
+}
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+#endif
+
+#include <asm/kmap_types.h>
+
+#define ARCH_HAS_KMAP
+
+static inline void *kmap(struct page *page)
+{
+ might_sleep();
+ return page_address(page);
+}
+
+static inline void kunmap(struct page *page)
+{
+ flush_kernel_dcache_page_addr(page_address(page));
+}
+
+static inline void *kmap_atomic(struct page *page)
+{
+ preempt_disable();
+ pagefault_disable();
+ return page_address(page);
+}
+
+static inline void __kunmap_atomic(void *addr)
+{
+ flush_kernel_dcache_page_addr(addr);
+ pagefault_enable();
+ preempt_enable();
+}
+
+#define kmap_atomic_prot(page, prot) kmap_atomic(page)
+#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
+#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
+
+#endif /* _PARISC_CACHEFLUSH_H */
+
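
For context only, not part of the commit: the flush_kernel_vmap_range()/invalidate_kernel_vmap_range() pair added above exists for I/O into memory obtained with vmalloc(), where dirty cache lines may live under the vmalloc alias rather than the linear-map alias the device uses. A minimal usage sketch follows; read_into_vmalloc_buffer() and do_device_read() are hypothetical names, not functions from this commit.

#include <linux/highmem.h>      /* pulls in asm/cacheflush.h on parisc */
#include <linux/vmalloc.h>

extern void do_device_read(void *buf, int len); /* hypothetical DMA read */

/* Hypothetical helper: let a device DMA into a vmalloc()ed buffer and
 * make its writes visible through the vmalloc alias afterwards. */
static int read_into_vmalloc_buffer(void *buf, int len)
{
        /* Write back any dirty lines under the vmalloc alias so the
         * device does not overwrite or read stale data. */
        flush_kernel_vmap_range(buf, len);

        do_device_read(buf, len);

        /* Flush the alias again so the CPU sees what the device wrote
         * rather than previously cached contents; per the comment in
         * the header, parisc implements this as a flush. */
        invalidate_kernel_vmap_range(buf, len);

        return 0;
}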
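Likewise for context only: a minimal sketch of how a caller typically pairs the kmap_atomic()/kunmap_atomic() mapping helpers defined in this header. On PA-RISC there is no highmem, so kmap_atomic() just returns page_address() after disabling preemption and pagefaults, and __kunmap_atomic() flushes the kernel dcache for the page before re-enabling them. The helper name zero_page_contents() is hypothetical.

#include <linux/highmem.h>      /* kunmap_atomic() wraps __kunmap_atomic() */
#include <linux/mm.h>
#include <linux/string.h>

/* Hypothetical helper: zero a page through a temporary kernel mapping. */
static void zero_page_contents(struct page *page)
{
        void *vaddr = kmap_atomic(page);   /* preemption and pagefaults off */

        memset(vaddr, 0, PAGE_SIZE);

        /* Flushes the kernel dcache for this page on parisc, then
         * re-enables pagefaults and preemption. */
        kunmap_atomic(vaddr);
}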