path: root/kernel/arch/xtensa/include
authorJosé Pekkarinen <jose.pekkarinen@nokia.com>2016-04-11 10:41:07 +0300
committerJosé Pekkarinen <jose.pekkarinen@nokia.com>2016-04-13 08:17:18 +0300
commite09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
treed10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/arch/xtensa/include
parentf93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. Kernel sources
are taken from kernel.org, and the rt patch from the rt wiki download page. During the rebase, one patch collided: "Force tick interrupt and get rid of softirq magic" (I70131fb85). The colliding hunks were dropped because their logic was already present in the source. Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769 Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/arch/xtensa/include')
-rw-r--r--  kernel/arch/xtensa/include/asm/Kbuild            |   4
-rw-r--r--  kernel/arch/xtensa/include/asm/asmmacro.h        |   7
-rw-r--r--  kernel/arch/xtensa/include/asm/atomic.h          |  83
-rw-r--r--  kernel/arch/xtensa/include/asm/cacheasm.h        |  26
-rw-r--r--  kernel/arch/xtensa/include/asm/cacheflush.h      | 106
-rw-r--r--  kernel/arch/xtensa/include/asm/cmpxchg.h         |   4
-rw-r--r--  kernel/arch/xtensa/include/asm/device.h          |  19
-rw-r--r--  kernel/arch/xtensa/include/asm/dma-mapping.h     | 187
-rw-r--r--  kernel/arch/xtensa/include/asm/initialize_mmu.h  |  13
-rw-r--r--  kernel/arch/xtensa/include/asm/io.h              |  11
-rw-r--r--  kernel/arch/xtensa/include/asm/irqflags.h        |  22
-rw-r--r--  kernel/arch/xtensa/include/asm/pci.h             |   2
-rw-r--r--  kernel/arch/xtensa/include/asm/pgtable.h         |   4
-rw-r--r--  kernel/arch/xtensa/include/asm/processor.h       |  31
-rw-r--r--  kernel/arch/xtensa/include/asm/stacktrace.h      |   8
-rw-r--r--  kernel/arch/xtensa/include/asm/vectors.h         |  28
-rw-r--r--  kernel/arch/xtensa/include/uapi/asm/mman.h       |   6
17 files changed, 186 insertions, 375 deletions
diff --git a/kernel/arch/xtensa/include/asm/Kbuild b/kernel/arch/xtensa/include/asm/Kbuild
index 86a9ab2e2..b56855a13 100644
--- a/kernel/arch/xtensa/include/asm/Kbuild
+++ b/kernel/arch/xtensa/include/asm/Kbuild
@@ -2,7 +2,6 @@ generic-y += bitsperlong.h
generic-y += bug.h
generic-y += clkdev.h
generic-y += cputime.h
-generic-y += device.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += errno.h
@@ -19,14 +18,15 @@ generic-y += linkage.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += resource.h
-generic-y += scatterlist.h
generic-y += sections.h
generic-y += siginfo.h
generic-y += statfs.h
generic-y += termios.h
generic-y += topology.h
generic-y += trace_clock.h
+generic-y += word-at-a-time.h
generic-y += xor.h
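
Each generic-y entry makes Kbuild emit a one-line wrapper header under include/generated/asm/, so the architecture transparently picks up the asm-generic implementation. For the new entry above, the generated file amounts to this (a sketch of the generated wrapper, not part of this patch):

/* include/generated/asm/word-at-a-time.h -- emitted by Kbuild */
#include <asm-generic/word-at-a-time.h>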
diff --git a/kernel/arch/xtensa/include/asm/asmmacro.h b/kernel/arch/xtensa/include/asm/asmmacro.h
index 755320f6e..746dcc8b5 100644
--- a/kernel/arch/xtensa/include/asm/asmmacro.h
+++ b/kernel/arch/xtensa/include/asm/asmmacro.h
@@ -35,9 +35,10 @@
* __loop as
* restart loop. 'as' register must not have been modified!
*
- * __endla ar, at, incr
+ * __endla ar, as, incr
* ar start address (modified)
- * as scratch register used by macro
+ * as scratch register used by __loops/__loopi macros or
+ * end address used by __loopt macro
* inc increment
*/
@@ -97,7 +98,7 @@
.endm
/*
- * loop from ar to ax
+ * loop from ar to as
*/
.macro __loopt ar, as, at, incr_log2
diff --git a/kernel/arch/xtensa/include/asm/atomic.h b/kernel/arch/xtensa/include/asm/atomic.h
index 00b7d46b3..fd8017ce2 100644
--- a/kernel/arch/xtensa/include/asm/atomic.h
+++ b/kernel/arch/xtensa/include/asm/atomic.h
@@ -29,7 +29,7 @@
*
* Locking interrupts looks like this:
*
- * rsil a15, LOCKLEVEL
+ * rsil a15, TOPLEVEL
* <code>
* wsr a15, PS
* rsync
@@ -47,7 +47,7 @@
*
* Atomically reads the value of @v.
*/
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
+#define atomic_read(v) READ_ONCE((v)->counter)
/**
* atomic_set - set atomic variable
@@ -56,7 +56,7 @@
*
* Atomically sets the value of @v to @i.
*/
-#define atomic_set(v,i) ((v)->counter = (i))
+#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
#if XCHAL_HAVE_S32C1I
#define ATOMIC_OP(op) \
@@ -106,7 +106,7 @@ static inline void atomic_##op(int i, atomic_t * v) \
unsigned int vval; \
\
__asm__ __volatile__( \
- " rsil a15, "__stringify(LOCKLEVEL)"\n"\
+ " rsil a15, "__stringify(TOPLEVEL)"\n"\
" l32i %0, %2, 0\n" \
" " #op " %0, %0, %1\n" \
" s32i %0, %2, 0\n" \
@@ -124,7 +124,7 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
unsigned int vval; \
\
__asm__ __volatile__( \
- " rsil a15,"__stringify(LOCKLEVEL)"\n" \
+ " rsil a15,"__stringify(TOPLEVEL)"\n" \
" l32i %0, %2, 0\n" \
" " #op " %0, %0, %1\n" \
" s32i %0, %2, 0\n" \
@@ -145,6 +145,10 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
@@ -250,75 +254,6 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
return c;
}
-
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
- unsigned long tmp;
- int result;
-
- __asm__ __volatile__(
- "1: l32i %1, %3, 0\n"
- " wsr %1, scompare1\n"
- " and %0, %1, %2\n"
- " s32c1i %0, %3, 0\n"
- " bne %0, %1, 1b\n"
- : "=&a" (result), "=&a" (tmp)
- : "a" (~mask), "a" (v)
- : "memory"
- );
-#else
- unsigned int all_f = -1;
- unsigned int vval;
-
- __asm__ __volatile__(
- " rsil a15,"__stringify(LOCKLEVEL)"\n"
- " l32i %0, %2, 0\n"
- " xor %1, %4, %3\n"
- " and %0, %0, %4\n"
- " s32i %0, %2, 0\n"
- " wsr a15, ps\n"
- " rsync\n"
- : "=&a" (vval), "=a" (mask)
- : "a" (v), "a" (all_f), "1" (mask)
- : "a15", "memory"
- );
-#endif
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
- unsigned long tmp;
- int result;
-
- __asm__ __volatile__(
- "1: l32i %1, %3, 0\n"
- " wsr %1, scompare1\n"
- " or %0, %1, %2\n"
- " s32c1i %0, %3, 0\n"
- " bne %0, %1, 1b\n"
- : "=&a" (result), "=&a" (tmp)
- : "a" (mask), "a" (v)
- : "memory"
- );
-#else
- unsigned int vval;
-
- __asm__ __volatile__(
- " rsil a15,"__stringify(LOCKLEVEL)"\n"
- " l32i %0, %2, 0\n"
- " or %0, %0, %1\n"
- " s32i %0, %2, 0\n"
- " wsr a15, ps\n"
- " rsync\n"
- : "=&a" (vval)
- : "a" (mask), "a" (v)
- : "a15", "memory"
- );
-#endif
-}
-
#endif /* __KERNEL__ */
#endif /* _XTENSA_ATOMIC_H */
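
With the open-coded mask helpers gone, the bitwise primitives generated by ATOMIC_OP(and/or/xor) above cover the same use cases. A minimal migration sketch (kernel context assumed; the caller is hypothetical):

#include <linux/atomic.h>

static atomic_t flags = ATOMIC_INIT(0);

static void migrate_mask_users(void)
{
	/* Old: atomic_set_mask(0x10, &flags);  -- removed by this patch */
	atomic_or(0x10, &flags);	/* flags.counter |= 0x10 */

	/* Old: atomic_clear_mask(0x0f, &flags); -- removed by this patch */
	atomic_and(~0x0f, &flags);	/* flags.counter &= ~0x0f */
}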
diff --git a/kernel/arch/xtensa/include/asm/cacheasm.h b/kernel/arch/xtensa/include/asm/cacheasm.h
index 60e18773e..e0f9e1109 100644
--- a/kernel/arch/xtensa/include/asm/cacheasm.h
+++ b/kernel/arch/xtensa/include/asm/cacheasm.h
@@ -73,7 +73,9 @@
.macro ___unlock_dcache_all ar at
+#if XCHAL_DCACHE_SIZE
__loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
+#endif
.endm
@@ -90,30 +92,38 @@
.macro ___flush_invalidate_dcache_all ar at
+#if XCHAL_DCACHE_SIZE
__loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
+#endif
.endm
.macro ___flush_dcache_all ar at
+#if XCHAL_DCACHE_SIZE
__loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
+#endif
.endm
.macro ___invalidate_dcache_all ar at
+#if XCHAL_DCACHE_SIZE
__loop_cache_all \ar \at dii __stringify(DCACHE_WAY_SIZE) \
XCHAL_DCACHE_LINEWIDTH
+#endif
.endm
.macro ___invalidate_icache_all ar at
+#if XCHAL_ICACHE_SIZE
__loop_cache_all \ar \at iii __stringify(ICACHE_WAY_SIZE) \
XCHAL_ICACHE_LINEWIDTH
+#endif
.endm
@@ -121,28 +131,36 @@
.macro ___flush_invalidate_dcache_range ar as at
+#if XCHAL_DCACHE_SIZE
__loop_cache_range \ar \as \at dhwbi XCHAL_DCACHE_LINEWIDTH
+#endif
.endm
.macro ___flush_dcache_range ar as at
+#if XCHAL_DCACHE_SIZE
__loop_cache_range \ar \as \at dhwb XCHAL_DCACHE_LINEWIDTH
+#endif
.endm
.macro ___invalidate_dcache_range ar as at
+#if XCHAL_DCACHE_SIZE
__loop_cache_range \ar \as \at dhi XCHAL_DCACHE_LINEWIDTH
+#endif
.endm
.macro ___invalidate_icache_range ar as at
+#if XCHAL_ICACHE_SIZE
__loop_cache_range \ar \as \at ihi XCHAL_ICACHE_LINEWIDTH
+#endif
.endm
@@ -150,27 +168,35 @@
.macro ___flush_invalidate_dcache_page ar as
+#if XCHAL_DCACHE_SIZE
__loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH
+#endif
.endm
.macro ___flush_dcache_page ar as
+#if XCHAL_DCACHE_SIZE
__loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH
+#endif
.endm
.macro ___invalidate_dcache_page ar as
+#if XCHAL_DCACHE_SIZE
__loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH
+#endif
.endm
.macro ___invalidate_icache_page ar as
+#if XCHAL_ICACHE_SIZE
__loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH
+#endif
.endm
diff --git a/kernel/arch/xtensa/include/asm/cacheflush.h b/kernel/arch/xtensa/include/asm/cacheflush.h
index 5f67ace97..397d6a1a4 100644
--- a/kernel/arch/xtensa/include/asm/cacheflush.h
+++ b/kernel/arch/xtensa/include/asm/cacheflush.h
@@ -55,9 +55,14 @@ extern void __flush_dcache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
#else
-# define __flush_dcache_range(p,s) do { } while(0)
-# define __flush_dcache_page(p) do { } while(0)
-# define __flush_invalidate_dcache_page(p) __invalidate_dcache_page(p)
+static inline void __flush_dcache_page(unsigned long va)
+{
+}
+static inline void __flush_dcache_range(unsigned long va, unsigned long sz)
+{
+}
+# define __flush_invalidate_dcache_all() __invalidate_dcache_all()
+# define __flush_invalidate_dcache_page(p) __invalidate_dcache_page(p)
# define __flush_invalidate_dcache_range(p,s) __invalidate_dcache_range(p,s)
#endif
@@ -174,99 +179,4 @@ extern void copy_from_user_page(struct vm_area_struct*, struct page*,
#endif
-#define XTENSA_CACHEBLK_LOG2 29
-#define XTENSA_CACHEBLK_SIZE (1 << XTENSA_CACHEBLK_LOG2)
-#define XTENSA_CACHEBLK_MASK (7 << XTENSA_CACHEBLK_LOG2)
-
-#if XCHAL_HAVE_CACHEATTR
-static inline u32 xtensa_get_cacheattr(void)
-{
- u32 r;
- asm volatile(" rsr %0, cacheattr" : "=a"(r));
- return r;
-}
-
-static inline u32 xtensa_get_dtlb1(u32 addr)
-{
- u32 r = addr & XTENSA_CACHEBLK_MASK;
- return r | ((xtensa_get_cacheattr() >> (r >> (XTENSA_CACHEBLK_LOG2-2)))
- & 0xF);
-}
-#else
-static inline u32 xtensa_get_dtlb1(u32 addr)
-{
- u32 r;
- asm volatile(" rdtlb1 %0, %1" : "=a"(r) : "a"(addr));
- asm volatile(" dsync");
- return r;
-}
-
-static inline u32 xtensa_get_cacheattr(void)
-{
- u32 r = 0;
- u32 a = 0;
- do {
- a -= XTENSA_CACHEBLK_SIZE;
- r = (r << 4) | (xtensa_get_dtlb1(a) & 0xF);
- } while (a);
- return r;
-}
-#endif
-
-static inline int xtensa_need_flush_dma_source(u32 addr)
-{
- return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) >= 4;
-}
-
-static inline int xtensa_need_invalidate_dma_destination(u32 addr)
-{
- return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) != 2;
-}
-
-static inline void flush_dcache_unaligned(u32 addr, u32 size)
-{
- u32 cnt;
- if (size) {
- cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
- + XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
- while (cnt--) {
- asm volatile(" dhwb %0, 0" : : "a"(addr));
- addr += XCHAL_DCACHE_LINESIZE;
- }
- asm volatile(" dsync");
- }
-}
-
-static inline void invalidate_dcache_unaligned(u32 addr, u32 size)
-{
- int cnt;
- if (size) {
- asm volatile(" dhwbi %0, 0 ;" : : "a"(addr));
- cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
- - XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
- while (cnt-- > 0) {
- asm volatile(" dhi %0, %1" : : "a"(addr),
- "n"(XCHAL_DCACHE_LINESIZE));
- addr += XCHAL_DCACHE_LINESIZE;
- }
- asm volatile(" dhwbi %0, %1" : : "a"(addr),
- "n"(XCHAL_DCACHE_LINESIZE));
- asm volatile(" dsync");
- }
-}
-
-static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size)
-{
- u32 cnt;
- if (size) {
- cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
- + XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
- while (cnt--) {
- asm volatile(" dhwbi %0, 0" : : "a"(addr));
- addr += XCHAL_DCACHE_LINESIZE;
- }
- asm volatile(" dsync");
- }
-}
-
#endif /* _XTENSA_CACHEFLUSH_H */
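
Replacing the empty macros with empty static inlines costs nothing at runtime but restores type checking of the arguments on cacheless configurations. A small illustration (the caller is hypothetical):

/* The old stub discarded its arguments unchecked:
 *	#define __flush_dcache_range(p,s) do { } while (0)
 * The inline version still compiles to nothing, but the compiler
 * now verifies that both arguments convert to unsigned long. */
static inline void __flush_dcache_range(unsigned long va, unsigned long sz)
{
}

static void sync_buffer(void *buf, unsigned long len)
{
	__flush_dcache_range((unsigned long)buf, len);	/* cast is checked */
}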
diff --git a/kernel/arch/xtensa/include/asm/cmpxchg.h b/kernel/arch/xtensa/include/asm/cmpxchg.h
index 370b26f38..201e9009e 100644
--- a/kernel/arch/xtensa/include/asm/cmpxchg.h
+++ b/kernel/arch/xtensa/include/asm/cmpxchg.h
@@ -34,7 +34,7 @@ __cmpxchg_u32(volatile int *p, int old, int new)
return new;
#else
__asm__ __volatile__(
- " rsil a15, "__stringify(LOCKLEVEL)"\n"
+ " rsil a15, "__stringify(TOPLEVEL)"\n"
" l32i %0, %1, 0\n"
" bne %0, %2, 1f\n"
" s32i %3, %1, 0\n"
@@ -123,7 +123,7 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
#else
unsigned long tmp;
__asm__ __volatile__(
- " rsil a15, "__stringify(LOCKLEVEL)"\n"
+ " rsil a15, "__stringify(TOPLEVEL)"\n"
" l32i %0, %1, 0\n"
" s32i %2, %1, 0\n"
" wsr a15, ps\n"
diff --git a/kernel/arch/xtensa/include/asm/device.h b/kernel/arch/xtensa/include/asm/device.h
new file mode 100644
index 000000000..fe1f5c878
--- /dev/null
+++ b/kernel/arch/xtensa/include/asm/device.h
@@ -0,0 +1,19 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#ifndef _ASM_XTENSA_DEVICE_H
+#define _ASM_XTENSA_DEVICE_H
+
+struct dma_map_ops;
+
+struct dev_archdata {
+ /* DMA operations on that device */
+ struct dma_map_ops *dma_ops;
+};
+
+struct pdev_archdata {
+};
+
+#endif /* _ASM_XTENSA_DEVICE_H */
diff --git a/kernel/arch/xtensa/include/asm/dma-mapping.h b/kernel/arch/xtensa/include/asm/dma-mapping.h
index ba78ccf65..66c9ba261 100644
--- a/kernel/arch/xtensa/include/asm/dma-mapping.h
+++ b/kernel/arch/xtensa/include/asm/dma-mapping.h
@@ -1,11 +1,10 @@
/*
- * include/asm-xtensa/dma-mapping.h
- *
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003 - 2005 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
*/
#ifndef _XTENSA_DMA_MAPPING_H
@@ -13,189 +12,37 @@
#include <asm/cache.h>
#include <asm/io.h>
+
+#include <asm-generic/dma-coherent.h>
+
#include <linux/mm.h>
#include <linux/scatterlist.h>
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-/*
- * DMA-consistent mapping functions.
- */
-
-extern void *consistent_alloc(int, size_t, dma_addr_t, unsigned long);
-extern void consistent_free(void*, size_t, dma_addr_t);
-extern void consistent_sync(void*, size_t, int);
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
+extern struct dma_map_ops xtensa_dma_map_ops;
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
- BUG_ON(direction == DMA_NONE);
- consistent_sync(ptr, size, direction);
- return virt_to_phys(ptr);
+ if (dev && dev->archdata.dma_ops)
+ return dev->archdata.dma_ops;
+ else
+ return &xtensa_dma_map_ops;
}
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
-{
- int i;
+#include <asm-generic/dma-mapping-common.h>
- BUG_ON(direction == DMA_NONE);
-
- for (i = 0; i < nents; i++, sg++ ) {
- BUG_ON(!sg_page(sg));
-
- sg->dma_address = sg_phys(sg);
- consistent_sync(sg_virt(sg), sg->length, direction);
- }
-
- return nents;
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset;
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-}
-
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
-{
- consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
-
- consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction);
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
-
- consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction);
-}
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction dir)
-{
- int i;
- for (i = 0; i < nelems; i++, sg++)
- consistent_sync(sg_virt(sg), sg->length, dir);
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction dir)
-{
- int i;
- for (i = 0; i < nelems; i++, sg++)
- consistent_sync(sg_virt(sg), sg->length, dir);
-}
-static inline int
-dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
- return 1;
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
- if(!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
-
- *dev->dma_mask = mask;
-
- return 0;
-}
-
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
- consistent_sync(vaddr, size, direction);
-}
-
-/* Not supported for now */
-static inline int dma_mmap_coherent(struct device *dev,
- struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size)
-{
- return -EINVAL;
-}
-
-static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size)
-{
- return -EINVAL;
-}
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction);
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
- return NULL;
+ return (dma_addr_t)paddr;
}
-static inline void dma_free_attrs(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
+ return (phys_addr_t)daddr;
}
#endif /* _XTENSA_DMA_MAPPING_H */
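
The per-operation helpers are gone; <asm-generic/dma-mapping-common.h> now provides them as thin wrappers that dispatch through get_dma_ops(). Roughly how dma_map_single() reaches xtensa_dma_map_ops (a simplified sketch of the 4.4-era generic wrapper, not code from this patch):

static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
					size_t size,
					enum dma_data_direction dir)
{
	/* Per-device override from dev->archdata.dma_ops, else the
	 * architecture-wide xtensa_dma_map_ops. */
	struct dma_map_ops *ops = get_dma_ops(dev);

	return ops->map_page(dev, virt_to_page(ptr), offset_in_page(ptr),
			     size, dir, NULL);
}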
diff --git a/kernel/arch/xtensa/include/asm/initialize_mmu.h b/kernel/arch/xtensa/include/asm/initialize_mmu.h
index e256f2270..7a1e07596 100644
--- a/kernel/arch/xtensa/include/asm/initialize_mmu.h
+++ b/kernel/arch/xtensa/include/asm/initialize_mmu.h
@@ -161,7 +161,8 @@
#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
XCHAL_HAVE_SPANNING_WAY */
-#if !defined(CONFIG_MMU) && XCHAL_HAVE_TLBS
+#if !defined(CONFIG_MMU) && XCHAL_HAVE_TLBS && \
+ (XCHAL_DCACHE_SIZE || XCHAL_ICACHE_SIZE)
/* Enable data and instruction cache in the DEFAULT_MEMORY region
* if the processor has DTLB and ITLB.
*/
@@ -175,14 +176,18 @@
1:
sub a9, a9, a8
2:
+#if XCHAL_DCACHE_SIZE
rdtlb1 a3, a5
- ritlb1 a4, a5
and a3, a3, a6
- and a4, a4, a6
or a3, a3, a7
- or a4, a4, a7
wdtlb a3, a5
+#endif
+#if XCHAL_ICACHE_SIZE
+ ritlb1 a4, a5
+ and a4, a4, a6
+ or a4, a4, a7
witlb a4, a5
+#endif
add a5, a5, a8
bltu a8, a9, 1b
diff --git a/kernel/arch/xtensa/include/asm/io.h b/kernel/arch/xtensa/include/asm/io.h
index fe1600a09..74fed0b4e 100644
--- a/kernel/arch/xtensa/include/asm/io.h
+++ b/kernel/arch/xtensa/include/asm/io.h
@@ -25,15 +25,6 @@
#ifdef CONFIG_MMU
-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
-extern unsigned long xtensa_kio_paddr;
-
-static inline unsigned long xtensa_get_kio_paddr(void)
-{
- return xtensa_kio_paddr;
-}
-#endif
-
/*
* Return the virtual address for the specified bus memory.
* Note that we currently don't support any address outside the KIO segment.
@@ -57,8 +48,10 @@ static inline void __iomem *ioremap_cache(unsigned long offset,
else
BUG();
}
+#define ioremap_cache ioremap_cache
#define ioremap_wc ioremap_nocache
+#define ioremap_wt ioremap_nocache
static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
{
diff --git a/kernel/arch/xtensa/include/asm/irqflags.h b/kernel/arch/xtensa/include/asm/irqflags.h
index ea36674c6..8e090c709 100644
--- a/kernel/arch/xtensa/include/asm/irqflags.h
+++ b/kernel/arch/xtensa/include/asm/irqflags.h
@@ -6,6 +6,7 @@
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
*/
#ifndef _XTENSA_IRQFLAGS_H
@@ -23,8 +24,27 @@ static inline unsigned long arch_local_save_flags(void)
static inline unsigned long arch_local_irq_save(void)
{
unsigned long flags;
- asm volatile("rsil %0, "__stringify(LOCKLEVEL)
+#if XTENSA_FAKE_NMI
+#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
+ unsigned long tmp;
+
+ asm volatile("rsr %0, ps\t\n"
+ "extui %1, %0, 0, 4\t\n"
+ "bgei %1, "__stringify(LOCKLEVEL)", 1f\t\n"
+ "rsil %0, "__stringify(LOCKLEVEL)"\n"
+ "1:"
+ : "=a" (flags), "=a" (tmp) :: "memory");
+#else
+ asm volatile("rsr %0, ps\t\n"
+ "or %0, %0, %1\t\n"
+ "xsr %0, ps\t\n"
+ "rsync"
+ : "=&a" (flags) : "a" (LOCKLEVEL) : "memory");
+#endif
+#else
+ asm volatile("rsil %0, "__stringify(LOCKLEVEL)
: "=a" (flags) :: "memory");
+#endif
return flags;
}
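
With fake NMI enabled, the save path must raise PS.INTLEVEL to LOCKLEVEL without ever lowering it (a plain rsil would clobber a higher current level, e.g. inside the profiling "NMI"). The OR trick works because x | y >= max(x, y) for the INTLEVEL bit field. A C model of the idea (illustrative only, not kernel code):

#define LOCKLEVEL_MODEL	2	/* stand-in for LOCKLEVEL */

static unsigned long irq_save_model(unsigned long *ps_intlevel)
{
	unsigned long old = *ps_intlevel;

	/* OR can only set bits, so the new level is >= both the old
	 * level and LOCKLEVEL; rsil would overwrite a higher level. */
	*ps_intlevel = old | LOCKLEVEL_MODEL;
	return old;
}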
diff --git a/kernel/arch/xtensa/include/asm/pci.h b/kernel/arch/xtensa/include/asm/pci.h
index 5d52dc43d..e438a00fb 100644
--- a/kernel/arch/xtensa/include/asm/pci.h
+++ b/kernel/arch/xtensa/include/asm/pci.h
@@ -33,7 +33,7 @@ extern struct pci_controller* pcibios_alloc_controller(void);
#include <linux/types.h>
#include <linux/slab.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
#include <linux/string.h>
#include <asm/io.h>
diff --git a/kernel/arch/xtensa/include/asm/pgtable.h b/kernel/arch/xtensa/include/asm/pgtable.h
index a5e929a10..fb02fdc5e 100644
--- a/kernel/arch/xtensa/include/asm/pgtable.h
+++ b/kernel/arch/xtensa/include/asm/pgtable.h
@@ -18,7 +18,11 @@
* We only use two ring levels, user and kernel space.
*/
+#ifdef CONFIG_MMU
#define USER_RING 1 /* user ring level */
+#else
+#define USER_RING 0
+#endif
#define KERNEL_RING 0 /* kernel ring level */
/*
diff --git a/kernel/arch/xtensa/include/asm/processor.h b/kernel/arch/xtensa/include/asm/processor.h
index b61bdf0ee..83e2e4bc0 100644
--- a/kernel/arch/xtensa/include/asm/processor.h
+++ b/kernel/arch/xtensa/include/asm/processor.h
@@ -1,11 +1,10 @@
/*
- * include/asm-xtensa/processor.h
- *
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2008 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
*/
#ifndef _XTENSA_PROCESSOR_H
@@ -45,6 +44,14 @@
#define STACK_TOP_MAX STACK_TOP
/*
+ * General exception cause assigned to fake NMI. Fake NMI needs to be handled
+ * differently from other interrupts, but it uses common kernel entry/exit
+ * code.
+ */
+
+#define EXCCAUSE_MAPPED_NMI 62
+
+/*
* General exception cause assigned to debug exceptions. Debug exceptions go
* to their own vector, rather than the general exception vectors (user,
* kernel, double); and their specific causes are reported via DEBUGCAUSE
@@ -65,10 +72,30 @@
#define VALID_DOUBLE_EXCEPTION_ADDRESS 64
+#define XTENSA_INT_LEVEL(intno) _XTENSA_INT_LEVEL(intno)
+#define _XTENSA_INT_LEVEL(intno) XCHAL_INT##intno##_LEVEL
+
+#define XTENSA_INTLEVEL_MASK(level) _XTENSA_INTLEVEL_MASK(level)
+#define _XTENSA_INTLEVEL_MASK(level) (XCHAL_INTLEVEL##level##_MASK)
+
+#define IS_POW2(v) (((v) & ((v) - 1)) == 0)
+
+#define PROFILING_INTLEVEL XTENSA_INT_LEVEL(XCHAL_PROFILING_INTERRUPT)
+
/* LOCKLEVEL defines the interrupt level that masks all
* general-purpose interrupts.
*/
+#if defined(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) && \
+ defined(XCHAL_PROFILING_INTERRUPT) && \
+ PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL && \
+ XCHAL_EXCM_LEVEL > 1 && \
+ IS_POW2(XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL))
+#define LOCKLEVEL (XCHAL_EXCM_LEVEL - 1)
+#else
#define LOCKLEVEL XCHAL_EXCM_LEVEL
+#endif
+#define TOPLEVEL XCHAL_EXCM_LEVEL
+#define XTENSA_FAKE_NMI (LOCKLEVEL < TOPLEVEL)
/* WSBITS and WBBITS are the width of the WINDOWSTART and WINDOWBASE
* registers
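
IS_POW2 gates the fake-NMI setup on the profiling interrupt being alone at its level: the level's interrupt mask must have exactly one bit set. A quick sanity check with hypothetical masks:

#define IS_POW2(v) (((v) & ((v) - 1)) == 0)

/* 0x0400: a single interrupt at the level -> can be treated as NMI */
_Static_assert(IS_POW2(0x0400), "lone interrupt at the level");
/* 0x0600: two interrupts share the level -> fake NMI disabled,
 * LOCKLEVEL stays equal to TOPLEVEL (XTENSA_FAKE_NMI == 0) */
_Static_assert(!IS_POW2(0x0600), "shared level");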
diff --git a/kernel/arch/xtensa/include/asm/stacktrace.h b/kernel/arch/xtensa/include/asm/stacktrace.h
index 6a05fcb0a..fe06e8ed1 100644
--- a/kernel/arch/xtensa/include/asm/stacktrace.h
+++ b/kernel/arch/xtensa/include/asm/stacktrace.h
@@ -33,4 +33,12 @@ void walk_stackframe(unsigned long *sp,
int (*fn)(struct stackframe *frame, void *data),
void *data);
+void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth,
+ int (*kfn)(struct stackframe *frame, void *data),
+ int (*ufn)(struct stackframe *frame, void *data),
+ void *data);
+void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth,
+ int (*ufn)(struct stackframe *frame, void *data),
+ void *data);
+
#endif /* _XTENSA_STACKTRACE_H */
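
The new backtrace entry points take per-frame callbacks, mirroring walk_stackframe() above. A hypothetical consumer (names invented for illustration; returning nonzero from the callback is assumed to stop the walk, per the walk_stackframe() convention):

static int count_frame(struct stackframe *frame, void *data)
{
	(*(unsigned int *)data)++;
	return 0;	/* keep walking */
}

static unsigned int count_kernel_frames(struct pt_regs *regs)
{
	unsigned int n = 0;

	xtensa_backtrace_kernel(regs, 32, count_frame, count_frame, &n);
	return n;
}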
diff --git a/kernel/arch/xtensa/include/asm/vectors.h b/kernel/arch/xtensa/include/asm/vectors.h
index a46c53f36..288c77673 100644
--- a/kernel/arch/xtensa/include/asm/vectors.h
+++ b/kernel/arch/xtensa/include/asm/vectors.h
@@ -21,13 +21,26 @@
#include <variant/core.h>
#include <platform/hardware.h>
+#if XCHAL_HAVE_PTP_MMU
#define XCHAL_KIO_CACHED_VADDR 0xe0000000
#define XCHAL_KIO_BYPASS_VADDR 0xf0000000
#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000
+#else
+#define XCHAL_KIO_BYPASS_VADDR XCHAL_KIO_PADDR
+#define XCHAL_KIO_DEFAULT_PADDR 0x90000000
+#endif
#define XCHAL_KIO_SIZE 0x10000000
-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
+#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_OF)
#define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
+#ifndef __ASSEMBLY__
+extern unsigned long xtensa_kio_paddr;
+
+static inline unsigned long xtensa_get_kio_paddr(void)
+{
+ return xtensa_kio_paddr;
+}
+#endif
#else
#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR
#endif
@@ -48,6 +61,9 @@
#define LOAD_MEMORY_ADDRESS 0xD0003000
#endif
+#define RESET_VECTOR1_VADDR (VIRTUAL_MEMORY_ADDRESS + \
+ XCHAL_RESET_VECTOR1_PADDR)
+
#else /* !defined(CONFIG_MMU) */
/* MMU Not being used - Virtual == Physical */
@@ -60,6 +76,8 @@
/* Loaded just above possibly live vectors */
#define LOAD_MEMORY_ADDRESS (PLATFORM_DEFAULT_MEM_START + 0x3000)
+#define RESET_VECTOR1_VADDR (XCHAL_RESET_VECTOR1_VADDR)
+
#endif /* CONFIG_MMU */
#define XC_VADDR(offset) (VIRTUAL_MEMORY_ADDRESS + offset)
@@ -67,14 +85,6 @@
/* Used to set VECBASE register */
#define VECBASE_RESET_VADDR VIRTUAL_MEMORY_ADDRESS
-#define RESET_VECTOR_VECOFS (XCHAL_RESET_VECTOR_VADDR - \
- VECBASE_RESET_VADDR)
-#define RESET_VECTOR_VADDR XC_VADDR(RESET_VECTOR_VECOFS)
-
-#define RESET_VECTOR1_VECOFS (XCHAL_RESET_VECTOR1_VADDR - \
- VECBASE_RESET_VADDR)
-#define RESET_VECTOR1_VADDR XC_VADDR(RESET_VECTOR1_VECOFS)
-
#if defined(XCHAL_HAVE_VECBASE) && XCHAL_HAVE_VECBASE
#define USER_VECTOR_VADDR XC_VADDR(XCHAL_USER_VECOFS)
diff --git a/kernel/arch/xtensa/include/uapi/asm/mman.h b/kernel/arch/xtensa/include/uapi/asm/mman.h
index 201aec0e0..360944e1d 100644
--- a/kernel/arch/xtensa/include/uapi/asm/mman.h
+++ b/kernel/arch/xtensa/include/uapi/asm/mman.h
@@ -74,6 +74,12 @@
*/
#define MCL_CURRENT 1 /* lock all current mappings */
#define MCL_FUTURE 2 /* lock all future mappings */
+#define MCL_ONFAULT 4 /* lock all pages that are faulted in */
+
+/*
+ * Flags for mlock
+ */
+#define MLOCK_ONFAULT 0x01 /* Lock pages in range after they are faulted in, do not prefault */
#define MADV_NORMAL 0 /* no further special treatment */
#define MADV_RANDOM 1 /* expect random page references */
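
MLOCK_ONFAULT pairs with the mlock2() syscall introduced in the same kernel series. A user-space sketch, calling through syscall(2) since the libc wrapper may be absent on this era's toolchains:

#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef MLOCK_ONFAULT
#define MLOCK_ONFAULT 0x01
#endif

/* Lock a range lazily: pages are pinned only once they fault in,
 * instead of being prefaulted like plain mlock(). */
static int lock_on_fault(void *addr, size_t len)
{
	return syscall(SYS_mlock2, addr, len, MLOCK_ONFAULT);
}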