Diffstat (limited to 'kernel/arch/arc/include')
 kernel/arch/arc/include/asm/arcregs.h          |  6
 kernel/arch/arc/include/asm/bitops.h           | 15
 kernel/arch/arc/include/asm/cacheflush.h       |  6
 kernel/arch/arc/include/asm/delay.h            | 11
 kernel/arch/arc/include/asm/entry.h            |  4
 kernel/arch/arc/include/asm/io.h               | 45
 kernel/arch/arc/include/asm/irqflags-compact.h |  2
 kernel/arch/arc/include/asm/pgtable.h          |  5
 kernel/arch/arc/include/asm/uaccess.h          | 11
9 files changed, 56 insertions, 49 deletions
diff --git a/kernel/arch/arc/include/asm/arcregs.h b/kernel/arch/arc/include/asm/arcregs.h
index 7fac7d85e..2c30a016c 100644
--- a/kernel/arch/arc/include/asm/arcregs.h
+++ b/kernel/arch/arc/include/asm/arcregs.h
@@ -374,12 +374,6 @@ static inline int is_isa_arcompact(void)
return IS_ENABLED(CONFIG_ISA_ARCOMPACT);
}
-#if defined(CONFIG_ISA_ARCOMPACT) && !defined(_CPU_DEFAULT_A7)
-#error "Toolchain not configured for ARCompact builds"
-#elif defined(CONFIG_ISA_ARCV2) && !defined(_CPU_DEFAULT_HS)
-#error "Toolchain not configured for ARCv2 builds"
-#endif
-
#endif /* __ASEMBLY__ */
#endif /* _ASM_ARC_ARCREGS_H */
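For context, is_isa_arcompact() just above folds to a compile-time constant via IS_ENABLED(CONFIG_ISA_ARCOMPACT), so callers can branch on the ISA in plain C and let dead-code elimination do the rest. A minimal sketch of that pattern (setup_isa_quirks() is a hypothetical caller, not a kernel function):

    /* Hypothetical caller: IS_ENABLED() yields a 0/1 constant, so the
     * compiler drops the untaken branch entirely - no #ifdef needed at
     * the call site.
     */
    static void setup_isa_quirks(void)
    {
            if (is_isa_arcompact())
                    pr_info("running on ARCompact\n");
            else
                    pr_info("running on ARCv2\n");
    }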
diff --git a/kernel/arch/arc/include/asm/bitops.h b/kernel/arch/arc/include/asm/bitops.h
index 57c1f3384..0352fb8d2 100644
--- a/kernel/arch/arc/include/asm/bitops.h
+++ b/kernel/arch/arc/include/asm/bitops.h
@@ -35,21 +35,6 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
\
m += nr >> 5; \
\
- /* \
- * ARC ISA micro-optimization: \
- * \
- * Instructions dealing with bitpos only consider lower 5 bits \
- * e.g (x << 33) is handled like (x << 1) by ASL instruction \
- * (mem pointer still needs adjustment to point to next word) \
- * \
- * Hence the masking to clamp @nr arg can be elided in general. \
- * \
- * However if @nr is a constant (above assumed in a register), \
- * and greater than 31, gcc can optimize away (x << 33) to 0, \
- * as overflow, given the 32-bit ISA. Thus masking needs to be \
- * done for const @nr, but no code is generated due to gcc \
- * const prop. \
- */ \
nr &= 0x1f; \
\
__asm__ __volatile__( \
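The comment deleted above explained why the nr &= 0x1f clamp is kept: ARC instructions that take a bit position only look at its lower 5 bits, so a variable nr wraps naturally in hardware, but a constant nr > 31 can be folded to 0 by gcc before any instruction is emitted, since out-of-range shifts are undefined in C. A user-space illustration of the clamp (not kernel code):

    #include <stdio.h>

    /* Clamping the count first makes the constant-folded and the
     * hardware-masked paths agree: both treat nr modulo 32.
     */
    static unsigned int set_bit_clamped(unsigned int word, unsigned int nr)
    {
            return word | (1u << (nr & 0x1f));      /* always well defined */
    }

    int main(void)
    {
            printf("0x%x\n", set_bit_clamped(0, 33));  /* wraps to bit 1: prints 0x2 */
            return 0;
    }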
diff --git a/kernel/arch/arc/include/asm/cacheflush.h b/kernel/arch/arc/include/asm/cacheflush.h
index fbe3587c4..56aeb5efe 100644
--- a/kernel/arch/arc/include/asm/cacheflush.h
+++ b/kernel/arch/arc/include/asm/cacheflush.h
@@ -85,6 +85,10 @@ void flush_anon_page(struct vm_area_struct *vma,
*/
#define PG_dc_clean PG_arch_1
+#define CACHE_COLORS_NUM 4
+#define CACHE_COLORS_MSK (CACHE_COLORS_NUM - 1)
+#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & CACHE_COLORS_MSK)
+
/*
* Simple wrapper over config option
* Bootup code ensures that hardware matches kernel configuration
@@ -94,8 +98,6 @@ static inline int cache_is_vipt_aliasing(void)
return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
}
-#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & 1)
-
/*
* checks if two addresses (after page aligning) index into same cache set
*/
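CACHE_COLOR() extracts which aliasing "color" a virtual address falls into; moving from the old & 1 to CACHE_COLORS_MSK widens support from two colors to four, covering VIPT caches whose way size spans up to four pages. A sketch of the congruence test these macros feed (helper name hypothetical; assumes 4 KiB pages, where the color is address bits [13:12]):

    /* Two addresses can alias in the VIPT D-cache only when they have
     * the same color, i.e. index into the same set after page shifting.
     */
    static inline int addrs_are_congruent(unsigned long a1, unsigned long a2)
    {
            return CACHE_COLOR(a1) == CACHE_COLOR(a2);
    }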
diff --git a/kernel/arch/arc/include/asm/delay.h b/kernel/arch/arc/include/asm/delay.h
index 08e7e2a16..d5da2115d 100644
--- a/kernel/arch/arc/include/asm/delay.h
+++ b/kernel/arch/arc/include/asm/delay.h
@@ -22,10 +22,13 @@
static inline void __delay(unsigned long loops)
{
__asm__ __volatile__(
- " lp 1f \n"
- " nop \n"
- "1: \n"
- : "+l"(loops));
+ " mov lp_count, %0 \n"
+ " lp 1f \n"
+ " nop \n"
+ "1: \n"
+ :
+ : "r"(loops)
+ : "lp_count");
}
extern void __bad_udelay(void);
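The rewritten __delay() no longer uses the "+l" constraint to have loops allocated into LP_COUNT; it accepts loops in any core register ("r"), moves it into lp_count by hand, and declares lp_count in the clobber list so the compiler knows the register is overwritten. The new asm annotated, as a sketch:

    static inline void __delay(unsigned long loops)
    {
            __asm__ __volatile__(
            "       mov lp_count, %0        \n"   /* load the HW loop counter */
            "       lp 1f                   \n"   /* zero-overhead loop to 1: */
            "       nop                     \n"
            "1:                             \n"
            :                       /* no outputs */
            : "r"(loops)            /* input in any core register */
            : "lp_count");          /* clobbered: compiler must not rely on it */
    }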
diff --git a/kernel/arch/arc/include/asm/entry.h b/kernel/arch/arc/include/asm/entry.h
index ad7860c5c..51597f344 100644
--- a/kernel/arch/arc/include/asm/entry.h
+++ b/kernel/arch/arc/include/asm/entry.h
@@ -142,7 +142,7 @@
#ifdef CONFIG_ARC_CURR_IN_REG
; Retrieve orig r25 and save it with rest of callee_regs
- ld.as r12, [r12, PT_user_r25]
+ ld r12, [r12, PT_user_r25]
PUSH r12
#else
PUSH r25
@@ -198,7 +198,7 @@
; SP is back to start of pt_regs
#ifdef CONFIG_ARC_CURR_IN_REG
- st.as r12, [sp, PT_user_r25]
+ st r12, [sp, PT_user_r25]
#endif
.endm
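The .as suffix on ARC load/store instructions scales the offset by the access size, but PT_user_r25 is a byte offset generated from the pt_regs layout, so the scaled form addressed a slot four times too far into the frame; plain ld/st uses the offset as given. In C terms (base and offset hypothetical):

    /* ld.as r12, [base, off]  computes  *(u32 *)(base + off * 4)  -- scaled
     * ld    r12, [base, off]  computes  *(u32 *)(base + off)      -- byte offset
     */
    static unsigned long load_saved_r25(char *base, unsigned long pt_user_r25)
    {
            return *(unsigned long *)(base + pt_user_r25);  /* correct form */
    }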
diff --git a/kernel/arch/arc/include/asm/io.h b/kernel/arch/arc/include/asm/io.h
index 694ece8a0..cb69299a4 100644
--- a/kernel/arch/arc/include/asm/io.h
+++ b/kernel/arch/arc/include/asm/io.h
@@ -13,6 +13,15 @@
#include <asm/byteorder.h>
#include <asm/page.h>
+#ifdef CONFIG_ISA_ARCV2
+#include <asm/barrier.h>
+#define __iormb() rmb()
+#define __iowmb() wmb()
+#else
+#define __iormb() do { } while (0)
+#define __iowmb() do { } while (0)
+#endif
+
extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
unsigned long flags);
@@ -22,6 +31,15 @@ extern void iounmap(const void __iomem *addr);
#define ioremap_wc(phy, sz) ioremap(phy, sz)
#define ioremap_wt(phy, sz) ioremap(phy, sz)
+/*
+ * io{read,write}{16,32}be() macros
+ */
+#define ioread16be(p) ({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32be(p) ({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
+
+#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); })
+#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); })
+
/* Change struct page to physical address */
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
@@ -99,15 +117,6 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
}
-#ifdef CONFIG_ISA_ARCV2
-#include <asm/barrier.h>
-#define __iormb() rmb()
-#define __iowmb() wmb()
-#else
-#define __iormb() do { } while (0)
-#define __iowmb() do { } while (0)
-#endif
-
/*
* MMIO can also get buffered/optimized in micro-arch, so barriers needed
* Based on ARM model for the typical use case
@@ -129,15 +138,23 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); })
/*
- * Relaxed API for drivers which can handle any ordering themselves
+ * Relaxed API for drivers which can handle barrier ordering themselves
+ *
+ * Also these are defined to perform little endian accesses.
+ * To provide the typical device register semantics of fixed endian,
+ * swap the byte order for Big Endian
+ *
+ * http://lkml.kernel.org/r/201603100845.30602.arnd@arndb.de
*/
#define readb_relaxed(c) __raw_readb(c)
-#define readw_relaxed(c) __raw_readw(c)
-#define readl_relaxed(c) __raw_readl(c)
+#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
+ __raw_readw(c)); __r; })
+#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
+ __raw_readl(c)); __r; })
#define writeb_relaxed(v,c) __raw_writeb(v,c)
-#define writew_relaxed(v,c) __raw_writew(v,c)
-#define writel_relaxed(v,c) __raw_writel(v,c)
+#define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c)
+#define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c)
#include <asm-generic/io.h>
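After this change the header offers three tiers of MMIO accessors: __raw_* (no byteswap, no barrier), *_relaxed (fixed little-endian, no barrier) and plain readX()/writeX() (little-endian plus __iormb()/__iowmb() ordering), with ioreadNbe()/iowriteNbe() for registers documented as big-endian. A hypothetical driver fragment showing where each tier fits (device and register offsets invented):

    #define REG_CTRL        0x00    /* little-endian control register */
    #define REG_STAT_BE     0x08    /* register documented as big-endian */

    static void hypo_dev_start(void __iomem *base, u32 cmd)
    {
            /* ordered store: __iowmb() makes earlier setup visible first */
            writel(cmd, base + REG_CTRL);

            /* relaxed poll: byteswapped on big-endian kernels but no
             * barrier - fine, nothing is ordered against this read */
            while (!(readl_relaxed(base + REG_CTRL) & 0x1))
                    cpu_relax();

            /* fixed big-endian register, independent of CPU endianness */
            (void)ioread32be(base + REG_STAT_BE);
    }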
diff --git a/kernel/arch/arc/include/asm/irqflags-compact.h b/kernel/arch/arc/include/asm/irqflags-compact.h
index c1d36458b..4c6eed80c 100644
--- a/kernel/arch/arc/include/asm/irqflags-compact.h
+++ b/kernel/arch/arc/include/asm/irqflags-compact.h
@@ -188,10 +188,10 @@ static inline int arch_irqs_disabled(void)
.endm
.macro IRQ_ENABLE scratch
+ TRACE_ASM_IRQ_ENABLE
lr \scratch, [status32]
or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
flag \scratch
- TRACE_ASM_IRQ_ENABLE
.endm
#endif /* __ASSEMBLY__ */
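Hoisting TRACE_ASM_IRQ_ENABLE above the flag instruction matches the generic C helpers: the tracer must record the enable while interrupts are still hard-disabled, otherwise an interrupt taken right after flag sees tracing state that claims IRQs are off. Simplified from the generic irqflags pattern:

    /* Enable: trace first, then flip the hardware bit.
     * Disable is the mirror image: flip first, then trace.
     */
    #define local_irq_enable_sketch()               \
            do {                                    \
                    trace_hardirqs_on();            \
                    raw_local_irq_enable();         \
            } while (0)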
diff --git a/kernel/arch/arc/include/asm/pgtable.h b/kernel/arch/arc/include/asm/pgtable.h
index 57af2f05a..e5fec320f 100644
--- a/kernel/arch/arc/include/asm/pgtable.h
+++ b/kernel/arch/arc/include/asm/pgtable.h
@@ -110,7 +110,7 @@
#define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
/* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
/* More Abbrevaited helpers */
#define PAGE_U_NONE __pgprot(___DEF)
@@ -277,8 +277,7 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
-#define pfn_pte(pfn, prot) (__pte(((pte_t)(pfn) << PAGE_SHIFT) | \
- pgprot_val(prot)))
+#define pfn_pte(pfn, prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
/*
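Adding _PAGE_SPECIAL to _PAGE_CHG_MASK matters because pte_modify() preserves exactly the bits in that mask across a protection change; without it, mprotect() over a special mapping silently dropped pte_special(). The pfn_pte() rewrite removes the cast of pfn to pte_t before the shift, a cast that cannot compile when pte_t is a wrapper struct. The standard pte_modify() shape, for reference:

    /* Bits in _PAGE_CHG_MASK survive the protection change; everything
     * else is replaced by the new pgprot bits.
     */
    static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
    {
            return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
    }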
diff --git a/kernel/arch/arc/include/asm/uaccess.h b/kernel/arch/arc/include/asm/uaccess.h
index d1da6032b..d4d8df706 100644
--- a/kernel/arch/arc/include/asm/uaccess.h
+++ b/kernel/arch/arc/include/asm/uaccess.h
@@ -83,7 +83,10 @@
"2: ;nop\n" \
" .section .fixup, \"ax\"\n" \
" .align 4\n" \
- "3: mov %0, %3\n" \
+ "3: # return -EFAULT\n" \
+ " mov %0, %3\n" \
+ " # zero out dst ptr\n" \
+ " mov %1, 0\n" \
" j 2b\n" \
" .previous\n" \
" .section __ex_table, \"a\"\n" \
@@ -101,7 +104,11 @@
"2: ;nop\n" \
" .section .fixup, \"ax\"\n" \
" .align 4\n" \
- "3: mov %0, %3\n" \
+ "3: # return -EFAULT\n" \
+ " mov %0, %3\n" \
+ " # zero out dst ptr\n" \
+ " mov %1, 0\n" \
+ " mov %R1, 0\n" \
" j 2b\n" \
" .previous\n" \
" .section __ex_table, \"a\"\n" \