Diffstat (limited to 'kernel/arch/arm/include/asm')
63 files changed, 1028 insertions, 745 deletions
diff --git a/kernel/arch/arm/include/asm/Kbuild b/kernel/arch/arm/include/asm/Kbuild index 3c4596d0c..bd425302c 100644 --- a/kernel/arch/arm/include/asm/Kbuild +++ b/kernel/arch/arm/include/asm/Kbuild @@ -12,15 +12,15 @@ generic-y += irq_regs.h generic-y += kdebug.h generic-y += local.h generic-y += local64.h -generic-y += mcs_spinlock.h +generic-y += mm-arch-hooks.h generic-y += msgbuf.h +generic-y += msi.h generic-y += param.h generic-y += parport.h generic-y += poll.h generic-y += preempt.h generic-y += resource.h generic-y += rwsem.h -generic-y += scatterlist.h generic-y += seccomp.h generic-y += sections.h generic-y += segment.h diff --git a/kernel/arch/arm/include/asm/arch_gicv3.h b/kernel/arch/arm/include/asm/arch_gicv3.h new file mode 100644 index 000000000..7da5503c0 --- /dev/null +++ b/kernel/arch/arm/include/asm/arch_gicv3.h @@ -0,0 +1,189 @@ +/* + * arch/arm/include/asm/arch_gicv3.h + * + * Copyright (C) 2015 ARM Ltd. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_ARCH_GICV3_H +#define __ASM_ARCH_GICV3_H + +#ifndef __ASSEMBLY__ + +#include <linux/io.h> +#include <asm/barrier.h> + +#define __ACCESS_CP15(CRn, Op1, CRm, Op2) p15, Op1, %0, CRn, CRm, Op2 +#define __ACCESS_CP15_64(Op1, CRm) p15, Op1, %Q0, %R0, CRm + +#define ICC_EOIR1 __ACCESS_CP15(c12, 0, c12, 1) +#define ICC_DIR __ACCESS_CP15(c12, 0, c11, 1) +#define ICC_IAR1 __ACCESS_CP15(c12, 0, c12, 0) +#define ICC_SGI1R __ACCESS_CP15_64(0, c12) +#define ICC_PMR __ACCESS_CP15(c4, 0, c6, 0) +#define ICC_CTLR __ACCESS_CP15(c12, 0, c12, 4) +#define ICC_SRE __ACCESS_CP15(c12, 0, c12, 5) +#define ICC_IGRPEN1 __ACCESS_CP15(c12, 0, c12, 7) + +#define ICC_HSRE __ACCESS_CP15(c12, 4, c9, 5) + +#define ICH_VSEIR __ACCESS_CP15(c12, 4, c9, 4) +#define ICH_HCR __ACCESS_CP15(c12, 4, c11, 0) +#define ICH_VTR __ACCESS_CP15(c12, 4, c11, 1) +#define ICH_MISR __ACCESS_CP15(c12, 4, c11, 2) +#define ICH_EISR __ACCESS_CP15(c12, 4, c11, 3) +#define ICH_ELSR __ACCESS_CP15(c12, 4, c11, 5) +#define ICH_VMCR __ACCESS_CP15(c12, 4, c11, 7) + +#define __LR0(x) __ACCESS_CP15(c12, 4, c12, x) +#define __LR8(x) __ACCESS_CP15(c12, 4, c13, x) + +#define ICH_LR0 __LR0(0) +#define ICH_LR1 __LR0(1) +#define ICH_LR2 __LR0(2) +#define ICH_LR3 __LR0(3) +#define ICH_LR4 __LR0(4) +#define ICH_LR5 __LR0(5) +#define ICH_LR6 __LR0(6) +#define ICH_LR7 __LR0(7) +#define ICH_LR8 __LR8(0) +#define ICH_LR9 __LR8(1) +#define ICH_LR10 __LR8(2) +#define ICH_LR11 __LR8(3) +#define ICH_LR12 __LR8(4) +#define ICH_LR13 __LR8(5) +#define ICH_LR14 __LR8(6) +#define ICH_LR15 __LR8(7) + +/* LR top half */ +#define __LRC0(x) __ACCESS_CP15(c12, 4, c14, x) +#define __LRC8(x) __ACCESS_CP15(c12, 4, c15, x) + +#define ICH_LRC0 __LRC0(0) +#define ICH_LRC1 __LRC0(1) +#define ICH_LRC2 __LRC0(2) +#define ICH_LRC3 __LRC0(3) +#define ICH_LRC4 __LRC0(4) +#define ICH_LRC5 __LRC0(5) +#define ICH_LRC6 __LRC0(6) +#define ICH_LRC7 __LRC0(7) +#define ICH_LRC8 __LRC8(0) +#define ICH_LRC9 __LRC8(1) +#define ICH_LRC10 __LRC8(2) +#define ICH_LRC11 __LRC8(3) 
+#define ICH_LRC12 __LRC8(4) +#define ICH_LRC13 __LRC8(5) +#define ICH_LRC14 __LRC8(6) +#define ICH_LRC15 __LRC8(7) + +#define __AP0Rx(x) __ACCESS_CP15(c12, 4, c8, x) +#define ICH_AP0R0 __AP0Rx(0) +#define ICH_AP0R1 __AP0Rx(1) +#define ICH_AP0R2 __AP0Rx(2) +#define ICH_AP0R3 __AP0Rx(3) + +#define __AP1Rx(x) __ACCESS_CP15(c12, 4, c9, x) +#define ICH_AP1R0 __AP1Rx(0) +#define ICH_AP1R1 __AP1Rx(1) +#define ICH_AP1R2 __AP1Rx(2) +#define ICH_AP1R3 __AP1Rx(3) + +/* Low-level accessors */ + +static inline void gic_write_eoir(u32 irq) +{ + asm volatile("mcr " __stringify(ICC_EOIR1) : : "r" (irq)); + isb(); +} + +static inline void gic_write_dir(u32 val) +{ + asm volatile("mcr " __stringify(ICC_DIR) : : "r" (val)); + isb(); +} + +static inline u32 gic_read_iar(void) +{ + u32 irqstat; + + asm volatile("mrc " __stringify(ICC_IAR1) : "=r" (irqstat)); + return irqstat; +} + +static inline void gic_write_pmr(u32 val) +{ + asm volatile("mcr " __stringify(ICC_PMR) : : "r" (val)); +} + +static inline void gic_write_ctlr(u32 val) +{ + asm volatile("mcr " __stringify(ICC_CTLR) : : "r" (val)); + isb(); +} + +static inline void gic_write_grpen1(u32 val) +{ + asm volatile("mcr " __stringify(ICC_IGRPEN1) : : "r" (val)); + isb(); +} + +static inline void gic_write_sgi1r(u64 val) +{ + asm volatile("mcrr " __stringify(ICC_SGI1R) : : "r" (val)); +} + +static inline u32 gic_read_sre(void) +{ + u32 val; + + asm volatile("mrc " __stringify(ICC_SRE) : "=r" (val)); + return val; +} + +static inline void gic_write_sre(u32 val) +{ + asm volatile("mcr " __stringify(ICC_SRE) : : "r" (val)); + isb(); +} + +/* + * Even in 32bit systems that use LPAE, there is no guarantee that the I/O + * interface provides true 64bit atomic accesses, so using strd/ldrd doesn't + * make much sense. + * Moreover, 64bit I/O emulation is extremely difficult to implement on + * AArch32, since the syndrome register doesn't provide any information for + * them. + * Consequently, the following IO helpers use 32bit accesses. + * + * There are only two registers that need 64bit accesses in this driver: + * - GICD_IROUTERn, contain the affinity values associated to each interrupt. + * The upper-word (aff3) will always be 0, so there is no need for a lock. + * - GICR_TYPER is an ID register and doesn't need atomicity. 
+ */ +static inline void gic_write_irouter(u64 val, volatile void __iomem *addr) +{ + writel_relaxed((u32)val, addr); + writel_relaxed((u32)(val >> 32), addr + 4); +} + +static inline u64 gic_read_typer(const volatile void __iomem *addr) +{ + u64 val; + + val = readl_relaxed(addr); + val |= (u64)readl_relaxed(addr + 4) << 32; + return val; +} + +#endif /* !__ASSEMBLY__ */ +#endif /* !__ASM_ARCH_GICV3_H */ diff --git a/kernel/arch/arm/include/asm/assembler.h b/kernel/arch/arm/include/asm/assembler.h index 186270b3e..b2bc8e114 100644 --- a/kernel/arch/arm/include/asm/assembler.h +++ b/kernel/arch/arm/include/asm/assembler.h @@ -108,33 +108,37 @@ .endm #endif - .macro asm_trace_hardirqs_off + .macro asm_trace_hardirqs_off, save=1 #if defined(CONFIG_TRACE_IRQFLAGS) + .if \save stmdb sp!, {r0-r3, ip, lr} + .endif bl trace_hardirqs_off + .if \save ldmia sp!, {r0-r3, ip, lr} + .endif #endif .endm - .macro asm_trace_hardirqs_on_cond, cond + .macro asm_trace_hardirqs_on, cond=al, save=1 #if defined(CONFIG_TRACE_IRQFLAGS) /* * actually the registers should be pushed and pop'd conditionally, but * after bl the flags are certainly clobbered */ + .if \save stmdb sp!, {r0-r3, ip, lr} + .endif bl\cond trace_hardirqs_on + .if \save ldmia sp!, {r0-r3, ip, lr} + .endif #endif .endm - .macro asm_trace_hardirqs_on - asm_trace_hardirqs_on_cond al - .endm - - .macro disable_irq + .macro disable_irq, save=1 disable_irq_notrace - asm_trace_hardirqs_off + asm_trace_hardirqs_off \save .endm .macro enable_irq @@ -173,11 +177,26 @@ .macro restore_irqs, oldcpsr tst \oldcpsr, #PSR_I_BIT - asm_trace_hardirqs_on_cond eq + asm_trace_hardirqs_on cond=eq restore_irqs_notrace \oldcpsr .endm /* + * Assembly version of "adr rd, BSYM(sym)". This should only be used to + * reference local symbols in the same assembly file which are to be + * resolved by the assembler. Other usage is undefined. + */ + .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo + .macro badr\c, rd, sym +#ifdef CONFIG_THUMB2_KERNEL + adr\c \rd, \sym + 1 +#else + adr\c \rd, \sym +#endif + .endm + .endr + +/* * Get current thread_info. */ .macro get_thread_info, rd @@ -326,7 +345,7 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) bne 1f orr \reg, \reg, #PSR_A_BIT - adr lr, BSYM(2f) + badr lr, 2f msr spsr_cxsf, \reg __MSR_ELR_HYP(14) __ERET @@ -430,6 +449,48 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) #endif .endm + .macro uaccess_disable, tmp, isb=1 +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + /* + * Whenever we re-enter userspace, the domains should always be + * set appropriately. + */ + mov \tmp, #DACR_UACCESS_DISABLE + mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register + .if \isb + instr_sync + .endif +#endif + .endm + + .macro uaccess_enable, tmp, isb=1 +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + /* + * Whenever we re-enter userspace, the domains should always be + * set appropriately. 
+ */ + mov \tmp, #DACR_UACCESS_ENABLE + mcr p15, 0, \tmp, c3, c0, 0 + .if \isb + instr_sync + .endif +#endif + .endm + + .macro uaccess_save, tmp +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + mrc p15, 0, \tmp, c3, c0, 0 + str \tmp, [sp, #S_FRAME_SIZE] +#endif + .endm + + .macro uaccess_restore +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + ldr r0, [sp, #S_FRAME_SIZE] + mcr p15, 0, r0, c3, c0, 0 +#endif + .endm + .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo .macro ret\c, reg #if __LINUX_ARM_ARCH__ < 6 diff --git a/kernel/arch/arm/include/asm/atomic.h b/kernel/arch/arm/include/asm/atomic.h index e22c11970..9e10c4567 100644 --- a/kernel/arch/arm/include/asm/atomic.h +++ b/kernel/arch/arm/include/asm/atomic.h @@ -27,8 +27,8 @@ * strex/ldrex monitor on some implementations. The reason we can use it for * atomic_set() is the clrex or dummy strex done on every exception return. */ -#define atomic_read(v) ACCESS_ONCE((v)->counter) -#define atomic_set(v,i) (((v)->counter) = (i)) +#define atomic_read(v) READ_ONCE((v)->counter) +#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i)) #if __LINUX_ARM_ARCH__ >= 6 @@ -57,12 +57,11 @@ static inline void atomic_##op(int i, atomic_t *v) \ } \ #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ -static inline int atomic_##op##_return(int i, atomic_t *v) \ +static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \ { \ unsigned long tmp; \ int result; \ \ - smp_mb(); \ prefetchw(&v->counter); \ \ __asm__ __volatile__("@ atomic_" #op "_return\n" \ @@ -75,17 +74,17 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ : "r" (&v->counter), "Ir" (i) \ : "cc"); \ \ - smp_mb(); \ - \ return result; \ } -static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) +#define atomic_add_return_relaxed atomic_add_return_relaxed +#define atomic_sub_return_relaxed atomic_sub_return_relaxed + +static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new) { int oldval; unsigned long res; - smp_mb(); prefetchw(&ptr->counter); do { @@ -99,10 +98,9 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) : "cc"); } while (res); - smp_mb(); - return oldval; } +#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed static inline int __atomic_add_unless(atomic_t *v, int a, int u) { @@ -194,6 +192,13 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) ATOMIC_OPS(add, +=, add) ATOMIC_OPS(sub, -=, sub) +#define atomic_andnot atomic_andnot + +ATOMIC_OP(and, &=, and) +ATOMIC_OP(andnot, &= ~, bic) +ATOMIC_OP(or, |=, orr) +ATOMIC_OP(xor, ^=, eor) + #undef ATOMIC_OPS #undef ATOMIC_OP_RETURN #undef ATOMIC_OP @@ -205,8 +210,8 @@ ATOMIC_OPS(sub, -=, sub) #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0) #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0) -#define atomic_inc_return(v) (atomic_add_return(1, v)) -#define atomic_dec_return(v) (atomic_sub_return(1, v)) +#define atomic_inc_return_relaxed(v) (atomic_add_return_relaxed(1, v)) +#define atomic_dec_return_relaxed(v) (atomic_sub_return_relaxed(1, v)) #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) #define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0) @@ -290,12 +295,12 @@ static inline void atomic64_##op(long long i, atomic64_t *v) \ } \ #define ATOMIC64_OP_RETURN(op, op1, op2) \ -static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \ +static inline long long \ +atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \ { \ long long result; \ unsigned long tmp; \ \ - smp_mb(); \ prefetchw(&v->counter); \ \ __asm__ 
__volatile__("@ atomic64_" #op "_return\n" \ @@ -309,8 +314,6 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \ : "r" (&v->counter), "r" (i) \ : "cc"); \ \ - smp_mb(); \ - \ return result; \ } @@ -321,17 +324,26 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \ ATOMIC64_OPS(add, adds, adc) ATOMIC64_OPS(sub, subs, sbc) +#define atomic64_add_return_relaxed atomic64_add_return_relaxed +#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed + +#define atomic64_andnot atomic64_andnot + +ATOMIC64_OP(and, and, and) +ATOMIC64_OP(andnot, bic, bic) +ATOMIC64_OP(or, orr, orr) +ATOMIC64_OP(xor, eor, eor) + #undef ATOMIC64_OPS #undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP -static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old, - long long new) +static inline long long +atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new) { long long oldval; unsigned long res; - smp_mb(); prefetchw(&ptr->counter); do { @@ -346,17 +358,15 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old, : "cc"); } while (res); - smp_mb(); - return oldval; } +#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed -static inline long long atomic64_xchg(atomic64_t *ptr, long long new) +static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new) { long long result; unsigned long tmp; - smp_mb(); prefetchw(&ptr->counter); __asm__ __volatile__("@ atomic64_xchg\n" @@ -368,10 +378,9 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new) : "r" (&ptr->counter), "r" (new) : "cc"); - smp_mb(); - return result; } +#define atomic64_xchg_relaxed atomic64_xchg_relaxed static inline long long atomic64_dec_if_positive(atomic64_t *v) { @@ -433,11 +442,11 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) #define atomic64_inc(v) atomic64_add(1LL, (v)) -#define atomic64_inc_return(v) atomic64_add_return(1LL, (v)) +#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1LL, (v)) #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) #define atomic64_dec(v) atomic64_sub(1LL, (v)) -#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v)) +#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1LL, (v)) #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) diff --git a/kernel/arch/arm/include/asm/barrier.h b/kernel/arch/arm/include/asm/barrier.h index d2f81e6b8..3ff5642d9 100644 --- a/kernel/arch/arm/include/asm/barrier.h +++ b/kernel/arch/arm/include/asm/barrier.h @@ -2,7 +2,6 @@ #define __ASM_BARRIER_H #ifndef __ASSEMBLY__ -#include <asm/outercache.h> #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); @@ -37,12 +36,20 @@ #define dmb(x) __asm__ __volatile__ ("" : : : "memory") #endif +#ifdef CONFIG_ARM_HEAVY_MB +extern void (*soc_mb)(void); +extern void arm_heavy_mb(void); +#define __arm_heavy_mb(x...) do { dsb(x); arm_heavy_mb(); } while (0) +#else +#define __arm_heavy_mb(x...) 
dsb(x) +#endif + #ifdef CONFIG_ARCH_HAS_BARRIERS #include <mach/barriers.h> #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) -#define mb() do { dsb(); outer_sync(); } while (0) +#define mb() __arm_heavy_mb() #define rmb() dsb() -#define wmb() do { dsb(st); outer_sync(); } while (0) +#define wmb() __arm_heavy_mb(st) #define dma_rmb() dmb(osh) #define dma_wmb() dmb(oshst) #else @@ -67,12 +74,12 @@ do { \ compiletime_assert_atomic_type(*p); \ smp_mb(); \ - ACCESS_ONCE(*p) = (v); \ + WRITE_ONCE(*p, v); \ } while (0) #define smp_load_acquire(p) \ ({ \ - typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ smp_mb(); \ ___p1; \ @@ -81,7 +88,7 @@ do { \ #define read_barrier_depends() do { } while(0) #define smp_read_barrier_depends() do { } while(0) -#define set_mb(var, value) do { var = value; smp_mb(); } while (0) +#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0) #define smp_mb__before_atomic() smp_mb() #define smp_mb__after_atomic() smp_mb() diff --git a/kernel/arch/arm/include/asm/bitops.h b/kernel/arch/arm/include/asm/bitops.h index 56380995f..e943e6cee 100644 --- a/kernel/arch/arm/include/asm/bitops.h +++ b/kernel/arch/arm/include/asm/bitops.h @@ -35,9 +35,9 @@ static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p) { unsigned long flags; - unsigned long mask = 1UL << (bit & 31); + unsigned long mask = BIT_MASK(bit); - p += bit >> 5; + p += BIT_WORD(bit); raw_local_irq_save(flags); *p |= mask; @@ -47,9 +47,9 @@ static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long * static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p) { unsigned long flags; - unsigned long mask = 1UL << (bit & 31); + unsigned long mask = BIT_MASK(bit); - p += bit >> 5; + p += BIT_WORD(bit); raw_local_irq_save(flags); *p &= ~mask; @@ -59,9 +59,9 @@ static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p) { unsigned long flags; - unsigned long mask = 1UL << (bit & 31); + unsigned long mask = BIT_MASK(bit); - p += bit >> 5; + p += BIT_WORD(bit); raw_local_irq_save(flags); *p ^= mask; @@ -73,9 +73,9 @@ ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p) { unsigned long flags; unsigned int res; - unsigned long mask = 1UL << (bit & 31); + unsigned long mask = BIT_MASK(bit); - p += bit >> 5; + p += BIT_WORD(bit); raw_local_irq_save(flags); res = *p; @@ -90,9 +90,9 @@ ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p) { unsigned long flags; unsigned int res; - unsigned long mask = 1UL << (bit & 31); + unsigned long mask = BIT_MASK(bit); - p += bit >> 5; + p += BIT_WORD(bit); raw_local_irq_save(flags); res = *p; @@ -107,9 +107,9 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p) { unsigned long flags; unsigned int res; - unsigned long mask = 1UL << (bit & 31); + unsigned long mask = BIT_MASK(bit); - p += bit >> 5; + p += BIT_WORD(bit); raw_local_irq_save(flags); res = *p; diff --git a/kernel/arch/arm/include/asm/bug.h b/kernel/arch/arm/include/asm/bug.h index b274bde24..e7335a921 100644 --- a/kernel/arch/arm/include/asm/bug.h +++ b/kernel/arch/arm/include/asm/bug.h @@ -40,6 +40,7 @@ do { \ "2:\t.asciz " #__file "\n" \ ".popsection\n" \ ".pushsection __bug_table,\"a\"\n" \ + ".align 2\n" \ "3:\t.word 1b, 2b\n" \ "\t.hword " #__line ", 0\n" \ ".popsection"); \ 
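The bitops.h hunk above replaces the open-coded "1UL << (bit & 31)" and "bit >> 5" pairs with the generic BIT_MASK()/BIT_WORD() helpers. A minimal sketch of the decomposition they perform, assuming the definitions from <linux/bits.h>; sketch_set_bit() is a hypothetical stand-in for the patched ____atomic_set_bit(), with the IRQ-save locking elided:

#include <limits.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)

/* Split a flat bit number into a word index plus an in-word mask,
 * mirroring the body of the patched ____atomic_set_bit(). */
static void sketch_set_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long mask = BIT_MASK(bit);

	p += BIT_WORD(bit);
	*p |= mask;		/* raw_local_irq_save()/restore() elided */
}

For bit = 35 with a 32-bit long this yields word 1, mask 1UL << 3 — exactly what the removed "(bit & 31)" / "(bit >> 5)" arithmetic computed, but correct for any word size.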
diff --git a/kernel/arch/arm/include/asm/cacheflush.h b/kernel/arch/arm/include/asm/cacheflush.h index 2d46862e7..d5525bfc7 100644 --- a/kernel/arch/arm/include/asm/cacheflush.h +++ b/kernel/arch/arm/include/asm/cacheflush.h @@ -140,8 +140,6 @@ extern struct cpu_cache_fns cpu_cache; * is visible to DMA, or data written by DMA to system memory is * visible to the CPU. */ -#define dmac_map_area cpu_cache.dma_map_area -#define dmac_unmap_area cpu_cache.dma_unmap_area #define dmac_flush_range cpu_cache.dma_flush_range #else @@ -161,8 +159,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t); * is visible to DMA, or data written by DMA to system memory is * visible to the CPU. */ -extern void dmac_map_area(const void *, size_t, int); -extern void dmac_unmap_area(const void *, size_t, int); extern void dmac_flush_range(const void *, const void *); #endif @@ -482,10 +478,17 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size) : : : "r0","r1","r2","r3","r4","r5","r6","r7", \ "r9","r10","lr","memory" ) +#ifdef CONFIG_MMU int set_memory_ro(unsigned long addr, int numpages); int set_memory_rw(unsigned long addr, int numpages); int set_memory_x(unsigned long addr, int numpages); int set_memory_nx(unsigned long addr, int numpages); +#else +static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; } +static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; } +static inline int set_memory_x(unsigned long addr, int numpages) { return 0; } +static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; } +#endif #ifdef CONFIG_DEBUG_RODATA void mark_rodata_ro(void); @@ -499,4 +502,21 @@ static inline void set_kernel_text_ro(void) { } void flush_uprobe_xol_access(struct page *page, unsigned long uaddr, void *kaddr, unsigned long len); +/** + * secure_flush_area - ensure coherency across the secure boundary + * @addr: virtual address + * @size: size of region + * + * Ensure that the specified area of memory is coherent across the secure + * boundary from the non-secure side. This is used when calling secure + * firmware where the secure firmware does not ensure coherency. 
+ */ +static inline void secure_flush_area(const void *addr, size_t size) +{ + phys_addr_t phys = __pa(addr); + + __cpuc_flush_dcache_area((void *)addr, size); + outer_flush_range(phys, phys + size); +} + #endif diff --git a/kernel/arch/arm/include/asm/cmpxchg.h b/kernel/arch/arm/include/asm/cmpxchg.h index 2386e9745..97882f9ba 100644 --- a/kernel/arch/arm/include/asm/cmpxchg.h +++ b/kernel/arch/arm/include/asm/cmpxchg.h @@ -35,11 +35,11 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size unsigned int tmp; #endif - smp_mb(); prefetchw((const void *)ptr); switch (size) { #if __LINUX_ARM_ARCH__ >= 6 +#ifndef CONFIG_CPU_V6 /* MIN ARCH >= V6K */ case 1: asm volatile("@ __xchg1\n" "1: ldrexb %0, [%3]\n" @@ -50,6 +50,17 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size : "r" (x), "r" (ptr) : "memory", "cc"); break; + case 2: + asm volatile("@ __xchg2\n" + "1: ldrexh %0, [%3]\n" + " strexh %1, %2, [%3]\n" + " teq %1, #0\n" + " bne 1b" + : "=&r" (ret), "=&r" (tmp) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; +#endif case 4: asm volatile("@ __xchg4\n" "1: ldrex %0, [%3]\n" @@ -94,16 +105,18 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size break; #endif default: + /* Cause a link-time error, the xchg() size is not supported */ __bad_xchg(ptr, size), ret = 0; break; } - smp_mb(); return ret; } -#define xchg(ptr,x) \ - ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) +#define xchg_relaxed(ptr, x) ({ \ + (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \ + sizeof(*(ptr))); \ +}) #include <asm-generic/cmpxchg-local.h> @@ -114,23 +127,25 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size #error "SMP is not supported on this platform" #endif +#define xchg xchg_relaxed + /* * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make * them available. 
*/ -#define cmpxchg_local(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ - (unsigned long)(n), sizeof(*(ptr)))) +#define cmpxchg_local(ptr, o, n) ({ \ + (__typeof(*ptr))__cmpxchg_local_generic((ptr), \ + (unsigned long)(o), \ + (unsigned long)(n), \ + sizeof(*(ptr))); \ +}) + #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) -#ifndef CONFIG_SMP #include <asm-generic/cmpxchg.h> -#endif #else /* min ARCH >= ARMv6 */ -#define __HAVE_ARCH_CMPXCHG 1 - extern void __bad_cmpxchg(volatile void *ptr, int size); /* @@ -191,23 +206,12 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, return oldval; } -static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, - unsigned long new, int size) -{ - unsigned long ret; - - smp_mb(); - ret = __cmpxchg(ptr, old, new, size); - smp_mb(); - - return ret; -} - -#define cmpxchg(ptr,o,n) \ - ((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \ - (unsigned long)(o), \ - (unsigned long)(n), \ - sizeof(*(ptr)))) +#define cmpxchg_relaxed(ptr,o,n) ({ \ + (__typeof__(*(ptr)))__cmpxchg((ptr), \ + (unsigned long)(o), \ + (unsigned long)(n), \ + sizeof(*(ptr))); \ +}) static inline unsigned long __cmpxchg_local(volatile void *ptr, unsigned long old, @@ -229,6 +233,13 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, return ret; } +#define cmpxchg_local(ptr, o, n) ({ \ + (__typeof(*ptr))__cmpxchg_local((ptr), \ + (unsigned long)(o), \ + (unsigned long)(n), \ + sizeof(*(ptr))); \ +}) + static inline unsigned long long __cmpxchg64(unsigned long long *ptr, unsigned long long old, unsigned long long new) @@ -254,36 +265,13 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr, return oldval; } -static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr, - unsigned long long old, - unsigned long long new) -{ - unsigned long long ret; - - smp_mb(); - ret = __cmpxchg64(ptr, old, new); - smp_mb(); - - return ret; -} - -#define cmpxchg_local(ptr,o,n) \ - ((__typeof__(*(ptr)))__cmpxchg_local((ptr), \ - (unsigned long)(o), \ - (unsigned long)(n), \ - sizeof(*(ptr)))) - -#define cmpxchg64(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \ - (unsigned long long)(o), \ - (unsigned long long)(n))) - -#define cmpxchg64_relaxed(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg64((ptr), \ +#define cmpxchg64_relaxed(ptr, o, n) ({ \ + (__typeof__(*(ptr)))__cmpxchg64((ptr), \ (unsigned long long)(o), \ - (unsigned long long)(n))) + (unsigned long long)(n)); \ +}) -#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n)) +#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n)) #endif /* __LINUX_ARM_ARCH__ >= 6 */ diff --git a/kernel/arch/arm/include/asm/dma-mapping.h b/kernel/arch/arm/include/asm/dma-mapping.h index b52101d37..ccb3aa646 100644 --- a/kernel/arch/arm/include/asm/dma-mapping.h +++ b/kernel/arch/arm/include/asm/dma-mapping.h @@ -8,13 +8,12 @@ #include <linux/dma-attrs.h> #include <linux/dma-debug.h> -#include <asm-generic/dma-coherent.h> #include <asm/memory.h> #include <xen/xen.h> #include <asm/xen/hypervisor.h> -#define DMA_ERROR_CODE (~0) +#define DMA_ERROR_CODE (~(dma_addr_t)0x0) extern struct dma_map_ops arm_dma_ops; extern struct dma_map_ops arm_coherent_dma_ops; @@ -39,12 +38,15 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) dev->archdata.dma_ops = ops; } -#include <asm-generic/dma-mapping-common.h> +#define HAVE_ARCH_DMA_SUPPORTED 1 +extern int 
dma_supported(struct device *dev, u64 mask); -static inline int dma_set_mask(struct device *dev, u64 mask) -{ - return get_dma_ops(dev)->set_dma_mask(dev, mask); -} +/* + * Note that while the generic code provides dummy dma_{alloc,free}_noncoherent + * implementations, we don't provide a dma_cache_sync function so drivers using + * this API are highlighted with build warnings. + */ +#include <asm-generic/dma-mapping-common.h> #ifdef __arch_page_to_dma #error Please update to __arch_pfn_to_dma @@ -167,32 +169,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) static inline void dma_mark_clean(void *addr, size_t size) { } -/* - * DMA errors are defined by all-bits-set in the DMA address. - */ -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - debug_dma_mapping_error(dev, dma_addr); - return dma_addr == DMA_ERROR_CODE; -} - -/* - * Dummy noncoherent implementation. We don't provide a dma_cache_sync - * function so drivers using this API are highlighted with build warnings. - */ -static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t *handle, gfp_t gfp) -{ - return NULL; -} - -static inline void dma_free_noncoherent(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t handle) -{ -} - -extern int dma_supported(struct device *dev, u64 mask); - extern int arm_dma_set_mask(struct device *dev, u64 dma_mask); /** @@ -209,21 +185,6 @@ extern int arm_dma_set_mask(struct device *dev, u64 dma_mask); extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs); -#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - void *cpu_addr; - BUG_ON(!ops); - - cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); - debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); - return cpu_addr; -} - /** * arm_dma_free - free memory allocated by arm_dma_alloc * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices @@ -241,19 +202,6 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size, extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs); -#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - BUG_ON(!ops); - - debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); - ops->free(dev, size, cpu_addr, dma_handle, attrs); -} - /** * arm_dma_mmap - map a coherent DMA allocation into user space * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices diff --git a/kernel/arch/arm/include/asm/dma.h b/kernel/arch/arm/include/asm/dma.h index 99084431d..bb4fa67da 100644 --- a/kernel/arch/arm/include/asm/dma.h +++ b/kernel/arch/arm/include/asm/dma.h @@ -19,7 +19,7 @@ * It should not be re-used except for that purpose. 
*/ #include <linux/spinlock.h> -#include <asm/scatterlist.h> +#include <linux/scatterlist.h> #include <mach/isa-dma.h> diff --git a/kernel/arch/arm/include/asm/domain.h b/kernel/arch/arm/include/asm/domain.h index 6ddbe4464..fc8ba1663 100644 --- a/kernel/arch/arm/include/asm/domain.h +++ b/kernel/arch/arm/include/asm/domain.h @@ -12,6 +12,7 @@ #ifndef __ASSEMBLY__ #include <asm/barrier.h> +#include <asm/thread_info.h> #endif /* @@ -34,15 +35,14 @@ */ #ifndef CONFIG_IO_36 #define DOMAIN_KERNEL 0 -#define DOMAIN_TABLE 0 #define DOMAIN_USER 1 #define DOMAIN_IO 2 #else #define DOMAIN_KERNEL 2 -#define DOMAIN_TABLE 2 #define DOMAIN_USER 1 #define DOMAIN_IO 0 #endif +#define DOMAIN_VECTORS 3 /* * Domain types @@ -55,30 +55,65 @@ #define DOMAIN_MANAGER 1 #endif -#define domain_val(dom,type) ((type) << (2*(dom))) +#define domain_mask(dom) ((3) << (2 * (dom))) +#define domain_val(dom,type) ((type) << (2 * (dom))) + +#ifdef CONFIG_CPU_SW_DOMAIN_PAN +#define DACR_INIT \ + (domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \ + domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ + domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \ + domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)) +#else +#define DACR_INIT \ + (domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \ + domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ + domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \ + domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)) +#endif + +#define __DACR_DEFAULT \ + domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \ + domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \ + domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT) + +#define DACR_UACCESS_DISABLE \ + (__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_NOACCESS)) +#define DACR_UACCESS_ENABLE \ + (__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_CLIENT)) #ifndef __ASSEMBLY__ -#ifdef CONFIG_CPU_USE_DOMAINS +static inline unsigned int get_domain(void) +{ + unsigned int domain; + + asm( + "mrc p15, 0, %0, c3, c0 @ get domain" + : "=r" (domain) + : "m" (current_thread_info()->cpu_domain)); + + return domain; +} + static inline void set_domain(unsigned val) { asm volatile( "mcr p15, 0, %0, c3, c0 @ set domain" - : : "r" (val)); + : : "r" (val) : "memory"); isb(); } +#ifdef CONFIG_CPU_USE_DOMAINS #define modify_domain(dom,type) \ do { \ - struct thread_info *thread = current_thread_info(); \ - unsigned int domain = thread->cpu_domain; \ - domain &= ~domain_val(dom, DOMAIN_MANAGER); \ - thread->cpu_domain = domain | domain_val(dom, type); \ - set_domain(thread->cpu_domain); \ + unsigned int domain = get_domain(); \ + domain &= ~domain_mask(dom); \ + domain = domain | domain_val(dom, type); \ + set_domain(domain); \ } while (0) #else -static inline void set_domain(unsigned val) { } static inline void modify_domain(unsigned dom, unsigned type) { } #endif diff --git a/kernel/arch/arm/include/asm/edac.h b/kernel/arch/arm/include/asm/edac.h index 0df7a2c1f..5189fa819 100644 --- a/kernel/arch/arm/include/asm/edac.h +++ b/kernel/arch/arm/include/asm/edac.h @@ -18,11 +18,12 @@ #define ASM_EDAC_H /* * ECC atomic, DMA, SMP and interrupt safe scrub function. - * Implements the per arch atomic_scrub() that EDAC use for software + * Implements the per arch edac_atomic_scrub() that EDAC use for software * ECC scrubbing. It reads memory and then writes back the original * value, allowing the hardware to detect and correct memory errors. 
*/ -static inline void atomic_scrub(void *va, u32 size) + +static inline void edac_atomic_scrub(void *va, u32 size) { #if __LINUX_ARM_ARCH__ >= 6 unsigned int *virt_addr = va; diff --git a/kernel/arch/arm/include/asm/entry-macro-multi.S b/kernel/arch/arm/include/asm/entry-macro-multi.S index 469a2b30f..609184f52 100644 --- a/kernel/arch/arm/include/asm/entry-macro-multi.S +++ b/kernel/arch/arm/include/asm/entry-macro-multi.S @@ -10,7 +10,7 @@ @ @ routine called with r0 = irq number, r1 = struct pt_regs * @ - adrne lr, BSYM(1b) + badrne lr, 1b bne asm_do_IRQ #ifdef CONFIG_SMP @@ -23,7 +23,7 @@ ALT_SMP(test_for_ipi r0, r2, r6, lr) ALT_UP_B(9997f) movne r1, sp - adrne lr, BSYM(1b) + badrne lr, 1b bne do_IPI #endif 9997: diff --git a/kernel/arch/arm/include/asm/firmware.h b/kernel/arch/arm/include/asm/firmware.h index 89aefe10d..34c1d96ef 100644 --- a/kernel/arch/arm/include/asm/firmware.h +++ b/kernel/arch/arm/include/asm/firmware.h @@ -34,6 +34,10 @@ struct firmware_ops { */ int (*set_cpu_boot_addr)(int cpu, unsigned long boot_addr); /* + * Gets boot address of specified physical CPU + */ + int (*get_cpu_boot_addr)(int cpu, unsigned long *boot_addr); + /* * Boots specified physical CPU */ int (*cpu_boot)(int cpu); diff --git a/kernel/arch/arm/include/asm/fixmap.h b/kernel/arch/arm/include/asm/fixmap.h index 0415eae1d..58cfe9f1a 100644 --- a/kernel/arch/arm/include/asm/fixmap.h +++ b/kernel/arch/arm/include/asm/fixmap.h @@ -6,9 +6,13 @@ #define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE) #include <asm/kmap_types.h> +#include <asm/pgtable.h> enum fixed_addresses { - FIX_KMAP_BEGIN, + FIX_EARLYCON_MEM_BASE, + __end_of_permanent_fixed_addresses, + + FIX_KMAP_BEGIN = __end_of_permanent_fixed_addresses, FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, /* Support writing RO kernel text via kprobes, jump labels, etc. 
*/ @@ -18,7 +22,16 @@ enum fixed_addresses { __end_of_fixed_addresses }; +#define FIXMAP_PAGE_COMMON (L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN | L_PTE_DIRTY) + +#define FIXMAP_PAGE_NORMAL (FIXMAP_PAGE_COMMON | L_PTE_MT_WRITEBACK) + +/* Used by set_fixmap_(io|nocache), both meant for mapping a device */ +#define FIXMAP_PAGE_IO (FIXMAP_PAGE_COMMON | L_PTE_MT_DEV_SHARED | L_PTE_SHARED) +#define FIXMAP_PAGE_NOCACHE FIXMAP_PAGE_IO + void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot); +void __init early_fixmap_init(void); #include <asm-generic/fixmap.h> diff --git a/kernel/arch/arm/include/asm/futex.h b/kernel/arch/arm/include/asm/futex.h index 5eed82809..6795368ad 100644 --- a/kernel/arch/arm/include/asm/futex.h +++ b/kernel/arch/arm/include/asm/futex.h @@ -22,8 +22,11 @@ #ifdef CONFIG_SMP #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \ +({ \ + unsigned int __ua_flags; \ smp_mb(); \ prefetchw(uaddr); \ + __ua_flags = uaccess_save_and_enable(); \ __asm__ __volatile__( \ "1: ldrex %1, [%3]\n" \ " " insn "\n" \ @@ -34,12 +37,15 @@ __futex_atomic_ex_table("%5") \ : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \ : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ - : "cc", "memory") + : "cc", "memory"); \ + uaccess_restore(__ua_flags); \ +}) static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) { + unsigned int __ua_flags; int ret; u32 val; @@ -49,6 +55,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, smp_mb(); /* Prefetching cannot fault */ prefetchw(uaddr); + __ua_flags = uaccess_save_and_enable(); __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" "1: ldrex %1, [%4]\n" " teq %1, %2\n" @@ -61,6 +68,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, : "=&r" (ret), "=&r" (val) : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) : "cc", "memory"); + uaccess_restore(__ua_flags); smp_mb(); *uval = val; @@ -73,6 +81,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, #include <asm/domain.h> #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \ +({ \ + unsigned int __ua_flags = uaccess_save_and_enable(); \ __asm__ __volatile__( \ "1: " TUSER(ldr) " %1, [%3]\n" \ " " insn "\n" \ @@ -81,12 +91,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, __futex_atomic_ex_table("%5") \ : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \ : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ - : "cc", "memory") + : "cc", "memory"); \ + uaccess_restore(__ua_flags); \ +}) static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) { + unsigned int __ua_flags; int ret = 0; u32 val; @@ -94,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, return -EFAULT; preempt_disable(); + __ua_flags = uaccess_save_and_enable(); __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" "1: " TUSER(ldr) " %1, [%4]\n" " teq %1, %2\n" @@ -103,6 +117,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, : "+r" (ret), "=&r" (val) : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) : "cc", "memory"); + uaccess_restore(__ua_flags); *uval = val; preempt_enable(); diff --git a/kernel/arch/arm/include/asm/glue-cache.h b/kernel/arch/arm/include/asm/glue-cache.h index a3c24cd5b..cab07f693 100644 --- a/kernel/arch/arm/include/asm/glue-cache.h +++ b/kernel/arch/arm/include/asm/glue-cache.h @@ -158,8 +158,6 @@ static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { } #define __cpuc_coherent_user_range 
__glue(_CACHE,_coherent_user_range) #define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area) -#define dmac_map_area __glue(_CACHE,_dma_map_area) -#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area) #define dmac_flush_range __glue(_CACHE,_dma_flush_range) #endif diff --git a/kernel/arch/arm/include/asm/hardware/arm_timer.h b/kernel/arch/arm/include/asm/hardware/arm_timer.h deleted file mode 100644 index d6030ff59..000000000 --- a/kernel/arch/arm/include/asm/hardware/arm_timer.h +++ /dev/null @@ -1,35 +0,0 @@ -#ifndef __ASM_ARM_HARDWARE_ARM_TIMER_H -#define __ASM_ARM_HARDWARE_ARM_TIMER_H - -/* - * ARM timer implementation, found in Integrator, Versatile and Realview - * platforms. Not all platforms support all registers and bits in these - * registers, so we mark them with A for Integrator AP, C for Integrator - * CP, V for Versatile and R for Realview. - * - * Integrator AP has 16-bit timers, Integrator CP, Versatile and Realview - * can have 16-bit or 32-bit selectable via a bit in the control register. - * - * Every SP804 contains two identical timers. - */ -#define TIMER_1_BASE 0x00 -#define TIMER_2_BASE 0x20 - -#define TIMER_LOAD 0x00 /* ACVR rw */ -#define TIMER_VALUE 0x04 /* ACVR ro */ -#define TIMER_CTRL 0x08 /* ACVR rw */ -#define TIMER_CTRL_ONESHOT (1 << 0) /* CVR */ -#define TIMER_CTRL_32BIT (1 << 1) /* CVR */ -#define TIMER_CTRL_DIV1 (0 << 2) /* ACVR */ -#define TIMER_CTRL_DIV16 (1 << 2) /* ACVR */ -#define TIMER_CTRL_DIV256 (2 << 2) /* ACVR */ -#define TIMER_CTRL_IE (1 << 5) /* VR */ -#define TIMER_CTRL_PERIODIC (1 << 6) /* ACVR */ -#define TIMER_CTRL_ENABLE (1 << 7) /* ACVR */ - -#define TIMER_INTCLR 0x0c /* ACVR wo */ -#define TIMER_RIS 0x10 /* CVR ro */ -#define TIMER_MIS 0x14 /* CVR ro */ -#define TIMER_BGLOAD 0x18 /* CVR rw */ - -#endif diff --git a/kernel/arch/arm/include/asm/hardware/cache-uniphier.h b/kernel/arch/arm/include/asm/hardware/cache-uniphier.h new file mode 100644 index 000000000..102e3fbe1 --- /dev/null +++ b/kernel/arch/arm/include/asm/hardware/cache-uniphier.h @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __CACHE_UNIPHIER_H +#define __CACHE_UNIPHIER_H + +#include <linux/types.h> + +#ifdef CONFIG_CACHE_UNIPHIER +int uniphier_cache_init(void); +int uniphier_cache_l2_is_enabled(void); +void uniphier_cache_l2_touch_range(unsigned long start, unsigned long end); +void uniphier_cache_l2_set_locked_ways(u32 way_mask); +#else +static inline int uniphier_cache_init(void) +{ + return -ENODEV; +} + +static inline int uniphier_cache_l2_is_enabled(void) +{ + return 0; +} + +static inline void uniphier_cache_l2_touch_range(unsigned long start, + unsigned long end) +{ +} + +static inline void uniphier_cache_l2_set_locked_ways(u32 way_mask) +{ +} +#endif + +#endif /* __CACHE_UNIPHIER_H */ diff --git a/kernel/arch/arm/include/asm/hardware/it8152.h b/kernel/arch/arm/include/asm/hardware/it8152.h index d36a73d7c..076777ff3 100644 --- a/kernel/arch/arm/include/asm/hardware/it8152.h +++ b/kernel/arch/arm/include/asm/hardware/it8152.h @@ -106,7 +106,7 @@ extern void __iomem *it8152_base_address; struct pci_dev; struct pci_sys_data; -extern void it8152_irq_demux(unsigned int irq, struct irq_desc *desc); +extern void it8152_irq_demux(struct irq_desc *desc); extern void it8152_init_irq(void); extern int it8152_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); extern int it8152_pci_setup(int nr, struct pci_sys_data *sys); diff --git a/kernel/arch/arm/include/asm/hardware/timer-sp.h b/kernel/arch/arm/include/asm/hardware/timer-sp.h deleted file mode 100644 index bb28af7c3..000000000 --- a/kernel/arch/arm/include/asm/hardware/timer-sp.h +++ /dev/null @@ -1,23 +0,0 @@ -struct clk; - -void __sp804_clocksource_and_sched_clock_init(void __iomem *, - const char *, struct clk *, int); -void __sp804_clockevents_init(void __iomem *, unsigned int, - struct clk *, const char *); - -static inline void sp804_clocksource_init(void __iomem *base, const char *name) -{ - __sp804_clocksource_and_sched_clock_init(base, name, NULL, 0); -} - -static inline void sp804_clocksource_and_sched_clock_init(void __iomem *base, - const char *name) -{ - __sp804_clocksource_and_sched_clock_init(base, name, NULL, 1); -} - -static inline void sp804_clockevents_init(void __iomem *base, unsigned int irq, const char *name) -{ - __sp804_clockevents_init(base, irq, NULL, name); - -} diff --git a/kernel/arch/arm/include/asm/highmem.h b/kernel/arch/arm/include/asm/highmem.h index 535579511..0a0e2d178 100644 --- a/kernel/arch/arm/include/asm/highmem.h +++ b/kernel/arch/arm/include/asm/highmem.h @@ -68,7 +68,6 @@ extern void kunmap(struct page *page); extern void *kmap_atomic(struct page *page); extern void __kunmap_atomic(void *kvaddr); extern void *kmap_atomic_pfn(unsigned long pfn); -extern struct page *kmap_atomic_to_page(const void *ptr); #endif #endif diff --git a/kernel/arch/arm/include/asm/hugetlb.h b/kernel/arch/arm/include/asm/hugetlb.h index 1f1b1cd11..7d26f6c4f 100644 --- a/kernel/arch/arm/include/asm/hugetlb.h +++ b/kernel/arch/arm/include/asm/hugetlb.h @@ -53,10 +53,6 @@ static inline int prepare_hugepage_range(struct file *file, return 0; } -static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) -{ -} - static inline int huge_pte_none(pte_t pte) { return pte_none(pte); @@ -67,15 +63,6 @@ static inline pte_t huge_pte_wrprotect(pte_t pte) return pte_wrprotect(pte); } -static inline int arch_prepare_hugepage(struct page *page) -{ - return 0; -} - -static inline void arch_release_hugepage(struct page *page) -{ -} - static inline void arch_clear_hugepage_flags(struct page *page) { clear_bit(PG_dcache_clean, 
&page->flags); diff --git a/kernel/arch/arm/include/asm/hw_irq.h b/kernel/arch/arm/include/asm/hw_irq.h index af79da40a..9beb92914 100644 --- a/kernel/arch/arm/include/asm/hw_irq.h +++ b/kernel/arch/arm/include/asm/hw_irq.h @@ -11,12 +11,6 @@ static inline void ack_bad_irq(int irq) pr_crit("unexpected IRQ trap at vector %02x\n", irq); } -void set_irq_flags(unsigned int irq, unsigned int flags); - -#define IRQF_VALID (1 << 0) -#define IRQF_PROBE (1 << 1) -#define IRQF_NOAUTOEN (1 << 2) - #define ARCH_IRQ_INIT_FLAGS (IRQ_NOREQUEST | IRQ_NOPROBE) #endif diff --git a/kernel/arch/arm/include/asm/io.h b/kernel/arch/arm/include/asm/io.h index db58deb00..485982084 100644 --- a/kernel/arch/arm/include/asm/io.h +++ b/kernel/arch/arm/include/asm/io.h @@ -23,6 +23,7 @@ #ifdef __KERNEL__ +#include <linux/string.h> #include <linux/types.h> #include <linux/blk_types.h> #include <asm/byteorder.h> @@ -73,17 +74,16 @@ void __raw_readsl(const volatile void __iomem *addr, void *data, int longlen); static inline void __raw_writew(u16 val, volatile void __iomem *addr) { asm volatile("strh %1, %0" - : "+Q" (*(volatile u16 __force *)addr) - : "r" (val)); + : : "Q" (*(volatile u16 __force *)addr), "r" (val)); } #define __raw_readw __raw_readw static inline u16 __raw_readw(const volatile void __iomem *addr) { u16 val; - asm volatile("ldrh %1, %0" - : "+Q" (*(volatile u16 __force *)addr), - "=r" (val)); + asm volatile("ldrh %0, %1" + : "=r" (val) + : "Q" (*(volatile u16 __force *)addr)); return val; } #endif @@ -92,25 +92,23 @@ static inline u16 __raw_readw(const volatile void __iomem *addr) static inline void __raw_writeb(u8 val, volatile void __iomem *addr) { asm volatile("strb %1, %0" - : "+Qo" (*(volatile u8 __force *)addr) - : "r" (val)); + : : "Qo" (*(volatile u8 __force *)addr), "r" (val)); } #define __raw_writel __raw_writel static inline void __raw_writel(u32 val, volatile void __iomem *addr) { asm volatile("str %1, %0" - : "+Qo" (*(volatile u32 __force *)addr) - : "r" (val)); + : : "Qo" (*(volatile u32 __force *)addr), "r" (val)); } #define __raw_readb __raw_readb static inline u8 __raw_readb(const volatile void __iomem *addr) { u8 val; - asm volatile("ldrb %1, %0" - : "+Qo" (*(volatile u8 __force *)addr), - "=r" (val)); + asm volatile("ldrb %0, %1" + : "=r" (val) + : "Qo" (*(volatile u8 __force *)addr)); return val; } @@ -118,9 +116,9 @@ static inline u8 __raw_readb(const volatile void __iomem *addr) static inline u32 __raw_readl(const volatile void __iomem *addr) { u32 val; - asm volatile("ldr %1, %0" - : "+Qo" (*(volatile u32 __force *)addr), - "=r" (val)); + asm volatile("ldr %0, %1" + : "=r" (val) + : "Qo" (*(volatile u32 __force *)addr)); return val; } @@ -142,16 +140,11 @@ static inline u32 __raw_readl(const volatile void __iomem *addr) * The _caller variety takes a __builtin_return_address(0) value for * /proc/vmalloc to use - and should only be used in non-inline functions. 
*/ -extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long, - size_t, unsigned int, void *); extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int, void *); - extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int); -extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int); extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached); extern void __iounmap(volatile void __iomem *addr); -extern void __arm_iounmap(volatile void __iomem *addr); extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *); @@ -319,24 +312,95 @@ extern void _memset_io(volatile void __iomem *, int, size_t); #define writesw(p,d,l) __raw_writesw(p,d,l) #define writesl(p,d,l) __raw_writesl(p,d,l) +#ifndef __ARMBE__ +static inline void memset_io(volatile void __iomem *dst, unsigned c, + size_t count) +{ + extern void mmioset(void *, unsigned int, size_t); + mmioset((void __force *)dst, c, count); +} +#define memset_io(dst,c,count) memset_io(dst,c,count) + +static inline void memcpy_fromio(void *to, const volatile void __iomem *from, + size_t count) +{ + extern void mmiocpy(void *, const void *, size_t); + mmiocpy(to, (const void __force *)from, count); +} +#define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count) + +static inline void memcpy_toio(volatile void __iomem *to, const void *from, + size_t count) +{ + extern void mmiocpy(void *, const void *, size_t); + mmiocpy((void __force *)to, from, count); +} +#define memcpy_toio(to,from,count) memcpy_toio(to,from,count) + +#else #define memset_io(c,v,l) _memset_io(c,(v),(l)) #define memcpy_fromio(a,c,l) _memcpy_fromio((a),c,(l)) #define memcpy_toio(c,a,l) _memcpy_toio(c,(a),(l)) +#endif #endif /* readl */ /* - * ioremap and friends. + * ioremap() and friends. + * + * ioremap() takes a resource address, and size. Due to the ARM memory + * types, it is important to use the correct ioremap() function as each + * mapping has specific properties. + * + * Function Memory type Cacheability Cache hint + * ioremap() Device n/a n/a + * ioremap_nocache() Device n/a n/a + * ioremap_cache() Normal Writeback Read allocate + * ioremap_wc() Normal Non-cacheable n/a + * ioremap_wt() Normal Non-cacheable n/a + * + * All device mappings have the following properties: + * - no access speculation + * - no repetition (eg, on return from an exception) + * - number, order and size of accesses are maintained + * - unaligned accesses are "unpredictable" + * - writes may be delayed before they hit the endpoint device * - * ioremap takes a PCI memory address, as specified in - * Documentation/io-mapping.txt. + * ioremap_nocache() is the same as ioremap() as there are too many device + * drivers using this for device registers, and documentation which tells + * people to use it for such for this to be any different. This is not a + * safe fallback for memory-like mappings, or memory regions where the + * compiler may generate unaligned accesses - eg, via inlining its own + * memcpy. 
* + * All normal memory mappings have the following properties: + * - reads can be repeated with no side effects + * - repeated reads return the last value written + * - reads can fetch additional locations without side effects + * - writes can be repeated (in certain cases) with no side effects + * - writes can be merged before accessing the target + * - unaligned accesses can be supported + * - ordering is not guaranteed without explicit dependencies or barrier + * instructions + * - writes may be delayed before they hit the endpoint memory + * + * The cache hint is only a performance hint: CPUs may alias these hints. + * Eg, a CPU not implementing read allocate but implementing write allocate + * will provide a write allocate mapping instead. */ -#define ioremap(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) -#define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) -#define ioremap_cache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED) -#define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC) -#define iounmap __arm_iounmap +void __iomem *ioremap(resource_size_t res_cookie, size_t size); +#define ioremap ioremap +#define ioremap_nocache ioremap + +void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size); +#define ioremap_cache ioremap_cache + +void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size); +#define ioremap_wc ioremap_wc +#define ioremap_wt ioremap_wc + +void iounmap(volatile void __iomem *iomem_cookie); +#define iounmap iounmap /* * io{read,write}{16,32}be() macros diff --git a/kernel/arch/arm/include/asm/irq.h b/kernel/arch/arm/include/asm/irq.h index 53c15dec7..1bd9510de 100644 --- a/kernel/arch/arm/include/asm/irq.h +++ b/kernel/arch/arm/include/asm/irq.h @@ -35,6 +35,16 @@ extern void (*handle_arch_irq)(struct pt_regs *); extern void set_handle_irq(void (*handle_irq)(struct pt_regs *)); #endif +#ifdef CONFIG_SMP +extern void arch_trigger_all_cpu_backtrace(bool); +#define arch_trigger_all_cpu_backtrace(x) arch_trigger_all_cpu_backtrace(x) +#endif + +static inline int nr_legacy_irqs(void) +{ + return NR_IRQS_LEGACY; +} + #endif #endif diff --git a/kernel/arch/arm/include/asm/irqflags.h b/kernel/arch/arm/include/asm/irqflags.h index 3b763d665..e6b70d9d0 100644 --- a/kernel/arch/arm/include/asm/irqflags.h +++ b/kernel/arch/arm/include/asm/irqflags.h @@ -20,6 +20,7 @@ #if __LINUX_ARM_ARCH__ >= 6 +#define arch_local_irq_save arch_local_irq_save static inline unsigned long arch_local_irq_save(void) { unsigned long flags; @@ -31,6 +32,7 @@ static inline unsigned long arch_local_irq_save(void) return flags; } +#define arch_local_irq_enable arch_local_irq_enable static inline void arch_local_irq_enable(void) { asm volatile( @@ -40,6 +42,7 @@ static inline void arch_local_irq_enable(void) : "memory", "cc"); } +#define arch_local_irq_disable arch_local_irq_disable static inline void arch_local_irq_disable(void) { asm volatile( @@ -51,11 +54,20 @@ static inline void arch_local_irq_disable(void) #define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc") #define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc") + +#ifndef CONFIG_CPU_V7M +#define local_abt_enable() __asm__("cpsie a @ __sta" : : : "memory", "cc") +#define local_abt_disable() __asm__("cpsid a @ __cla" : : : "memory", "cc") +#else +#define local_abt_enable() do { } while (0) +#define local_abt_disable() do { } while (0) +#endif #else /* * Save the current interrupt enable state & disable IRQs */ +#define 
arch_local_irq_save arch_local_irq_save static inline unsigned long arch_local_irq_save(void) { unsigned long flags, temp; @@ -73,6 +85,7 @@ static inline unsigned long arch_local_irq_save(void) /* * Enable IRQs */ +#define arch_local_irq_enable arch_local_irq_enable static inline void arch_local_irq_enable(void) { unsigned long temp; @@ -88,6 +101,7 @@ static inline void arch_local_irq_enable(void) /* * Disable IRQs */ +#define arch_local_irq_disable arch_local_irq_disable static inline void arch_local_irq_disable(void) { unsigned long temp; @@ -130,11 +144,14 @@ static inline void arch_local_irq_disable(void) : "memory", "cc"); \ }) +#define local_abt_enable() do { } while (0) +#define local_abt_disable() do { } while (0) #endif /* * Save the current interrupt enable state. */ +#define arch_local_save_flags arch_local_save_flags static inline unsigned long arch_local_save_flags(void) { unsigned long flags; @@ -147,6 +164,7 @@ static inline unsigned long arch_local_save_flags(void) /* * restore saved IRQ & FIQ state */ +#define arch_local_irq_restore arch_local_irq_restore static inline void arch_local_irq_restore(unsigned long flags) { asm volatile( @@ -156,10 +174,13 @@ static inline void arch_local_irq_restore(unsigned long flags) : "memory", "cc"); } +#define arch_irqs_disabled_flags arch_irqs_disabled_flags static inline int arch_irqs_disabled_flags(unsigned long flags) { return flags & IRQMASK_I_BIT; } +#include <asm-generic/irqflags.h> + #endif /* ifdef __KERNEL__ */ #endif /* ifndef __ASM_ARM_IRQFLAGS_H */ diff --git a/kernel/arch/arm/include/asm/jump_label.h b/kernel/arch/arm/include/asm/jump_label.h index 5f337dc5c..34f7b6980 100644 --- a/kernel/arch/arm/include/asm/jump_label.h +++ b/kernel/arch/arm/include/asm/jump_label.h @@ -4,23 +4,32 @@ #ifndef __ASSEMBLY__ #include <linux/types.h> +#include <asm/unified.h> #define JUMP_LABEL_NOP_SIZE 4 -#ifdef CONFIG_THUMB2_KERNEL -#define JUMP_LABEL_NOP "nop.w" -#else -#define JUMP_LABEL_NOP "nop" -#endif +static __always_inline bool arch_static_branch(struct static_key *key, bool branch) +{ + asm_volatile_goto("1:\n\t" + WASM(nop) "\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".word 1b, %l[l_yes], %c0\n\t" + ".popsection\n\t" + : : "i" (&((char *)key)[branch]) : : l_yes); + + return false; +l_yes: + return true; +} -static __always_inline bool arch_static_branch(struct static_key *key) +static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) { asm_volatile_goto("1:\n\t" - JUMP_LABEL_NOP "\n\t" + WASM(b) " %l[l_yes]\n\t" ".pushsection __jump_table, \"aw\"\n\t" ".word 1b, %l[l_yes], %c0\n\t" ".popsection\n\t" - : : "i" (key) : : l_yes); + : : "i" (&((char *)key)[branch]) : : l_yes); return false; l_yes: diff --git a/kernel/arch/arm/include/asm/kvm_arm.h b/kernel/arch/arm/include/asm/kvm_arm.h index d995821f1..dc641ddf0 100644 --- a/kernel/arch/arm/include/asm/kvm_arm.h +++ b/kernel/arch/arm/include/asm/kvm_arm.h @@ -218,4 +218,24 @@ #define HSR_DABT_CM (1U << 8) #define HSR_DABT_EA (1U << 9) +#define kvm_arm_exception_type \ + {0, "RESET" }, \ + {1, "UNDEFINED" }, \ + {2, "SOFTWARE" }, \ + {3, "PREF_ABORT" }, \ + {4, "DATA_ABORT" }, \ + {5, "IRQ" }, \ + {6, "FIQ" }, \ + {7, "HVC" } + +#define HSRECN(x) { HSR_EC_##x, #x } + +#define kvm_arm_exception_class \ + HSRECN(UNKNOWN), HSRECN(WFI), HSRECN(CP15_32), HSRECN(CP15_64), \ + HSRECN(CP14_MR), HSRECN(CP14_LS), HSRECN(CP_0_13), HSRECN(CP10_ID), \ + HSRECN(JAZELLE), HSRECN(BXJ), HSRECN(CP14_64), HSRECN(SVC_HYP), \ + HSRECN(HVC), HSRECN(SMC), 
HSRECN(IABT), HSRECN(IABT_HYP), \ + HSRECN(DABT), HSRECN(DABT_HYP) + + #endif /* __ARM_KVM_ARM_H__ */ diff --git a/kernel/arch/arm/include/asm/kvm_asm.h b/kernel/arch/arm/include/asm/kvm_asm.h index 25410b2d8..194c91b61 100644 --- a/kernel/arch/arm/include/asm/kvm_asm.h +++ b/kernel/arch/arm/include/asm/kvm_asm.h @@ -23,7 +23,7 @@ #define c0_MPIDR 1 /* MultiProcessor ID Register */ #define c0_CSSELR 2 /* Cache Size Selection Register */ #define c1_SCTLR 3 /* System Control Register */ -#define c1_ACTLR 4 /* Auxilliary Control Register */ +#define c1_ACTLR 4 /* Auxiliary Control Register */ #define c1_CPACR 5 /* Coprocessor Access Control */ #define c2_TTBR0 6 /* Translation Table Base Register 0 */ #define c2_TTBR0_high 7 /* TTBR0 top 32 bits */ diff --git a/kernel/arch/arm/include/asm/kvm_emulate.h b/kernel/arch/arm/include/asm/kvm_emulate.h index a9c80a2ea..3095df091 100644 --- a/kernel/arch/arm/include/asm/kvm_emulate.h +++ b/kernel/arch/arm/include/asm/kvm_emulate.h @@ -28,6 +28,18 @@ unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu); +static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu, + u8 reg_num) +{ + return *vcpu_reg(vcpu, reg_num); +} + +static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num, + unsigned long val) +{ + *vcpu_reg(vcpu, reg_num) = val; +} + bool kvm_condition_valid(struct kvm_vcpu *vcpu); void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr); void kvm_inject_undefined(struct kvm_vcpu *vcpu); diff --git a/kernel/arch/arm/include/asm/kvm_host.h b/kernel/arch/arm/include/asm/kvm_host.h index d71607c16..6692982c9 100644 --- a/kernel/arch/arm/include/asm/kvm_host.h +++ b/kernel/arch/arm/include/asm/kvm_host.h @@ -29,21 +29,18 @@ #define __KVM_HAVE_ARCH_INTC_INITIALIZED -#if defined(CONFIG_KVM_ARM_MAX_VCPUS) -#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS -#else -#define KVM_MAX_VCPUS 0 -#endif - #define KVM_USER_MEM_SLOTS 32 #define KVM_PRIVATE_MEM_SLOTS 4 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 #define KVM_HAVE_ONE_REG +#define KVM_HALT_POLL_NS_DEFAULT 500000 #define KVM_VCPU_MAX_FEATURES 2 #include <kvm/arm_vgic.h> +#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS + u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode); int __attribute_const__ kvm_target_cpu(void); int kvm_reset_vcpu(struct kvm_vcpu *vcpu); @@ -129,7 +126,10 @@ struct kvm_vcpu_arch { * here. 
*/ - /* Don't run the guest on this vcpu */ + /* vcpu power-off state */ + bool power_off; + + /* Don't run the guest (internal implementation need) */ bool pause; /* IO related fields */ @@ -148,6 +148,7 @@ struct kvm_vm_stat { struct kvm_vcpu_stat { u32 halt_successful_poll; + u32 halt_attempted_poll; u32 halt_wakeup; }; @@ -218,11 +219,6 @@ static inline int kvm_arch_dev_ioctl_check_extension(long ext) return 0; } -static inline void vgic_arch_setup(const struct vgic_params *vgic) -{ - BUG_ON(vgic->type != VGIC_V2); -} - int kvm_perf_init(void); int kvm_perf_teardown(void); @@ -236,4 +232,9 @@ static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} +static inline void kvm_arm_init_debug(void) {} +static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {} +static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {} +static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {} + #endif /* __ARM_KVM_HOST_H__ */ diff --git a/kernel/arch/arm/include/asm/mach/arch.h b/kernel/arch/arm/include/asm/mach/arch.h index 0406cb3f1..5c1ad11aa 100644 --- a/kernel/arch/arm/include/asm/mach/arch.h +++ b/kernel/arch/arm/include/asm/mach/arch.h @@ -47,11 +47,11 @@ struct machine_desc { unsigned l2c_aux_val; /* L2 cache aux value */ unsigned l2c_aux_mask; /* L2 cache aux mask */ void (*l2c_write_sec)(unsigned long, unsigned); - struct smp_operations *smp; /* SMP operations */ + const struct smp_operations *smp; /* SMP operations */ bool (*smp_init)(void); void (*fixup)(struct tag *, char **); void (*dt_fixup)(void); - void (*init_meminfo)(void); + long long (*pv_fixup)(void); void (*reserve)(void);/* reserve mem blocks */ void (*map_io)(void);/* IO mapping function */ void (*init_early)(void); diff --git a/kernel/arch/arm/include/asm/mach/irq.h b/kernel/arch/arm/include/asm/mach/irq.h index 2092ee1e1..de4634b51 100644 --- a/kernel/arch/arm/include/asm/mach/irq.h +++ b/kernel/arch/arm/include/asm/mach/irq.h @@ -23,10 +23,10 @@ extern int show_fiq_list(struct seq_file *, int); /* * This is for easy migration, but should be changed in the source */ -#define do_bad_IRQ(irq,desc) \ +#define do_bad_IRQ(desc) \ do { \ raw_spin_lock(&desc->lock); \ - handle_bad_irq(irq, desc); \ + handle_bad_irq(desc); \ raw_spin_unlock(&desc->lock); \ } while(0) diff --git a/kernel/arch/arm/include/asm/mach/pci.h b/kernel/arch/arm/include/asm/mach/pci.h index 28b9bb359..0070e8520 100644 --- a/kernel/arch/arm/include/asm/mach/pci.h +++ b/kernel/arch/arm/include/asm/mach/pci.h @@ -19,9 +19,7 @@ struct pci_bus; struct device; struct hw_pci { -#ifdef CONFIG_PCI_MSI struct msi_controller *msi_ctrl; -#endif struct pci_ops *ops; int nr_controllers; void **private_data; @@ -42,9 +40,6 @@ struct hw_pci { * Per-controller structure */ struct pci_sys_data { -#ifdef CONFIG_PCI_MSI - struct msi_controller *msi_ctrl; -#endif struct list_head node; int busnr; /* primary bus number */ u64 mem_offset; /* bus->cpu memory mapping offset */ @@ -57,12 +52,6 @@ struct pci_sys_data { u8 (*swizzle)(struct pci_dev *, u8 *); /* IRQ mapping */ int (*map_irq)(const struct pci_dev *, u8, u8); - /* Resource alignement requirements */ - resource_size_t (*align_resource)(struct pci_dev *dev, - const struct resource *res, - resource_size_t start, - resource_size_t size, - resource_size_t align); void *private_data; /* platform controller private data */ }; diff --git a/kernel/arch/arm/include/asm/mcpm.h 
b/kernel/arch/arm/include/asm/mcpm.h index 50b378f59..acd4983d9 100644 --- a/kernel/arch/arm/include/asm/mcpm.h +++ b/kernel/arch/arm/include/asm/mcpm.h @@ -137,17 +137,12 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster); /** * mcpm_cpu_suspend - bring the calling CPU into a suspended state * - * @expected_residency: duration in microseconds the CPU is expected - * to remain suspended, or 0 if unknown/infinity. - * - * The calling CPU is suspended. The expected residency argument is used - * as a hint by the platform specific backend to implement the appropriate - * sleep state level according to the knowledge it has on wake-up latency - * for the given hardware. + * The calling CPU is suspended. This is similar to mcpm_cpu_power_down() + * except for possible extra platform specific configuration steps to allow + * an asynchronous wake-up e.g. with a pending interrupt. * * If this CPU is found to be the "last man standing" in the cluster - * then the cluster may be prepared for power-down too, if the expected - * residency makes it worthwhile. + * then the cluster may be prepared for power-down too. * * This must be called with interrupts disabled. * @@ -157,7 +152,7 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster); * This will return if mcpm_platform_register() has not been called * previously, in which case the caller should take appropriate action. */ -void mcpm_cpu_suspend(u64 expected_residency); +void mcpm_cpu_suspend(void); /** * mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up @@ -234,12 +229,6 @@ struct mcpm_platform_ops { void (*cpu_is_up)(unsigned int cpu, unsigned int cluster); void (*cluster_is_up)(unsigned int cluster); int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster); - - /* deprecated callbacks */ - int (*power_up)(unsigned int cpu, unsigned int cluster); - void (*power_down)(void); - void (*suspend)(u64); - void (*powered_up)(void); }; /** @@ -251,35 +240,6 @@ struct mcpm_platform_ops { */ int __init mcpm_platform_register(const struct mcpm_platform_ops *ops); -/* Synchronisation structures for coordinating safe cluster setup/teardown: */ - -/* - * When modifying this structure, make sure you update the MCPM_SYNC_ defines - * to match. - */ -struct mcpm_sync_struct { - /* individual CPU states */ - struct { - s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE); - } cpus[MAX_CPUS_PER_CLUSTER]; - - /* cluster state */ - s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE); - - /* inbound-side state */ - s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE); -}; - -struct sync_struct { - struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS]; -}; - -void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster); -void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster); -void __mcpm_outbound_leave_critical(unsigned int cluster, int state); -bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster); -int __mcpm_cluster_state(unsigned int cluster); - /** * mcpm_sync_init - Initialize the cluster synchronization support * @@ -318,6 +278,29 @@ int __init mcpm_loopback(void (*cache_disable)(void)); void __init mcpm_smp_set_ops(void);
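With the expected_residency hint gone, the suspend call in a platform's idle path becomes trivial. A minimal sketch of a caller honouring the new signature; my_enter_low_power() is hypothetical and not part of this patch:

#include <linux/irqflags.h>
#include <asm/mcpm.h>

/* Hypothetical idle hook, illustration only. */
static void my_enter_low_power(void)
{
	/* mcpm_cpu_suspend() must be called with interrupts disabled. */
	local_irq_disable();
	mcpm_cpu_suspend();	/* no residency hint any more */
	/* Returning here means a wake-up occurred, or MCPM was never registered. */
	local_irq_enable();
}

+/* + * Synchronisation structures for coordinating safe cluster setup/teardown. + * This is private to the MCPM core code and shared between C and assembly. + * When modifying this structure, make sure you update the MCPM_SYNC_ defines + * to match.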
+ */ +struct mcpm_sync_struct { + /* individual CPU states */ + struct { + s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE); + } cpus[MAX_CPUS_PER_CLUSTER]; + + /* cluster state */ + s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE); + + /* inbound-side state */ + s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE); +}; + +struct sync_struct { + struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS]; +}; + #else /* diff --git a/kernel/arch/arm/include/asm/memory.h b/kernel/arch/arm/include/asm/memory.h index 184def0e1..c79b57bf7 100644 --- a/kernel/arch/arm/include/asm/memory.h +++ b/kernel/arch/arm/include/asm/memory.h @@ -18,8 +18,6 @@ #include <linux/types.h> #include <linux/sizes.h> -#include <asm/cache.h> - #ifdef CONFIG_NEED_MACH_MEMORY_H #include <mach/memory.h> #endif @@ -78,10 +76,12 @@ */ #define XIP_VIRT_ADDR(physaddr) (MODULES_VADDR + ((physaddr) & 0x000fffff)) +#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) /* * Allow 16MB-aligned ioremap pages */ #define IOREMAP_MAX_ORDER 24 +#endif #else /* CONFIG_MMU */ @@ -121,32 +121,12 @@ #endif /* - * Convert a physical address to a Page Frame Number and back - */ -#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) -#define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT) - -/* * Convert a page to/from a physical address */ #define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page))) #define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys))) /* - * Minimum guaranted alignment in pgd_alloc(). The page table pointers passed - * around in head.S and proc-*.S are shifted by this amount, in order to - * leave spare high bits for systems with physical address extension. This - * does not fully accomodate the 40-bit addressing capability of ARM LPAE, but - * gives us about 38-bits or so. - */ -#ifdef CONFIG_ARM_LPAE -#define ARCH_PGD_SHIFT L1_CACHE_SHIFT -#else -#define ARCH_PGD_SHIFT 0 -#endif -#define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1) - -/* * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical * memory. This is used for XIP and NoMMU kernels, and on platforms that don't * have CONFIG_ARM_PATCH_PHYS_VIRT. 
Assembly code must always use @@ -291,7 +271,7 @@ static inline void *phys_to_virt(phys_addr_t x) */ #define __pa(x) __virt_to_phys((unsigned long)(x)) #define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x))) -#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) +#define pfn_to_kaddr(pfn) __va((phys_addr_t)(pfn) << PAGE_SHIFT) extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x); @@ -302,7 +282,7 @@ extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x); */ static inline phys_addr_t __virt_to_idmap(unsigned long x) { - if (arch_virt_to_idmap) + if (IS_ENABLED(CONFIG_MMU) && arch_virt_to_idmap) return arch_virt_to_idmap(x); else return __virt_to_phys(x); diff --git a/kernel/arch/arm/include/asm/module.h b/kernel/arch/arm/include/asm/module.h index ed690c49e..e358b7966 100644 --- a/kernel/arch/arm/include/asm/module.h +++ b/kernel/arch/arm/include/asm/module.h @@ -16,11 +16,21 @@ enum { ARM_SEC_UNLIKELY, ARM_SEC_MAX, }; +#endif struct mod_arch_specific { +#ifdef CONFIG_ARM_UNWIND struct unwind_table *unwind[ARM_SEC_MAX]; -}; #endif +#ifdef CONFIG_ARM_MODULE_PLTS + struct elf32_shdr *core_plt; + struct elf32_shdr *init_plt; + int core_plt_count; + int init_plt_count; +#endif +}; + +u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val); /* * Add the ARM architecture version to the version magic string diff --git a/kernel/arch/arm/include/asm/outercache.h b/kernel/arch/arm/include/asm/outercache.h index 563b92fc2..c2bf24f40 100644 --- a/kernel/arch/arm/include/asm/outercache.h +++ b/kernel/arch/arm/include/asm/outercache.h @@ -129,21 +129,4 @@ static inline void outer_resume(void) { } #endif -#ifdef CONFIG_OUTER_CACHE_SYNC -/** - * outer_sync - perform a sync point for outer cache - * - * Ensure that all outer cache operations are complete and any store - * buffers are drained. 
- */ -static inline void outer_sync(void) -{ - if (outer_cache.sync) - outer_cache.sync(); -} -#else -static inline void outer_sync(void) -{ } -#endif - #endif /* __ASM_OUTERCACHE_H */ diff --git a/kernel/arch/arm/include/asm/pci.h b/kernel/arch/arm/include/asm/pci.h index 585dc33a7..a5635444c 100644 --- a/kernel/arch/arm/include/asm/pci.h +++ b/kernel/arch/arm/include/asm/pci.h @@ -31,16 +31,6 @@ static inline int pci_proc_domain(struct pci_bus *bus) */ #define PCI_DMA_BUS_IS_PHYS (1) -#ifdef CONFIG_PCI -static inline void pci_dma_burst_advice(struct pci_dev *pdev, - enum pci_dma_burst_strategy *strat, - unsigned long *strategy_parameter) -{ - *strat = PCI_DMA_BURST_INFINITY; - *strategy_parameter = ~0UL; -} -#endif - #define HAVE_PCI_MMAP extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state, int write_combine); diff --git a/kernel/arch/arm/include/asm/perf_event.h b/kernel/arch/arm/include/asm/perf_event.h index d9cf138fd..4f9dec489 100644 --- a/kernel/arch/arm/include/asm/perf_event.h +++ b/kernel/arch/arm/include/asm/perf_event.h @@ -19,4 +19,11 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs); #define perf_misc_flags(regs) perf_misc_flags(regs) #endif +#define perf_arch_fetch_caller_regs(regs, __ip) { \ + (regs)->ARM_pc = (__ip); \ + (regs)->ARM_fp = (unsigned long) __builtin_frame_address(0); \ + (regs)->ARM_sp = current_stack_pointer; \ + (regs)->ARM_cpsr = SVC_MODE; \ +} + #endif /* __ARM_PERF_EVENT_H__ */ diff --git a/kernel/arch/arm/include/asm/pgtable-2level-hwdef.h b/kernel/arch/arm/include/asm/pgtable-2level-hwdef.h index 5e68278e9..d0131ee6f 100644 --- a/kernel/arch/arm/include/asm/pgtable-2level-hwdef.h +++ b/kernel/arch/arm/include/asm/pgtable-2level-hwdef.h @@ -23,6 +23,7 @@ #define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */ #define PMD_BIT4 (_AT(pmdval_t, 1) << 4) #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5) +#define PMD_DOMAIN_MASK PMD_DOMAIN(0x0f) #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */ /* * - section diff --git a/kernel/arch/arm/include/asm/pgtable-2level.h b/kernel/arch/arm/include/asm/pgtable-2level.h index bfd662e49..aeddd28b3 100644 --- a/kernel/arch/arm/include/asm/pgtable-2level.h +++ b/kernel/arch/arm/include/asm/pgtable-2level.h @@ -129,7 +129,36 @@ /* * These are the memory types, defined to be compatible with - * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB + * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B + * ARMv6+ without TEX remapping, they are a table index. 
+ * ARMv6+ with TEX remapping, they correspond to n/a,TEX(0),C,B
+ *
+ * MT type		Pre-ARMv6	ARMv6+ type / cacheable status
+ * UNCACHED		Uncached	Strongly ordered
+ * BUFFERABLE		Bufferable	Normal memory / non-cacheable
+ * WRITETHROUGH		Writethrough	Normal memory / write through
+ * WRITEBACK		Writeback	Normal memory / write back, read alloc
+ * MINICACHE		Minicache	N/A
+ * WRITEALLOC		Writeback	Normal memory / write back, write alloc
+ * DEV_SHARED		Uncached	Device memory (shared)
+ * DEV_NONSHARED	Uncached	Device memory (non-shared)
+ * DEV_WC		Bufferable	Normal memory / non-cacheable
+ * DEV_CACHED		Writeback	Normal memory / write back, read alloc
+ * VECTORS		Variable	Normal memory / variable
+ *
+ * All normal memory mappings have the following properties:
+ * - reads can be repeated with no side effects
+ * - repeated reads return the last value written
+ * - reads can fetch additional locations without side effects
+ * - writes can be repeated (in certain cases) with no side effects
+ * - writes can be merged before accessing the target
+ * - unaligned accesses can be supported
+ *
+ * All device mappings have the following properties:
+ * - no access speculation
+ * - no repetition (eg, on return from an exception)
+ * - number, order and size of accesses are maintained
+ * - unaligned accesses are "unpredictable"
 */
#define L_PTE_MT_UNCACHED (_AT(pteval_t, 0x00) << 2) /* 0000 */ #define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 0x01) << 2) /* 0001 */ diff --git a/kernel/arch/arm/include/asm/pgtable.h b/kernel/arch/arm/include/asm/pgtable.h index f40354198..348caabb7 100644 --- a/kernel/arch/arm/include/asm/pgtable.h +++ b/kernel/arch/arm/include/asm/pgtable.h @@ -43,7 +43,7 @@ */ #define VMALLOC_OFFSET (8*1024*1024) #define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) -#define VMALLOC_END 0xff000000UL +#define VMALLOC_END 0xff800000UL #define LIBRARY_TEXT_START 0x0c000000 diff --git a/kernel/arch/arm/include/asm/pmu.h b/kernel/arch/arm/include/asm/pmu.h deleted file mode 100644 index 675e4ab79..000000000 --- a/kernel/arch/arm/include/asm/pmu.h +++ /dev/null @@ -1,163 +0,0 @@ -/* - * linux/arch/arm/include/asm/pmu.h - * - * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - */ - -#ifndef __ARM_PMU_H__ -#define __ARM_PMU_H__ - -#include <linux/interrupt.h> -#include <linux/perf_event.h> - -#include <asm/cputype.h> - -/* - * struct arm_pmu_platdata - ARM PMU platform data - * - * @handle_irq: an optional handler which will be called from the - * interrupt and passed the address of the low level handler, - * and can be used to implement any platform specific handling - * before or after calling it. - * @runtime_resume: an optional handler which will be called by the - * runtime PM framework following a call to pm_runtime_get(). - * Note that if pm_runtime_get() is called more than once in - * succession this handler will only be called once. - * @runtime_suspend: an optional handler which will be called by the - * runtime PM framework following a call to pm_runtime_put(). - * Note that if pm_runtime_get() is called more than once in - * succession this handler will only be called following the - * final call to pm_runtime_put() that actually disables the - * hardware.
- */ -struct arm_pmu_platdata { - irqreturn_t (*handle_irq)(int irq, void *dev, - irq_handler_t pmu_handler); - int (*runtime_resume)(struct device *dev); - int (*runtime_suspend)(struct device *dev); -}; - -#ifdef CONFIG_HW_PERF_EVENTS - -/* - * The ARMv7 CPU PMU supports up to 32 event counters. - */ -#define ARMPMU_MAX_HWEVENTS 32 - -#define HW_OP_UNSUPPORTED 0xFFFF -#define C(_x) PERF_COUNT_HW_CACHE_##_x -#define CACHE_OP_UNSUPPORTED 0xFFFF - -#define PERF_MAP_ALL_UNSUPPORTED \ - [0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED - -#define PERF_CACHE_MAP_ALL_UNSUPPORTED \ -[0 ... C(MAX) - 1] = { \ - [0 ... C(OP_MAX) - 1] = { \ - [0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED, \ - }, \ -} - -/* The events for a given PMU register set. */ -struct pmu_hw_events { - /* - * The events that are active on the PMU for the given index. - */ - struct perf_event *events[ARMPMU_MAX_HWEVENTS]; - - /* - * A 1 bit for an index indicates that the counter is being used for - * an event. A 0 means that the counter can be used. - */ - DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS); - - /* - * Hardware lock to serialize accesses to PMU registers. Needed for the - * read/modify/write sequences. - */ - raw_spinlock_t pmu_lock; - - /* - * When using percpu IRQs, we need a percpu dev_id. Place it here as we - * already have to allocate this struct per cpu. - */ - struct arm_pmu *percpu_pmu; -}; - -struct arm_pmu { - struct pmu pmu; - cpumask_t active_irqs; - int *irq_affinity; - char *name; - irqreturn_t (*handle_irq)(int irq_num, void *dev); - void (*enable)(struct perf_event *event); - void (*disable)(struct perf_event *event); - int (*get_event_idx)(struct pmu_hw_events *hw_events, - struct perf_event *event); - void (*clear_event_idx)(struct pmu_hw_events *hw_events, - struct perf_event *event); - int (*set_event_filter)(struct hw_perf_event *evt, - struct perf_event_attr *attr); - u32 (*read_counter)(struct perf_event *event); - void (*write_counter)(struct perf_event *event, u32 val); - void (*start)(struct arm_pmu *); - void (*stop)(struct arm_pmu *); - void (*reset)(void *); - int (*request_irq)(struct arm_pmu *, irq_handler_t handler); - void (*free_irq)(struct arm_pmu *); - int (*map_event)(struct perf_event *event); - int num_events; - atomic_t active_events; - struct mutex reserve_mutex; - u64 max_period; - struct platform_device *plat_device; - struct pmu_hw_events __percpu *hw_events; - struct notifier_block hotplug_nb; -}; - -#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) - -extern const struct dev_pm_ops armpmu_dev_pm_ops; - -int armpmu_register(struct arm_pmu *armpmu, int type); - -u64 armpmu_event_update(struct perf_event *event); - -int armpmu_event_set_period(struct perf_event *event); - -int armpmu_map_event(struct perf_event *event, - const unsigned (*event_map)[PERF_COUNT_HW_MAX], - const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX] - [PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_COUNT_HW_CACHE_RESULT_MAX], - u32 raw_event_mask); - -struct pmu_probe_info { - unsigned int cpuid; - unsigned int mask; - int (*init)(struct arm_pmu *); -}; - -#define PMU_PROBE(_cpuid, _mask, _fn) \ -{ \ - .cpuid = (_cpuid), \ - .mask = (_mask), \ - .init = (_fn), \ -} - -#define ARM_PMU_PROBE(_cpuid, _fn) \ - PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn) - -#define ARM_PMU_XSCALE_MASK ((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK) - -#define XSCALE_PMU_PROBE(_version, _fn) \ - PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn) - -#endif /* CONFIG_HW_PERF_EVENTS */ - -#endif /* 
__ARM_PMU_H__ */ diff --git a/kernel/arch/arm/include/asm/proc-fns.h b/kernel/arch/arm/include/asm/proc-fns.h index 5324c1112..8877ad5ff 100644 --- a/kernel/arch/arm/include/asm/proc-fns.h +++ b/kernel/arch/arm/include/asm/proc-fns.h @@ -125,13 +125,6 @@ extern void cpu_resume(void); ttbr; \ }) -#define cpu_set_ttbr(nr, val) \ - do { \ - u64 ttbr = val; \ - __asm__("mcrr p15, " #nr ", %Q0, %R0, c2" \ - : : "r" (ttbr)); \ - } while (0) - #define cpu_get_pgd() \ ({ \ u64 pg = cpu_get_ttbr(0); \ diff --git a/kernel/arch/arm/include/asm/psci.h b/kernel/arch/arm/include/asm/psci.h index c25ef3ec6..b4c6d9936 100644 --- a/kernel/arch/arm/include/asm/psci.h +++ b/kernel/arch/arm/include/asm/psci.h @@ -14,34 +14,11 @@ #ifndef __ASM_ARM_PSCI_H #define __ASM_ARM_PSCI_H -#define PSCI_POWER_STATE_TYPE_STANDBY 0 -#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1 - -struct psci_power_state { - u16 id; - u8 type; - u8 affinity_level; -}; - -struct psci_operations { - int (*cpu_suspend)(struct psci_power_state state, - unsigned long entry_point); - int (*cpu_off)(struct psci_power_state state); - int (*cpu_on)(unsigned long cpuid, unsigned long entry_point); - int (*migrate)(unsigned long cpuid); - int (*affinity_info)(unsigned long target_affinity, - unsigned long lowest_affinity_level); - int (*migrate_info_type)(void); -}; - -extern struct psci_operations psci_ops; extern struct smp_operations psci_smp_ops; -#ifdef CONFIG_ARM_PSCI -int psci_init(void); +#if defined(CONFIG_SMP) && defined(CONFIG_ARM_PSCI) bool psci_smp_available(void); #else -static inline int psci_init(void) { return 0; } static inline bool psci_smp_available(void) { return false; } #endif diff --git a/kernel/arch/arm/include/asm/smp.h b/kernel/arch/arm/include/asm/smp.h index 18f5a5541..3d6dc8b46 100644 --- a/kernel/arch/arm/include/asm/smp.h +++ b/kernel/arch/arm/include/asm/smp.h @@ -61,7 +61,7 @@ asmlinkage void secondary_start_kernel(void); struct secondary_data { union { unsigned long mpu_rgn_szr; - unsigned long pgdir; + u64 pgdir; }; unsigned long swapper_pg_dir; void *stack; @@ -69,11 +69,11 @@ struct secondary_data { extern struct secondary_data secondary_data; extern volatile int pen_release; extern void secondary_startup(void); +extern void secondary_startup_arm(void); extern int __cpu_disable(void); extern void __cpu_die(unsigned int cpu); -extern void cpu_die(void); extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); @@ -104,6 +104,7 @@ struct smp_operations { #ifdef CONFIG_HOTPLUG_CPU int (*cpu_kill)(unsigned int cpu); void (*cpu_die)(unsigned int cpu); + bool (*cpu_can_disable)(unsigned int cpu); int (*cpu_disable)(unsigned int cpu); #endif #endif @@ -111,7 +112,7 @@ struct smp_operations { struct of_cpu_method { const char *method; - struct smp_operations *ops; + const struct smp_operations *ops; }; #define CPU_METHOD_OF_DECLARE(name, _method, _ops) \ @@ -121,6 +122,6 @@ struct of_cpu_method { /* * set platform specific SMP operations */ -extern void smp_set_ops(struct smp_operations *); +extern void smp_set_ops(const struct smp_operations *); #endif /* ifndef __ASM_ARM_SMP_H */ diff --git a/kernel/arch/arm/include/asm/smp_plat.h b/kernel/arch/arm/include/asm/smp_plat.h index 993e5224d..f9080717f 100644 --- a/kernel/arch/arm/include/asm/smp_plat.h +++ b/kernel/arch/arm/include/asm/smp_plat.h @@ -107,4 +107,13 @@ static inline u32 mpidr_hash_size(void) extern int platform_can_secondary_boot(void); extern int platform_can_cpu_hotplug(void); +#ifdef 
CONFIG_HOTPLUG_CPU +extern int platform_can_hotplug_cpu(unsigned int cpu); +#else +static inline int platform_can_hotplug_cpu(unsigned int cpu) +{ + return 0; +} +#endif + #endif diff --git a/kernel/arch/arm/include/asm/suspend.h b/kernel/arch/arm/include/asm/suspend.h index cd20029bc..6c7182f32 100644 --- a/kernel/arch/arm/include/asm/suspend.h +++ b/kernel/arch/arm/include/asm/suspend.h @@ -7,6 +7,7 @@ struct sleep_save_sp { }; extern void cpu_resume(void); +extern void cpu_resume_arm(void); extern int cpu_suspend(unsigned long, int (*)(unsigned long)); #endif diff --git a/kernel/arch/arm/include/asm/switch_to.h b/kernel/arch/arm/include/asm/switch_to.h index f3e3d800c..c96208460 100644 --- a/kernel/arch/arm/include/asm/switch_to.h +++ b/kernel/arch/arm/include/asm/switch_to.h @@ -17,7 +17,9 @@ switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } * CPU. */ #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7) -#define finish_arch_switch(prev) dsb(ish) +#define __complete_pending_tlbi() dsb(ish) +#else +#define __complete_pending_tlbi() #endif /* @@ -29,6 +31,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info #define switch_to(prev,next,last) \ do { \ + __complete_pending_tlbi(); \ switch_kmaps(prev, next); \ last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ } while (0) diff --git a/kernel/arch/arm/include/asm/system_info.h b/kernel/arch/arm/include/asm/system_info.h index 720ea0320..3860cbd40 100644 --- a/kernel/arch/arm/include/asm/system_info.h +++ b/kernel/arch/arm/include/asm/system_info.h @@ -17,6 +17,7 @@ /* information about the system we're running on */ extern unsigned int system_rev; +extern const char *system_serial; extern unsigned int system_serial_low; extern unsigned int system_serial_high; extern unsigned int mem_fclk_21285; diff --git a/kernel/arch/arm/include/asm/thread_info.h b/kernel/arch/arm/include/asm/thread_info.h index b5a616376..1f36a4ecc 100644 --- a/kernel/arch/arm/include/asm/thread_info.h +++ b/kernel/arch/arm/include/asm/thread_info.h @@ -25,7 +25,6 @@ struct task_struct; #include <asm/types.h> -#include <asm/domain.h> typedef unsigned long mm_segment_t; @@ -75,9 +74,6 @@ struct thread_info { .flags = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ - .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_IO, DOMAIN_CLIENT), \ } #define init_thread_info (init_thread_union.thread_info) @@ -137,23 +133,19 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, /* * thread information flags: - * TIF_SYSCALL_TRACE - syscall trace active - * TIF_SYSCAL_AUDIT - syscall auditing active - * TIF_SIGPENDING - signal pending - * TIF_NEED_RESCHED - rescheduling necessary - * TIF_NOTIFY_RESUME - callback before returning to user * TIF_USEDFPU - FPU was used by this task this quantum (SMP) * TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED */ -#define TIF_SIGPENDING 0 -#define TIF_NEED_RESCHED 1 +#define TIF_SIGPENDING 0 /* signal pending */ +#define TIF_NEED_RESCHED 1 /* rescheduling necessary */ #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ -#define TIF_NEED_RESCHED_LAZY 3 -#define TIF_UPROBE 7 -#define TIF_SYSCALL_TRACE 8 -#define TIF_SYSCALL_AUDIT 9 -#define TIF_SYSCALL_TRACEPOINT 10 -#define TIF_SECCOMP 11 /* seccomp syscall filtering active */ +#define TIF_UPROBE 3 /* breakpointed or singlestepping */ +#define 
TIF_SYSCALL_TRACE 4 /* syscall trace active */ +#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ +#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ +#define TIF_SECCOMP 8 /* seccomp syscall filtering active */ +#define TIF_NEED_RESCHED_LAZY 7 + #define TIF_NOHZ 12 /* in adaptive nohz mode */ #define TIF_USING_IWMMXT 17 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ @@ -178,7 +170,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, * Change these and you break ASM code in entry-common.S */ #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ - _TIF_NOTIFY_RESUME | _TIF_UPROBE) + _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ + _TIF_NEED_RESCHED_LAZY) #endif /* __KERNEL__ */ #endif /* __ASM_ARM_THREAD_INFO_H */ diff --git a/kernel/arch/arm/include/asm/topology.h b/kernel/arch/arm/include/asm/topology.h index 2fe85fff5..370f7a732 100644 --- a/kernel/arch/arm/include/asm/topology.h +++ b/kernel/arch/arm/include/asm/topology.h @@ -18,7 +18,7 @@ extern struct cputopo_arm cpu_topology[NR_CPUS]; #define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id) #define topology_core_id(cpu) (cpu_topology[cpu].core_id) #define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling) -#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) +#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) void init_cpu_topology(void); void store_cpu_topology(unsigned int cpuid); diff --git a/kernel/arch/arm/include/asm/uaccess.h b/kernel/arch/arm/include/asm/uaccess.h index 74b17d09e..35c9db857 100644 --- a/kernel/arch/arm/include/asm/uaccess.h +++ b/kernel/arch/arm/include/asm/uaccess.h @@ -50,6 +50,35 @@ struct exception_table_entry extern int fixup_exception(struct pt_regs *regs); /* + * These two functions allow hooking accesses to userspace to increase + * system integrity by ensuring that the kernel cannot inadvertently + * perform such accesses (eg, via list poison values) which could then + * be exploited for privilege escalation. + */ +static inline unsigned int uaccess_save_and_enable(void) +{ +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + unsigned int old_domain = get_domain(); + + /* Set the current domain access to permit user accesses */ + set_domain((old_domain & ~domain_mask(DOMAIN_USER)) | + domain_val(DOMAIN_USER, DOMAIN_CLIENT)); + + return old_domain; +#else + return 0; +#endif +} + +static inline void uaccess_restore(unsigned int flags) +{ +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + /* Restore the user access mask */ + set_domain(flags); +#endif +}
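The pattern these two helpers enable is a bracket around every low-level user access: open the software-PAN window, do the access, close the window again on all paths, exactly as this series applies in __get_user_err() and __copy_from_user() below. A hedged sketch of the idiom; probe_user_byte() is a hypothetical helper, not part of the patch:

#include <linux/uaccess.h>

/*
 * Hypothetical helper, illustration only: read one byte from userspace
 * with the SW-PAN window opened just for the duration of the access.
 */
static inline int probe_user_byte(const unsigned char __user *uaddr,
				  unsigned char *val)
{
	unsigned int ua_flags;
	unsigned long tmp = 0;
	int err = 0;

	ua_flags = uaccess_save_and_enable();	/* open DOMAIN_USER */
	__get_user_asm_byte(tmp, (unsigned long)uaddr, err);
	uaccess_restore(ua_flags);		/* close it again */

	*val = tmp;
	return err;
}

+ +/* * These two are intentionally not defined anywhere - if the kernel * code generates any references to them, that's a bug.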
*/ @@ -165,6 +194,7 @@ extern int __get_user_64t_4(void *); register typeof(x) __r2 asm("r2"); \ register unsigned long __l asm("r1") = __limit; \ register int __e asm("r0"); \ + unsigned int __ua_flags = uaccess_save_and_enable(); \ switch (sizeof(*(__p))) { \ case 1: \ if (sizeof((x)) >= 8) \ @@ -192,6 +222,7 @@ extern int __get_user_64t_4(void *); break; \ default: __e = __get_user_bad(); break; \ } \ + uaccess_restore(__ua_flags); \ x = (typeof(*(p))) __r2; \ __e; \ }) @@ -224,6 +255,7 @@ extern int __put_user_8(void *, unsigned long long); register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \ register unsigned long __l asm("r1") = __limit; \ register int __e asm("r0"); \ + unsigned int __ua_flags = uaccess_save_and_enable(); \ switch (sizeof(*(__p))) { \ case 1: \ __put_user_x(__r2, __p, __e, __l, 1); \ @@ -239,6 +271,7 @@ extern int __put_user_8(void *, unsigned long long); break; \ default: __e = __put_user_bad(); break; \ } \ + uaccess_restore(__ua_flags); \ __e; \ }) @@ -300,20 +333,23 @@ static inline void set_fs(mm_segment_t fs) do { \ unsigned long __gu_addr = (unsigned long)(ptr); \ unsigned long __gu_val; \ + unsigned int __ua_flags; \ __chk_user_ptr(ptr); \ might_fault(); \ + __ua_flags = uaccess_save_and_enable(); \ switch (sizeof(*(ptr))) { \ case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \ case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \ case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \ default: (__gu_val) = __get_user_bad(); \ } \ + uaccess_restore(__ua_flags); \ (x) = (__typeof__(*(ptr)))__gu_val; \ } while (0) -#define __get_user_asm_byte(x, addr, err) \ +#define __get_user_asm(x, addr, err, instr) \ __asm__ __volatile__( \ - "1: " TUSER(ldrb) " %1,[%2],#0\n" \ + "1: " TUSER(instr) " %1, [%2], #0\n" \ "2:\n" \ " .pushsection .text.fixup,\"ax\"\n" \ " .align 2\n" \ @@ -329,6 +365,9 @@ do { \ : "r" (addr), "i" (-EFAULT) \ : "cc") +#define __get_user_asm_byte(x, addr, err) \ + __get_user_asm(x, addr, err, ldrb) + #ifndef __ARMEB__ #define __get_user_asm_half(x, __gu_addr, err) \ ({ \ @@ -348,22 +387,7 @@ do { \ #endif #define __get_user_asm_word(x, addr, err) \ - __asm__ __volatile__( \ - "1: " TUSER(ldr) " %1,[%2],#0\n" \ - "2:\n" \ - " .pushsection .text.fixup,\"ax\"\n" \ - " .align 2\n" \ - "3: mov %0, %3\n" \ - " mov %1, #0\n" \ - " b 2b\n" \ - " .popsection\n" \ - " .pushsection __ex_table,\"a\"\n" \ - " .align 3\n" \ - " .long 1b, 3b\n" \ - " .popsection" \ - : "+r" (err), "=&r" (x) \ - : "r" (addr), "i" (-EFAULT) \ - : "cc") + __get_user_asm(x, addr, err, ldr) #define __put_user(x, ptr) \ ({ \ @@ -381,9 +405,11 @@ do { \ #define __put_user_err(x, ptr, err) \ do { \ unsigned long __pu_addr = (unsigned long)(ptr); \ + unsigned int __ua_flags; \ __typeof__(*(ptr)) __pu_val = (x); \ __chk_user_ptr(ptr); \ might_fault(); \ + __ua_flags = uaccess_save_and_enable(); \ switch (sizeof(*(ptr))) { \ case 1: __put_user_asm_byte(__pu_val, __pu_addr, err); break; \ case 2: __put_user_asm_half(__pu_val, __pu_addr, err); break; \ @@ -391,11 +417,12 @@ do { \ case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break; \ default: __put_user_bad(); \ } \ + uaccess_restore(__ua_flags); \ } while (0) -#define __put_user_asm_byte(x, __pu_addr, err) \ +#define __put_user_asm(x, __pu_addr, err, instr) \ __asm__ __volatile__( \ - "1: " TUSER(strb) " %1,[%2],#0\n" \ + "1: " TUSER(instr) " %1, [%2], #0\n" \ "2:\n" \ " .pushsection .text.fixup,\"ax\"\n" \ " .align 2\n" \ @@ -410,6 +437,9 @@ do { \ : "r" (x), "r" (__pu_addr), "i" 
(-EFAULT) \ : "cc") +#define __put_user_asm_byte(x, __pu_addr, err) \ + __put_user_asm(x, __pu_addr, err, strb) + #ifndef __ARMEB__ #define __put_user_asm_half(x, __pu_addr, err) \ ({ \ @@ -427,21 +457,7 @@ do { \ #endif #define __put_user_asm_word(x, __pu_addr, err) \ - __asm__ __volatile__( \ - "1: " TUSER(str) " %1,[%2],#0\n" \ - "2:\n" \ - " .pushsection .text.fixup,\"ax\"\n" \ - " .align 2\n" \ - "3: mov %0, %3\n" \ - " b 2b\n" \ - " .popsection\n" \ - " .pushsection __ex_table,\"a\"\n" \ - " .align 3\n" \ - " .long 1b, 3b\n" \ - " .popsection" \ - : "+r" (err) \ - : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \ - : "cc") + __put_user_asm(x, __pu_addr, err, str) #ifndef __ARMEB__ #define __reg_oper0 "%R2" @@ -474,11 +490,50 @@ do { \ #ifdef CONFIG_MMU -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n); -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n); -extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n); -extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); -extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n); +extern unsigned long __must_check +arm_copy_from_user(void *to, const void __user *from, unsigned long n); + +static inline unsigned long __must_check +__copy_from_user(void *to, const void __user *from, unsigned long n) +{ + unsigned int __ua_flags = uaccess_save_and_enable(); + n = arm_copy_from_user(to, from, n); + uaccess_restore(__ua_flags); + return n; +} + +extern unsigned long __must_check +arm_copy_to_user(void __user *to, const void *from, unsigned long n); +extern unsigned long __must_check +__copy_to_user_std(void __user *to, const void *from, unsigned long n); + +static inline unsigned long __must_check +__copy_to_user(void __user *to, const void *from, unsigned long n) +{ +#ifndef CONFIG_UACCESS_WITH_MEMCPY + unsigned int __ua_flags = uaccess_save_and_enable(); + n = arm_copy_to_user(to, from, n); + uaccess_restore(__ua_flags); + return n; +#else + return arm_copy_to_user(to, from, n); +#endif +} + +extern unsigned long __must_check +arm_clear_user(void __user *addr, unsigned long n); +extern unsigned long __must_check +__clear_user_std(void __user *addr, unsigned long n); + +static inline unsigned long __must_check +__clear_user(void __user *addr, unsigned long n) +{ + unsigned int __ua_flags = uaccess_save_and_enable(); + n = arm_clear_user(addr, n); + uaccess_restore(__ua_flags); + return n; +} + #else #define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0) #define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0) @@ -511,6 +566,7 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo return n; } +/* These are from lib/ code, and use __get_user() and friends */ extern long strncpy_from_user(char *dest, const char __user *src, long count); extern __must_check long strlen_user(const char __user *str); diff --git a/kernel/arch/arm/include/asm/unified.h b/kernel/arch/arm/include/asm/unified.h index 200f9a7cd..a91ae4996 100644 --- a/kernel/arch/arm/include/asm/unified.h +++ b/kernel/arch/arm/include/asm/unified.h @@ -45,7 +45,6 @@ #define THUMB(x...) x #ifdef __ASSEMBLY__ #define W(instr) instr.w -#define BSYM(sym) sym + 1 #else #define WASM(instr) #instr ".w" #endif @@ -59,7 +58,6 @@ #define THUMB(x...) 
#ifdef __ASSEMBLY__ #define W(instr) instr -#define BSYM(sym) sym #else #define WASM(instr) #instr #endif diff --git a/kernel/arch/arm/include/asm/unistd.h b/kernel/arch/arm/include/asm/unistd.h index 32640c431..7b84657fb 100644 --- a/kernel/arch/arm/include/asm/unistd.h +++ b/kernel/arch/arm/include/asm/unistd.h @@ -19,14 +19,7 @@ * This may need to be greater than __NR_last_syscall+1 in order to * account for the padding in the syscall table */ -#define __NR_syscalls (388) - -/* - * *NOTE*: This is a ghost syscall private to the kernel. Only the - * __kuser_cmpxchg code in entry-armv.S should be aware of its - * existence. Don't ever use this from user code. - */ -#define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0) +#define __NR_syscalls (392) #define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYS_GETHOSTNAME diff --git a/kernel/arch/arm/include/asm/vfp.h b/kernel/arch/arm/include/asm/vfp.h index ee5f30842..22e414056 100644 --- a/kernel/arch/arm/include/asm/vfp.h +++ b/kernel/arch/arm/include/asm/vfp.h @@ -5,6 +5,9 @@ * First, the standard VFP set. */ +#ifndef __ASM_VFP_H +#define __ASM_VFP_H + #define FPSID cr0 #define FPSCR cr1 #define MVFR1 cr6 @@ -87,3 +90,9 @@ #define VFPOPDESC_UNUSED_BIT (24) #define VFPOPDESC_UNUSED_MASK (0xFF << VFPOPDESC_UNUSED_BIT) #define VFPOPDESC_OPDESC_MASK (~(VFPOPDESC_LENGTH_MASK | VFPOPDESC_UNUSED_MASK)) + +#ifndef __ASSEMBLY__ +void vfp_disable(void); +#endif + +#endif /* __ASM_VFP_H */ diff --git a/kernel/arch/arm/include/asm/xen/events.h b/kernel/arch/arm/include/asm/xen/events.h index 8b1f37bfe..71e473d05 100644 --- a/kernel/arch/arm/include/asm/xen/events.h +++ b/kernel/arch/arm/include/asm/xen/events.h @@ -20,4 +20,10 @@ static inline int xen_irqs_disabled(struct pt_regs *regs) atomic64_t, \ counter), (val)) +/* Rebind event channel is supported by default */ +static inline bool xen_support_evtchn_rebind(void) +{ + return true; +} + #endif /* _ASM_ARM_XEN_EVENTS_H */ diff --git a/kernel/arch/arm/include/asm/xen/hypervisor.h b/kernel/arch/arm/include/asm/xen/hypervisor.h index 1317ee40f..95251512e 100644 --- a/kernel/arch/arm/include/asm/xen/hypervisor.h +++ b/kernel/arch/arm/include/asm/xen/hypervisor.h @@ -1,6 +1,8 @@ #ifndef _ASM_ARM_XEN_HYPERVISOR_H #define _ASM_ARM_XEN_HYPERVISOR_H +#include <linux/init.h> + extern struct shared_info *HYPERVISOR_shared_info; extern struct start_info *xen_start_info; @@ -18,4 +20,20 @@ static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void) extern struct dma_map_ops *xen_dma_ops; +#ifdef CONFIG_XEN +void __init xen_early_init(void); +#else +static inline void xen_early_init(void) { return; } +#endif + +#ifdef CONFIG_HOTPLUG_CPU +static inline void xen_arch_register_cpu(int num) +{ +} + +static inline void xen_arch_unregister_cpu(int num) +{ +} +#endif + #endif /* _ASM_ARM_XEN_HYPERVISOR_H */ diff --git a/kernel/arch/arm/include/asm/xen/page-coherent.h b/kernel/arch/arm/include/asm/xen/page-coherent.h index efd562412..9408a994c 100644 --- a/kernel/arch/arm/include/asm/xen/page-coherent.h +++ b/kernel/arch/arm/include/asm/xen/page-coherent.h @@ -35,11 +35,22 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page, dma_addr_t dev_addr, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { - bool local = PFN_DOWN(dev_addr) == page_to_pfn(page); - /* Dom0 is mapped 1:1, so if pfn == mfn the page is local otherwise - * is a foreign page grant-mapped in dom0. 
If the page is local we - * can safely call the native dma_ops function, otherwise we call - * the xen specific function. */ + unsigned long page_pfn = page_to_xen_pfn(page); + unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr); + unsigned long compound_pages = + (1<<compound_order(page)) * XEN_PFN_PER_PAGE; + bool local = (page_pfn <= dev_pfn) && + (dev_pfn - page_pfn < compound_pages); + + /* + * Dom0 is mapped 1:1, and while the Linux page can span across + * multiple Xen pages, it's not possible for it to contain a + * mix of local and foreign Xen pages. So if the first xen_pfn + * == mfn the page is local, otherwise it's a foreign page + * grant-mapped in dom0. If the page is local we can safely + * call the native dma_ops function, otherwise we call the xen + * specific function. + */ if (local) __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); else @@ -51,10 +62,14 @@ static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, struct dma_attrs *attrs) { unsigned long pfn = PFN_DOWN(handle); - /* Dom0 is mapped 1:1, so calling pfn_valid on a foreign mfn will - * always return false. If the page is local we can safely call the - * native dma_ops function, otherwise we call the xen specific - * function. */ + /* + * Dom0 is mapped 1:1, and while the Linux page can span across + * multiple Xen pages, it's not possible to have a mix of local and + * foreign Xen pages. Calling pfn_valid on a foreign mfn will + * therefore always return false. If the page is local we can + * safely call the native dma_ops function, otherwise we call the xen + * specific function. + */ if (pfn_valid(pfn)) { if (__generic_dma_ops(hwdev)->unmap_page) __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); diff --git a/kernel/arch/arm/include/asm/xen/page.h b/kernel/arch/arm/include/asm/xen/page.h index 0b579b2f4..415dbc6e4 100644 --- a/kernel/arch/arm/include/asm/xen/page.h +++ b/kernel/arch/arm/include/asm/xen/page.h @@ -12,10 +12,6 @@ #include <xen/interface/grant_table.h> #define phys_to_machine_mapping_valid(pfn) (1) -#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) - -#define pte_mfn pte_pfn -#define mfn_pte pfn_pte /* Xen machine address */ typedef struct xmaddr { @@ -32,10 +28,33 @@ typedef struct xpaddr { #define INVALID_P2M_ENTRY (~0UL) +/* + * The pseudo-physical frame (pfn) used in all the helpers is always based + * on Xen page granularity (i.e. 4KB). + * + * A Linux page may be split across multiple non-contiguous Xen pages, so we + * have to keep track of frames based on 4KB page granularity. + * + * PV drivers should never make direct use of those helpers (particularly + * pfn_to_gfn and gfn_to_pfn).
+ */ + unsigned long __pfn_to_mfn(unsigned long pfn); extern struct rb_root phys_to_mach; -static inline unsigned long pfn_to_mfn(unsigned long pfn) +/* Pseudo-physical <-> Guest conversion */ +static inline unsigned long pfn_to_gfn(unsigned long pfn) +{ + return pfn; +} + +static inline unsigned long gfn_to_pfn(unsigned long gfn) +{ + return gfn; +} + +/* Pseudo-physical <-> BUS conversion */ +static inline unsigned long pfn_to_bfn(unsigned long pfn) { unsigned long mfn; @@ -48,33 +67,21 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn) return pfn; } -static inline unsigned long mfn_to_pfn(unsigned long mfn) +static inline unsigned long bfn_to_pfn(unsigned long bfn) { - return mfn; + return bfn; } -#define mfn_to_local_pfn(mfn) mfn_to_pfn(mfn) - -static inline xmaddr_t phys_to_machine(xpaddr_t phys) -{ - unsigned offset = phys.paddr & ~PAGE_MASK; - return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset); -} +#define bfn_to_local_pfn(bfn) bfn_to_pfn(bfn) -static inline xpaddr_t machine_to_phys(xmaddr_t machine) -{ - unsigned offset = machine.maddr & ~PAGE_MASK; - return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset); -} -/* VIRT <-> MACHINE conversion */ -#define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v)))) -#define virt_to_mfn(v) (pfn_to_mfn(virt_to_pfn(v))) -#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) +/* VIRT <-> GUEST conversion */ +#define virt_to_gfn(v) (pfn_to_gfn(virt_to_phys(v) >> XEN_PAGE_SHIFT)) +#define gfn_to_virt(m) (__va(gfn_to_pfn(m) << XEN_PAGE_SHIFT)) +/* Only used in PV code. But ARM guests are always HVM. */ static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr) { - /* TODO: assuming it is mapped in the kernel 1:1 */ - return virt_to_machine(vaddr); + BUG(); } /* TODO: this shouldn't be here but it is because the frontend drivers @@ -108,8 +115,8 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) #define xen_unmap(cookie) iounmap((cookie)) bool xen_arch_need_swiotlb(struct device *dev, - unsigned long pfn, - unsigned long mfn); + phys_addr_t phys, + dma_addr_t dev_addr); unsigned long xen_get_swiotlb_free_pages(unsigned int order); #endif /* _ASM_ARM_XEN_PAGE_H */
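To make the renamed helpers concrete, here is a hedged sketch of how a frontend-style caller would build a grant reference from a kernel virtual address after this change. gnttab_grant_foreign_access() is the existing grant-table API; my_share_ring() and its arguments are hypothetical:

#include <xen/grant_table.h>
#include <asm/xen/page.h>

/* Hypothetical helper: grant the other end read-write access to a ring page. */
static int my_share_ring(domid_t otherend, void *ring_page)
{
	/*
	 * On ARM guests pfn == gfn, so virt_to_gfn() reduces to
	 * virt_to_phys() >> XEN_PAGE_SHIFT (4KB Xen granularity).
	 */
	unsigned long gfn = virt_to_gfn(ring_page);

	return gnttab_grant_foreign_access(otherend, gfn, 0 /* not read-only */);
}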