Diffstat (limited to 'kernel/arch/powerpc/include/asm')
-rw-r--r--  kernel/arch/powerpc/include/asm/Kbuild | 2
-rw-r--r--  kernel/arch/powerpc/include/asm/archrandom.h | 28
-rw-r--r--  kernel/arch/powerpc/include/asm/atomic.h | 7
-rw-r--r--  kernel/arch/powerpc/include/asm/barrier.h | 7
-rw-r--r--  kernel/arch/powerpc/include/asm/cache.h | 7
-rw-r--r--  kernel/arch/powerpc/include/asm/cacheflush.h | 7
-rw-r--r--  kernel/arch/powerpc/include/asm/checksum.h | 37
-rw-r--r--  kernel/arch/powerpc/include/asm/cmpxchg.h | 17
-rw-r--r--  kernel/arch/powerpc/include/asm/compat.h | 7
-rw-r--r--  kernel/arch/powerpc/include/asm/cputable.h | 12
-rw-r--r--  kernel/arch/powerpc/include/asm/cputhreads.h | 13
-rw-r--r--  kernel/arch/powerpc/include/asm/device.h | 18
-rw-r--r--  kernel/arch/powerpc/include/asm/disassemble.h | 5
-rw-r--r--  kernel/arch/powerpc/include/asm/dma-mapping.h | 82
-rw-r--r--  kernel/arch/powerpc/include/asm/edac.h | 4
-rw-r--r--  kernel/arch/powerpc/include/asm/eeh.h | 10
-rw-r--r--  kernel/arch/powerpc/include/asm/exception-64e.h | 15
-rw-r--r--  kernel/arch/powerpc/include/asm/fsl_guts.h | 192
-rw-r--r--  kernel/arch/powerpc/include/asm/ftrace.h | 2
-rw-r--r--  kernel/arch/powerpc/include/asm/highmem.h | 13
-rw-r--r--  kernel/arch/powerpc/include/asm/hugetlb.h | 14
-rw-r--r--  kernel/arch/powerpc/include/asm/icswx.h | 184
-rw-r--r--  kernel/arch/powerpc/include/asm/io.h | 1
-rw-r--r--  kernel/arch/powerpc/include/asm/iommu.h | 144
-rw-r--r--  kernel/arch/powerpc/include/asm/jump_label.h | 19
-rw-r--r--  kernel/arch/powerpc/include/asm/kvm_book3s.h | 5
-rw-r--r--  kernel/arch/powerpc/include/asm/kvm_book3s_64.h | 2
-rw-r--r--  kernel/arch/powerpc/include/asm/kvm_book3s_asm.h | 22
-rw-r--r--  kernel/arch/powerpc/include/asm/kvm_booke.h | 4
-rw-r--r--  kernel/arch/powerpc/include/asm/kvm_host.h | 38
-rw-r--r--  kernel/arch/powerpc/include/asm/kvm_ppc.h | 14
-rw-r--r--  kernel/arch/powerpc/include/asm/machdep.h | 42
-rw-r--r--  kernel/arch/powerpc/include/asm/mm-arch-hooks.h | 28
-rw-r--r--  kernel/arch/powerpc/include/asm/mmu-8xx.h | 33
-rw-r--r--  kernel/arch/powerpc/include/asm/mmu-hash64.h | 4
-rw-r--r--  kernel/arch/powerpc/include/asm/mmu_context.h | 41
-rw-r--r--  kernel/arch/powerpc/include/asm/mpc5121.h | 59
-rw-r--r--  kernel/arch/powerpc/include/asm/mpc52xx_psc.h | 7
-rw-r--r--  kernel/arch/powerpc/include/asm/msi_bitmap.h | 1
-rw-r--r--  kernel/arch/powerpc/include/asm/opal-api.h | 163
-rw-r--r--  kernel/arch/powerpc/include/asm/opal.h | 19
-rw-r--r--  kernel/arch/powerpc/include/asm/page.h | 36
-rw-r--r--  kernel/arch/powerpc/include/asm/pci-bridge.h | 17
-rw-r--r--  kernel/arch/powerpc/include/asm/pci.h | 32
-rw-r--r--  kernel/arch/powerpc/include/asm/pgtable-ppc32.h | 19
-rw-r--r--  kernel/arch/powerpc/include/asm/pgtable-ppc64.h | 64
-rw-r--r--  kernel/arch/powerpc/include/asm/pgtable.h | 17
-rw-r--r--  kernel/arch/powerpc/include/asm/pnv-pci.h | 2
-rw-r--r--  kernel/arch/powerpc/include/asm/ppc-opcode.h | 30
-rw-r--r--  kernel/arch/powerpc/include/asm/ppc-pci.h | 1
-rw-r--r--  kernel/arch/powerpc/include/asm/processor.h | 10
-rw-r--r--  kernel/arch/powerpc/include/asm/pte-8xx.h | 31
-rw-r--r--  kernel/arch/powerpc/include/asm/pte-book3e.h | 1
-rw-r--r--  kernel/arch/powerpc/include/asm/pte-common.h | 5
-rw-r--r--  kernel/arch/powerpc/include/asm/pte-hash64.h | 1
-rw-r--r--  kernel/arch/powerpc/include/asm/qe_ic.h | 23
-rw-r--r--  kernel/arch/powerpc/include/asm/reg.h | 14
-rw-r--r--  kernel/arch/powerpc/include/asm/reg_booke.h | 6
-rw-r--r--  kernel/arch/powerpc/include/asm/spinlock.h | 2
-rw-r--r--  kernel/arch/powerpc/include/asm/spu_csa.h | 6
-rw-r--r--  kernel/arch/powerpc/include/asm/synch.h | 2
-rw-r--r--  kernel/arch/powerpc/include/asm/syscall.h | 54
-rw-r--r--  kernel/arch/powerpc/include/asm/systbl.h | 17
-rw-r--r--  kernel/arch/powerpc/include/asm/topology.h | 2
-rw-r--r--  kernel/arch/powerpc/include/asm/trace.h | 20
-rw-r--r--  kernel/arch/powerpc/include/asm/trace_clock.h | 19
-rw-r--r--  kernel/arch/powerpc/include/asm/tsi108_pci.h | 2
-rw-r--r--  kernel/arch/powerpc/include/asm/uaccess.h | 8
-rw-r--r--  kernel/arch/powerpc/include/asm/unistd.h | 2
-rw-r--r--  kernel/arch/powerpc/include/asm/vio.h | 2
-rw-r--r--  kernel/arch/powerpc/include/asm/word-at-a-time.h | 5
71 files changed, 1137 insertions, 642 deletions
diff --git a/kernel/arch/powerpc/include/asm/Kbuild b/kernel/arch/powerpc/include/asm/Kbuild
index 4b87205c2..ab9f4e0ed 100644
--- a/kernel/arch/powerpc/include/asm/Kbuild
+++ b/kernel/arch/powerpc/include/asm/Kbuild
@@ -6,6 +6,4 @@ generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += preempt.h
generic-y += rwsem.h
-generic-y += scatterlist.h
-generic-y += trace_clock.h
generic-y += vtime.h
diff --git a/kernel/arch/powerpc/include/asm/archrandom.h b/kernel/arch/powerpc/include/asm/archrandom.h
index 0cc6eedc4..85e88f7a5 100644
--- a/kernel/arch/powerpc/include/asm/archrandom.h
+++ b/kernel/arch/powerpc/include/asm/archrandom.h
@@ -7,14 +7,23 @@
static inline int arch_get_random_long(unsigned long *v)
{
- if (ppc_md.get_random_long)
- return ppc_md.get_random_long(v);
-
return 0;
}
static inline int arch_get_random_int(unsigned int *v)
{
+ return 0;
+}
+
+static inline int arch_get_random_seed_long(unsigned long *v)
+{
+ if (ppc_md.get_random_seed)
+ return ppc_md.get_random_seed(v);
+
+ return 0;
+}
+static inline int arch_get_random_seed_int(unsigned int *v)
+{
unsigned long val;
int rc;
@@ -27,22 +36,13 @@ static inline int arch_get_random_int(unsigned int *v)
static inline int arch_has_random(void)
{
- return !!ppc_md.get_random_long;
-}
-
-static inline int arch_get_random_seed_long(unsigned long *v)
-{
- return 0;
-}
-static inline int arch_get_random_seed_int(unsigned int *v)
-{
return 0;
}
+
static inline int arch_has_random_seed(void)
{
- return 0;
+ return !!ppc_md.get_random_seed;
}
-
#endif /* CONFIG_ARCH_RANDOM */
#ifdef CONFIG_PPC_POWERNV
diff --git a/kernel/arch/powerpc/include/asm/atomic.h b/kernel/arch/powerpc/include/asm/atomic.h
index 512d2782b..55f106ed1 100644
--- a/kernel/arch/powerpc/include/asm/atomic.h
+++ b/kernel/arch/powerpc/include/asm/atomic.h
@@ -67,6 +67,10 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
+ATOMIC_OP(and, and)
+ATOMIC_OP(or, or)
+ATOMIC_OP(xor, xor)
+
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
@@ -304,6 +308,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)
+ATOMIC64_OP(and, and)
+ATOMIC64_OP(or, or)
+ATOMIC64_OP(xor, xor)
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
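
For readers skimming the atomic.h hunks: ATOMIC_OP(op, asm_op) generates a void, unordered read-modify-write helper built on a lwarx/stwcx. retry loop, so the new lines add atomic_and/atomic_or/atomic_xor plus their 64-bit ldarx/stdcx. counterparts. A minimal sketch of roughly what ATOMIC_OP(and, and) expands to (illustrative only; the PPC405 errata macro is omitted here):

static __inline__ void atomic_and(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_and\n"
"	and	%0,%2,%0\n"
"	stwcx.	%0,0,%3\n"
"	bne-	1b\n"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}
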
diff --git a/kernel/arch/powerpc/include/asm/barrier.h b/kernel/arch/powerpc/include/asm/barrier.h
index a3bf5be11..0eca6efc0 100644
--- a/kernel/arch/powerpc/include/asm/barrier.h
+++ b/kernel/arch/powerpc/include/asm/barrier.h
@@ -34,7 +34,7 @@
#define rmb() __asm__ __volatile__ ("sync" : : : "memory")
#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
-#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
#ifdef __SUBARCH_HAS_LWSYNC
# define SMPWMB LWSYNC
@@ -76,12 +76,12 @@
do { \
compiletime_assert_atomic_type(*p); \
smp_lwsync(); \
- ACCESS_ONCE(*p) = (v); \
+ WRITE_ONCE(*p, v); \
} while (0)
#define smp_load_acquire(p) \
({ \
- typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
smp_lwsync(); \
___p1; \
@@ -89,5 +89,6 @@ do { \
#define smp_mb__before_atomic() smp_mb()
#define smp_mb__after_atomic() smp_mb()
+#define smp_mb__before_spinlock() smp_mb()
#endif /* _ASM_POWERPC_BARRIER_H */
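
The barrier.h hunks replace set_mb()/ACCESS_ONCE() with the newer smp_store_mb()/WRITE_ONCE()/READ_ONCE() interfaces; the acquire/release pair still orders through smp_lwsync(). A small, hypothetical producer/consumer sketch (not part of this patch) showing how those macros are meant to be paired:

static int shared_data;
static int shared_flag;

static void producer(void)
{
	shared_data = 42;			/* ordinary store */
	smp_store_release(&shared_flag, 1);	/* lwsync, then WRITE_ONCE() */
}

static int consumer(void)
{
	if (smp_load_acquire(&shared_flag))	/* READ_ONCE(), then lwsync */
		return shared_data;		/* guaranteed to observe 42 */
	return -1;
}
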
diff --git a/kernel/arch/powerpc/include/asm/cache.h b/kernel/arch/powerpc/include/asm/cache.h
index 0dc42c508..5f8229e24 100644
--- a/kernel/arch/powerpc/include/asm/cache.h
+++ b/kernel/arch/powerpc/include/asm/cache.h
@@ -3,7 +3,6 @@
#ifdef __KERNEL__
-#include <asm/reg.h>
/* bytes per L1 cache line */
#if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
@@ -40,12 +39,6 @@ struct ppc64_caches {
};
extern struct ppc64_caches ppc64_caches;
-
-static inline void logmpp(u64 x)
-{
- asm volatile(PPC_LOGMPP(R1) : : "r" (x));
-}
-
#endif /* __powerpc64__ && ! __ASSEMBLY__ */
#if defined(__ASSEMBLY__)
diff --git a/kernel/arch/powerpc/include/asm/cacheflush.h b/kernel/arch/powerpc/include/asm/cacheflush.h
index 30b35fff2..6229e6b60 100644
--- a/kernel/arch/powerpc/include/asm/cacheflush.h
+++ b/kernel/arch/powerpc/include/asm/cacheflush.h
@@ -40,7 +40,12 @@ extern void __flush_dcache_icache(void *page_va);
extern void flush_dcache_icache_page(struct page *page);
#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
extern void __flush_dcache_icache_phys(unsigned long physaddr);
-#endif /* CONFIG_PPC32 && !CONFIG_BOOKE */
+#else
+static inline void __flush_dcache_icache_phys(unsigned long physaddr)
+{
+ BUG();
+}
+#endif
extern void flush_dcache_range(unsigned long start, unsigned long stop);
#ifdef CONFIG_PPC32
diff --git a/kernel/arch/powerpc/include/asm/checksum.h b/kernel/arch/powerpc/include/asm/checksum.h
index 8251a3ba8..e8d9ef475 100644
--- a/kernel/arch/powerpc/include/asm/checksum.h
+++ b/kernel/arch/powerpc/include/asm/checksum.h
@@ -20,15 +20,6 @@
extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
/*
- * computes the checksum of the TCP/UDP pseudo-header
- * returns a 16-bit checksum, already complemented
- */
-extern __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
- unsigned short len,
- unsigned short proto,
- __wsum sum);
-
-/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
@@ -127,6 +118,34 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
#endif
}
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+
+#define HAVE_ARCH_CSUM_ADD
+static inline __wsum csum_add(__wsum csum, __wsum addend)
+{
+#ifdef __powerpc64__
+ u64 res = (__force u64)csum;
+
+ res += (__force u64)addend;
+ return (__force __wsum)((u32)res + (res >> 32));
+#else
+ asm("addc %0,%0,%1;"
+ "addze %0,%0;"
+ : "+r" (csum) : "r" (addend));
+ return csum;
+#endif
+}
+
#endif
#endif /* __KERNEL__ */
#endif
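
The checksum.h hunk turns csum_tcpudp_magic() into a trivial wrapper around csum_tcpudp_nofold() and adds an arch-optimised csum_add(), an end-around-carry addition of two 32-bit one's-complement sums. A hypothetical caller (not from this patch), assuming the first buffer's length is even so the two partial sums can simply be added:

static __sum16 checksum_two_parts(const void *a, int alen,
				  const void *b, int blen)
{
	__wsum s1 = csum_partial(a, alen, 0);
	__wsum s2 = csum_partial(b, blen, 0);

	/* csum_add() performs the end-around-carry add defined above */
	return csum_fold(csum_add(s1, s2));
}
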
diff --git a/kernel/arch/powerpc/include/asm/cmpxchg.h b/kernel/arch/powerpc/include/asm/cmpxchg.h
index d463c68fe..d1a8d93cc 100644
--- a/kernel/arch/powerpc/include/asm/cmpxchg.h
+++ b/kernel/arch/powerpc/include/asm/cmpxchg.h
@@ -18,12 +18,12 @@ __xchg_u32(volatile void *p, unsigned long val)
unsigned long prev;
__asm__ __volatile__(
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%2 \n"
PPC405_ERR77(0,%2)
" stwcx. %3,0,%2 \n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
: "r" (p), "r" (val)
: "cc", "memory");
@@ -61,12 +61,12 @@ __xchg_u64(volatile void *p, unsigned long val)
unsigned long prev;
__asm__ __volatile__(
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%2 \n"
PPC405_ERR77(0,%2)
" stdcx. %3,0,%2 \n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
: "r" (p), "r" (val)
: "cc", "memory");
@@ -144,7 +144,6 @@ __xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
* Compare and exchange - if *p == old, set it to new,
* and return the old value of *p.
*/
-#define __HAVE_ARCH_CMPXCHG 1
static __always_inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
@@ -152,14 +151,14 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
unsigned int prev;
__asm__ __volatile__ (
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
cmpw 0,%0,%3\n\
bne- 2f\n"
PPC405_ERR77(0,%2)
" stwcx. %4,0,%2\n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
"\n\
2:"
: "=&r" (prev), "+m" (*p)
@@ -198,13 +197,13 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
unsigned long prev;
__asm__ __volatile__ (
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
cmpd 0,%0,%3\n\
bne- 2f\n\
stdcx. %4,0,%2\n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
"\n\
2:"
: "=&r" (prev), "+m" (*p)
diff --git a/kernel/arch/powerpc/include/asm/compat.h b/kernel/arch/powerpc/include/asm/compat.h
index b142b8e0e..4f2df589e 100644
--- a/kernel/arch/powerpc/include/asm/compat.h
+++ b/kernel/arch/powerpc/include/asm/compat.h
@@ -174,6 +174,13 @@ typedef struct compat_siginfo {
int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
int _fd;
} _sigpoll;
+
+ /* SIGSYS */
+ struct {
+ unsigned int _call_addr; /* calling insn */
+ int _syscall; /* triggering system call number */
+ unsigned int _arch; /* AUDIT_ARCH_* of syscall */
+ } _sigsys;
} _sifields;
} compat_siginfo_t;
diff --git a/kernel/arch/powerpc/include/asm/cputable.h b/kernel/arch/powerpc/include/asm/cputable.h
index 6367b8347..b11807267 100644
--- a/kernel/arch/powerpc/include/asm/cputable.h
+++ b/kernel/arch/powerpc/include/asm/cputable.h
@@ -242,11 +242,13 @@ enum {
/* We only set the TM feature if the kernel was compiled with TM supprt */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-#define CPU_FTR_TM_COMP CPU_FTR_TM
-#define PPC_FEATURE2_HTM_COMP PPC_FEATURE2_HTM
+#define CPU_FTR_TM_COMP CPU_FTR_TM
+#define PPC_FEATURE2_HTM_COMP PPC_FEATURE2_HTM
+#define PPC_FEATURE2_HTM_NOSC_COMP PPC_FEATURE2_HTM_NOSC
#else
-#define CPU_FTR_TM_COMP 0
-#define PPC_FEATURE2_HTM_COMP 0
+#define CPU_FTR_TM_COMP 0
+#define PPC_FEATURE2_HTM_COMP 0
+#define PPC_FEATURE2_HTM_NOSC_COMP 0
#endif
/* We need to mark all pages as being coherent if we're SMP or we have a
@@ -366,7 +368,7 @@ enum {
CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | \
CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE)
#define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON | CPU_FTR_USE_TB)
-#define CPU_FTRS_8XX (CPU_FTR_USE_TB)
+#define CPU_FTRS_8XX (CPU_FTR_USE_TB | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_40X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_44X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_440x6 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE | \
diff --git a/kernel/arch/powerpc/include/asm/cputhreads.h b/kernel/arch/powerpc/include/asm/cputhreads.h
index 5be6c4753..ba42e46ea 100644
--- a/kernel/arch/powerpc/include/asm/cputhreads.h
+++ b/kernel/arch/powerpc/include/asm/cputhreads.h
@@ -31,9 +31,9 @@ extern cpumask_t threads_core_mask;
/* cpu_thread_mask_to_cores - Return a cpumask of one per cores
* hit by the argument
*
- * @threads: a cpumask of threads
+ * @threads: a cpumask of online threads
*
- * This function returns a cpumask which will have one "cpu" (or thread)
+ * This function returns a cpumask which will have one online cpu's
* bit set for each core that has at least one thread set in the argument.
*
* This can typically be used for things like IPI for tlb invalidations
@@ -42,13 +42,16 @@ extern cpumask_t threads_core_mask;
static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
{
cpumask_t tmp, res;
- int i;
+ int i, cpu;
cpumask_clear(&res);
for (i = 0; i < NR_CPUS; i += threads_per_core) {
cpumask_shift_left(&tmp, &threads_core_mask, i);
- if (cpumask_intersects(threads, &tmp))
- cpumask_set_cpu(i, &res);
+ if (cpumask_intersects(threads, &tmp)) {
+ cpu = cpumask_next_and(-1, &tmp, cpu_online_mask);
+ if (cpu < nr_cpu_ids)
+ cpumask_set_cpu(cpu, &res);
+ }
}
return res;
}
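
With the change above, cpu_thread_mask_to_cores() now picks one online thread per core, which matters when the result is used as an IPI target mask. A hypothetical helper (placeholder names) built on it:

static void kick_one_thread_per_core(const struct cpumask *threads,
				     smp_call_func_t fn, void *arg)
{
	cpumask_t cores = cpu_thread_mask_to_cores(threads);
	int cpu;

	/* one online thread of each core represented in 'threads' */
	for_each_cpu(cpu, &cores)
		smp_call_function_single(cpu, fn, arg, 1);
}
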
diff --git a/kernel/arch/powerpc/include/asm/device.h b/kernel/arch/powerpc/include/asm/device.h
index 9f1371bab..406c2b1ff 100644
--- a/kernel/arch/powerpc/include/asm/device.h
+++ b/kernel/arch/powerpc/include/asm/device.h
@@ -10,6 +10,7 @@ struct dma_map_ops;
struct device_node;
#ifdef CONFIG_PPC64
struct pci_dn;
+struct iommu_table;
#endif
/*
@@ -23,13 +24,15 @@ struct dev_archdata {
struct dma_map_ops *dma_ops;
/*
- * When an iommu is in use, dma_data is used as a ptr to the base of the
- * iommu_table. Otherwise, it is a simple numerical offset.
+ * These two used to be a union. However, with the hybrid ops we need
+ * both so here we store both a DMA offset for direct mappings and
+ * an iommu_table for remapped DMA.
*/
- union {
- dma_addr_t dma_offset;
- void *iommu_table_base;
- } dma_data;
+ dma_addr_t dma_offset;
+
+#ifdef CONFIG_PPC64
+ struct iommu_table *iommu_table_base;
+#endif
#ifdef CONFIG_IOMMU_API
void *iommu_domain;
@@ -46,6 +49,9 @@ struct dev_archdata {
#ifdef CONFIG_FAIL_IOMMU
int fail_iommu;
#endif
+#ifdef CONFIG_CXL_BASE
+ struct cxl_context *cxl_ctx;
+#endif
};
struct pdev_archdata {
diff --git a/kernel/arch/powerpc/include/asm/disassemble.h b/kernel/arch/powerpc/include/asm/disassemble.h
index 6330a61b8..4852e8491 100644
--- a/kernel/arch/powerpc/include/asm/disassemble.h
+++ b/kernel/arch/powerpc/include/asm/disassemble.h
@@ -42,6 +42,11 @@ static inline unsigned int get_dcrn(u32 inst)
return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
}
+static inline unsigned int get_tmrn(u32 inst)
+{
+ return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
+}
+
static inline unsigned int get_rt(u32 inst)
{
return (inst >> 21) & 0x1f;
diff --git a/kernel/arch/powerpc/include/asm/dma-mapping.h b/kernel/arch/powerpc/include/asm/dma-mapping.h
index 9103687b0..7f522c021 100644
--- a/kernel/arch/powerpc/include/asm/dma-mapping.h
+++ b/kernel/arch/powerpc/include/asm/dma-mapping.h
@@ -18,15 +18,17 @@
#include <asm/io.h>
#include <asm/swiotlb.h>
+#ifdef CONFIG_PPC64
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
+#endif
/* Some dma direct funcs must be visible for use in other dma_ops */
-extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
+extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs);
+extern void __dma_direct_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs);
-extern void dma_direct_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs);
extern int dma_direct_mmap_coherent(struct device *dev,
struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t handle,
@@ -106,7 +108,7 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
static inline dma_addr_t get_dma_offset(struct device *dev)
{
if (dev)
- return dev->archdata.dma_data.dma_offset;
+ return dev->archdata.dma_offset;
return PCI_DRAM_OFFSET;
}
@@ -114,77 +116,20 @@ static inline dma_addr_t get_dma_offset(struct device *dev)
static inline void set_dma_offset(struct device *dev, dma_addr_t off)
{
if (dev)
- dev->archdata.dma_data.dma_offset = off;
+ dev->archdata.dma_offset = off;
}
/* this will be removed soon */
#define flush_write_buffers()
-#include <asm-generic/dma-mapping-common.h>
-
-static inline int dma_supported(struct device *dev, u64 mask)
-{
- struct dma_map_ops *dma_ops = get_dma_ops(dev);
+#define HAVE_ARCH_DMA_SET_MASK 1
+extern int dma_set_mask(struct device *dev, u64 dma_mask);
- if (unlikely(dma_ops == NULL))
- return 0;
- if (dma_ops->dma_supported == NULL)
- return 1;
- return dma_ops->dma_supported(dev, mask);
-}
+#include <asm-generic/dma-mapping-common.h>
-extern int dma_set_mask(struct device *dev, u64 dma_mask);
extern int __dma_set_mask(struct device *dev, u64 dma_mask);
extern u64 __dma_get_required_mask(struct device *dev);
-#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
-{
- struct dma_map_ops *dma_ops = get_dma_ops(dev);
- void *cpu_addr;
-
- BUG_ON(!dma_ops);
-
- cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs);
-
- debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
-
- return cpu_addr;
-}
-
-#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
-{
- struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
- BUG_ON(!dma_ops);
-
- debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-
- dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
-}
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
- debug_dma_mapping_error(dev, dma_addr);
- if (dma_ops->mapping_error)
- return dma_ops->mapping_error(dev, dma_addr);
-
-#ifdef CONFIG_PPC64
- return (dma_addr == DMA_ERROR_CODE);
-#else
- return 0;
-#endif
-}
-
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
#ifdef CONFIG_SWIOTLB
@@ -210,9 +155,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
return daddr - get_dma_offset(dev);
}
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
#define ARCH_HAS_DMA_MMAP_COHERENT
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
diff --git a/kernel/arch/powerpc/include/asm/edac.h b/kernel/arch/powerpc/include/asm/edac.h
index 6ead88bbf..5571e23d2 100644
--- a/kernel/arch/powerpc/include/asm/edac.h
+++ b/kernel/arch/powerpc/include/asm/edac.h
@@ -12,11 +12,11 @@
#define ASM_EDAC_H
/*
* ECC atomic, DMA, SMP and interrupt safe scrub function.
- * Implements the per arch atomic_scrub() that EDAC use for software
+ * Implements the per arch edac_atomic_scrub() that EDAC use for software
* ECC scrubbing. It reads memory and then writes back the original
* value, allowing the hardware to detect and correct memory errors.
*/
-static __inline__ void atomic_scrub(void *va, u32 size)
+static __inline__ void edac_atomic_scrub(void *va, u32 size)
{
unsigned int *virt_addr = va;
unsigned int temp;
diff --git a/kernel/arch/powerpc/include/asm/eeh.h b/kernel/arch/powerpc/include/asm/eeh.h
index a52db28ec..867c39b45 100644
--- a/kernel/arch/powerpc/include/asm/eeh.h
+++ b/kernel/arch/powerpc/include/asm/eeh.h
@@ -27,6 +27,8 @@
#include <linux/time.h>
#include <linux/atomic.h>
+#include <uapi/asm/eeh.h>
+
struct pci_dev;
struct pci_bus;
struct pci_dn;
@@ -79,6 +81,7 @@ struct pci_dn;
#define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */
#define EEH_PE_CFG_RESTRICTED (1 << 9) /* Block config on error */
#define EEH_PE_REMOVED (1 << 10) /* Removed permanently */
+#define EEH_PE_PRI_BUS (1 << 11) /* Cached primary bus */
struct eeh_pe {
int type; /* PE type: PHB/Bus/Device */
@@ -185,11 +188,6 @@ enum {
#define EEH_STATE_DMA_ACTIVE (1 << 4) /* Active DMA */
#define EEH_STATE_MMIO_ENABLED (1 << 5) /* MMIO enabled */
#define EEH_STATE_DMA_ENABLED (1 << 6) /* DMA enabled */
-#define EEH_PE_STATE_NORMAL 0 /* Normal state */
-#define EEH_PE_STATE_RESET 1 /* PE reset asserted */
-#define EEH_PE_STATE_STOPPED_IO_DMA 2 /* Frozen PE */
-#define EEH_PE_STATE_STOPPED_DMA 4 /* Stopped DMA, Enabled IO */
-#define EEH_PE_STATE_UNAVAIL 5 /* Unavailable */
#define EEH_RESET_DEACTIVATE 0 /* Deactivate the PE reset */
#define EEH_RESET_HOT 1 /* Hot reset */
#define EEH_RESET_FUNDAMENTAL 3 /* Fundamental reset */
@@ -294,6 +292,8 @@ int eeh_pe_set_option(struct eeh_pe *pe, int option);
int eeh_pe_get_state(struct eeh_pe *pe);
int eeh_pe_reset(struct eeh_pe *pe, int option);
int eeh_pe_configure(struct eeh_pe *pe);
+int eeh_pe_inject_err(struct eeh_pe *pe, int type, int func,
+ unsigned long addr, unsigned long mask);
/**
* EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
diff --git a/kernel/arch/powerpc/include/asm/exception-64e.h b/kernel/arch/powerpc/include/asm/exception-64e.h
index a8b52b610..a703452d6 100644
--- a/kernel/arch/powerpc/include/asm/exception-64e.h
+++ b/kernel/arch/powerpc/include/asm/exception-64e.h
@@ -69,13 +69,14 @@
#define EX_TLB_ESR ( 9 * 8) /* Level 0 and 2 only */
#define EX_TLB_SRR0 (10 * 8)
#define EX_TLB_SRR1 (11 * 8)
+#define EX_TLB_R7 (12 * 8)
#ifdef CONFIG_BOOK3E_MMU_TLB_STATS
-#define EX_TLB_R8 (12 * 8)
-#define EX_TLB_R9 (13 * 8)
-#define EX_TLB_LR (14 * 8)
-#define EX_TLB_SIZE (15 * 8)
+#define EX_TLB_R8 (13 * 8)
+#define EX_TLB_R9 (14 * 8)
+#define EX_TLB_LR (15 * 8)
+#define EX_TLB_SIZE (16 * 8)
#else
-#define EX_TLB_SIZE (12 * 8)
+#define EX_TLB_SIZE (13 * 8)
#endif
#define START_EXCEPTION(label) \
@@ -204,8 +205,8 @@ exc_##label##_book3e:
#endif
#define SET_IVOR(vector_number, vector_offset) \
- li r3,vector_offset@l; \
- ori r3,r3,interrupt_base_book3e@l; \
+ LOAD_REG_ADDR(r3,interrupt_base_book3e);\
+ ori r3,r3,vector_offset@l; \
mtspr SPRN_IVOR##vector_number,r3;
#endif /* _ASM_POWERPC_EXCEPTION_64E_H */
diff --git a/kernel/arch/powerpc/include/asm/fsl_guts.h b/kernel/arch/powerpc/include/asm/fsl_guts.h
deleted file mode 100644
index 43b6bb1a4..000000000
--- a/kernel/arch/powerpc/include/asm/fsl_guts.h
+++ /dev/null
@@ -1,192 +0,0 @@
-/**
- * Freecale 85xx and 86xx Global Utilties register set
- *
- * Authors: Jeff Brown
- * Timur Tabi <timur@freescale.com>
- *
- * Copyright 2004,2007,2012 Freescale Semiconductor, Inc
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef __ASM_POWERPC_FSL_GUTS_H__
-#define __ASM_POWERPC_FSL_GUTS_H__
-#ifdef __KERNEL__
-
-/**
- * Global Utility Registers.
- *
- * Not all registers defined in this structure are available on all chips, so
- * you are expected to know whether a given register actually exists on your
- * chip before you access it.
- *
- * Also, some registers are similar on different chips but have slightly
- * different names. In these cases, one name is chosen to avoid extraneous
- * #ifdefs.
- */
-struct ccsr_guts {
- __be32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */
- __be32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */
- __be32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */
- __be32 pordevsr; /* 0x.000c - POR I/O Device Status Register */
- __be32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */
- __be32 pordevsr2; /* 0x.0014 - POR device status register 2 */
- u8 res018[0x20 - 0x18];
- __be32 porcir; /* 0x.0020 - POR Configuration Information Register */
- u8 res024[0x30 - 0x24];
- __be32 gpiocr; /* 0x.0030 - GPIO Control Register */
- u8 res034[0x40 - 0x34];
- __be32 gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */
- u8 res044[0x50 - 0x44];
- __be32 gpindr; /* 0x.0050 - General-Purpose Input Data Register */
- u8 res054[0x60 - 0x54];
- __be32 pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */
- __be32 pmuxcr2; /* 0x.0064 - Alternate function signal multiplex control 2 */
- __be32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */
- u8 res06c[0x70 - 0x6c];
- __be32 devdisr; /* 0x.0070 - Device Disable Control */
-#define CCSR_GUTS_DEVDISR_TB1 0x00001000
-#define CCSR_GUTS_DEVDISR_TB0 0x00004000
- __be32 devdisr2; /* 0x.0074 - Device Disable Control 2 */
- u8 res078[0x7c - 0x78];
- __be32 pmjcr; /* 0x.007c - 4 Power Management Jog Control Register */
- __be32 powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */
- __be32 pmrccr; /* 0x.0084 - Power Management Reset Counter Configuration Register */
- __be32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter Configuration Register */
- __be32 pmcdr; /* 0x.008c - 4Power management clock disable register */
- __be32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */
- __be32 rstrscr; /* 0x.0094 - Reset Request Status and Control Register */
- __be32 ectrstcr; /* 0x.0098 - Exception reset control register */
- __be32 autorstsr; /* 0x.009c - Automatic reset status register */
- __be32 pvr; /* 0x.00a0 - Processor Version Register */
- __be32 svr; /* 0x.00a4 - System Version Register */
- u8 res0a8[0xb0 - 0xa8];
- __be32 rstcr; /* 0x.00b0 - Reset Control Register */
- u8 res0b4[0xc0 - 0xb4];
- __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register
- Called 'elbcvselcr' on 86xx SOCs */
- u8 res0c4[0x100 - 0xc4];
- __be32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers
- There are 16 registers */
- u8 res140[0x224 - 0x140];
- __be32 iodelay1; /* 0x.0224 - IO delay control register 1 */
- __be32 iodelay2; /* 0x.0228 - IO delay control register 2 */
- u8 res22c[0x604 - 0x22c];
- __be32 pamubypenr; /* 0x.604 - PAMU bypass enable register */
- u8 res608[0x800 - 0x608];
- __be32 clkdvdr; /* 0x.0800 - Clock Divide Register */
- u8 res804[0x900 - 0x804];
- __be32 ircr; /* 0x.0900 - Infrared Control Register */
- u8 res904[0x908 - 0x904];
- __be32 dmacr; /* 0x.0908 - DMA Control Register */
- u8 res90c[0x914 - 0x90c];
- __be32 elbccr; /* 0x.0914 - eLBC Control Register */
- u8 res918[0xb20 - 0x918];
- __be32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */
- __be32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */
- __be32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */
- u8 resb2c[0xe00 - 0xb2c];
- __be32 clkocr; /* 0x.0e00 - Clock Out Select Register */
- u8 rese04[0xe10 - 0xe04];
- __be32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */
- u8 rese14[0xe20 - 0xe14];
- __be32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */
- __be32 cpfor; /* 0x.0e24 - L2 charge pump fuse override register */
- u8 rese28[0xf04 - 0xe28];
- __be32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */
- __be32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 0 */
- u8 resf0c[0xf2c - 0xf0c];
- __be32 itcr; /* 0x.0f2c - Internal transaction control register */
- u8 resf30[0xf40 - 0xf30];
- __be32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */
- __be32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */
-} __attribute__ ((packed));
-
-
-/* Alternate function signal multiplex control */
-#define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x))
-
-#ifdef CONFIG_PPC_86xx
-
-#define CCSR_GUTS_DMACR_DEV_SSI 0 /* DMA controller/channel set to SSI */
-#define CCSR_GUTS_DMACR_DEV_IR 1 /* DMA controller/channel set to IR */
-
-/*
- * Set the DMACR register in the GUTS
- *
- * The DMACR register determines the source of initiated transfers for each
- * channel on each DMA controller. Rather than have a bunch of repetitive
- * macros for the bit patterns, we just have a function that calculates
- * them.
- *
- * guts: Pointer to GUTS structure
- * co: The DMA controller (0 or 1)
- * ch: The channel on the DMA controller (0, 1, 2, or 3)
- * device: The device to set as the source (CCSR_GUTS_DMACR_DEV_xx)
- */
-static inline void guts_set_dmacr(struct ccsr_guts __iomem *guts,
- unsigned int co, unsigned int ch, unsigned int device)
-{
- unsigned int shift = 16 + (8 * (1 - co) + 2 * (3 - ch));
-
- clrsetbits_be32(&guts->dmacr, 3 << shift, device << shift);
-}
-
-#define CCSR_GUTS_PMUXCR_LDPSEL 0x00010000
-#define CCSR_GUTS_PMUXCR_SSI1_MASK 0x0000C000 /* Bitmask for SSI1 */
-#define CCSR_GUTS_PMUXCR_SSI1_LA 0x00000000 /* Latched address */
-#define CCSR_GUTS_PMUXCR_SSI1_HI 0x00004000 /* High impedance */
-#define CCSR_GUTS_PMUXCR_SSI1_SSI 0x00008000 /* Used for SSI1 */
-#define CCSR_GUTS_PMUXCR_SSI2_MASK 0x00003000 /* Bitmask for SSI2 */
-#define CCSR_GUTS_PMUXCR_SSI2_LA 0x00000000 /* Latched address */
-#define CCSR_GUTS_PMUXCR_SSI2_HI 0x00001000 /* High impedance */
-#define CCSR_GUTS_PMUXCR_SSI2_SSI 0x00002000 /* Used for SSI2 */
-#define CCSR_GUTS_PMUXCR_LA_22_25_LA 0x00000000 /* Latched Address */
-#define CCSR_GUTS_PMUXCR_LA_22_25_HI 0x00000400 /* High impedance */
-#define CCSR_GUTS_PMUXCR_DBGDRV 0x00000200 /* Signals not driven */
-#define CCSR_GUTS_PMUXCR_DMA2_0 0x00000008
-#define CCSR_GUTS_PMUXCR_DMA2_3 0x00000004
-#define CCSR_GUTS_PMUXCR_DMA1_0 0x00000002
-#define CCSR_GUTS_PMUXCR_DMA1_3 0x00000001
-
-/*
- * Set the DMA external control bits in the GUTS
- *
- * The DMA external control bits in the PMUXCR are only meaningful for
- * channels 0 and 3. Any other channels are ignored.
- *
- * guts: Pointer to GUTS structure
- * co: The DMA controller (0 or 1)
- * ch: The channel on the DMA controller (0, 1, 2, or 3)
- * value: the new value for the bit (0 or 1)
- */
-static inline void guts_set_pmuxcr_dma(struct ccsr_guts __iomem *guts,
- unsigned int co, unsigned int ch, unsigned int value)
-{
- if ((ch == 0) || (ch == 3)) {
- unsigned int shift = 2 * (co + 1) - (ch & 1) - 1;
-
- clrsetbits_be32(&guts->pmuxcr, 1 << shift, value << shift);
- }
-}
-
-#define CCSR_GUTS_CLKDVDR_PXCKEN 0x80000000
-#define CCSR_GUTS_CLKDVDR_SSICKEN 0x20000000
-#define CCSR_GUTS_CLKDVDR_PXCKINV 0x10000000
-#define CCSR_GUTS_CLKDVDR_PXCKDLY_SHIFT 25
-#define CCSR_GUTS_CLKDVDR_PXCKDLY_MASK 0x06000000
-#define CCSR_GUTS_CLKDVDR_PXCKDLY(x) \
- (((x) & 3) << CCSR_GUTS_CLKDVDR_PXCKDLY_SHIFT)
-#define CCSR_GUTS_CLKDVDR_PXCLK_SHIFT 16
-#define CCSR_GUTS_CLKDVDR_PXCLK_MASK 0x001F0000
-#define CCSR_GUTS_CLKDVDR_PXCLK(x) (((x) & 31) << CCSR_GUTS_CLKDVDR_PXCLK_SHIFT)
-#define CCSR_GUTS_CLKDVDR_SSICLK_MASK 0x000000FF
-#define CCSR_GUTS_CLKDVDR_SSICLK(x) ((x) & CCSR_GUTS_CLKDVDR_SSICLK_MASK)
-
-#endif
-
-#endif
-#endif
diff --git a/kernel/arch/powerpc/include/asm/ftrace.h b/kernel/arch/powerpc/include/asm/ftrace.h
index e3661872f..ef89b1465 100644
--- a/kernel/arch/powerpc/include/asm/ftrace.h
+++ b/kernel/arch/powerpc/include/asm/ftrace.h
@@ -2,7 +2,7 @@
#define _ASM_POWERPC_FTRACE
#ifdef CONFIG_FUNCTION_TRACER
-#define MCOUNT_ADDR ((long)(_mcount))
+#define MCOUNT_ADDR ((unsigned long)(_mcount))
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
#ifdef __ASSEMBLY__
diff --git a/kernel/arch/powerpc/include/asm/highmem.h b/kernel/arch/powerpc/include/asm/highmem.h
index caaf6e006..01c2c23b3 100644
--- a/kernel/arch/powerpc/include/asm/highmem.h
+++ b/kernel/arch/powerpc/include/asm/highmem.h
@@ -84,19 +84,6 @@ static inline void *kmap_atomic(struct page *page)
return kmap_atomic_prot(page, kmap_prot);
}
-static inline struct page *kmap_atomic_to_page(void *ptr)
-{
- unsigned long idx, vaddr = (unsigned long) ptr;
- pte_t *pte;
-
- if (vaddr < FIXADDR_START)
- return virt_to_page(ptr);
-
- idx = virt_to_fix(vaddr);
- pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
- return pte_page(*pte);
-}
-
#define flush_cache_kmaps() flush_cache_all()
diff --git a/kernel/arch/powerpc/include/asm/hugetlb.h b/kernel/arch/powerpc/include/asm/hugetlb.h
index 1d53a65b4..7eac89b9f 100644
--- a/kernel/arch/powerpc/include/asm/hugetlb.h
+++ b/kernel/arch/powerpc/include/asm/hugetlb.h
@@ -112,11 +112,6 @@ static inline int prepare_hugepage_range(struct file *file,
return 0;
}
-static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
-{
-}
-
-
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
@@ -173,15 +168,6 @@ static inline pte_t huge_ptep_get(pte_t *ptep)
return *ptep;
}
-static inline int arch_prepare_hugepage(struct page *page)
-{
- return 0;
-}
-
-static inline void arch_release_hugepage(struct page *page)
-{
-}
-
static inline void arch_clear_hugepage_flags(struct page *page)
{
}
diff --git a/kernel/arch/powerpc/include/asm/icswx.h b/kernel/arch/powerpc/include/asm/icswx.h
new file mode 100644
index 000000000..9f8402b35
--- /dev/null
+++ b/kernel/arch/powerpc/include/asm/icswx.h
@@ -0,0 +1,184 @@
+/*
+ * ICSWX api
+ *
+ * Copyright (C) 2015 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This provides the Initiate Coprocessor Store Word Indexed (ICSWX)
+ * instruction. This instruction is used to communicate with PowerPC
+ * coprocessors. This also provides definitions of the structures used
+ * to communicate with the coprocessor.
+ *
+ * The RFC02130: Coprocessor Architecture document is the reference for
+ * everything in this file unless otherwise noted.
+ */
+#ifndef _ARCH_POWERPC_INCLUDE_ASM_ICSWX_H_
+#define _ARCH_POWERPC_INCLUDE_ASM_ICSWX_H_
+
+#include <asm/ppc-opcode.h> /* for PPC_ICSWX */
+
+/* Chapter 6.5.8 Coprocessor-Completion Block (CCB) */
+
+#define CCB_VALUE (0x3fffffffffffffff)
+#define CCB_ADDRESS (0xfffffffffffffff8)
+#define CCB_CM (0x0000000000000007)
+#define CCB_CM0 (0x0000000000000004)
+#define CCB_CM12 (0x0000000000000003)
+
+#define CCB_CM0_ALL_COMPLETIONS (0x0)
+#define CCB_CM0_LAST_IN_CHAIN (0x4)
+#define CCB_CM12_STORE (0x0)
+#define CCB_CM12_INTERRUPT (0x1)
+
+#define CCB_SIZE (0x10)
+#define CCB_ALIGN CCB_SIZE
+
+struct coprocessor_completion_block {
+ __be64 value;
+ __be64 address;
+} __packed __aligned(CCB_ALIGN);
+
+
+/* Chapter 6.5.7 Coprocessor-Status Block (CSB) */
+
+#define CSB_V (0x80)
+#define CSB_F (0x04)
+#define CSB_CH (0x03)
+#define CSB_CE_INCOMPLETE (0x80)
+#define CSB_CE_TERMINATION (0x40)
+#define CSB_CE_TPBC (0x20)
+
+#define CSB_CC_SUCCESS (0)
+#define CSB_CC_INVALID_ALIGN (1)
+#define CSB_CC_OPERAND_OVERLAP (2)
+#define CSB_CC_DATA_LENGTH (3)
+#define CSB_CC_TRANSLATION (5)
+#define CSB_CC_PROTECTION (6)
+#define CSB_CC_RD_EXTERNAL (7)
+#define CSB_CC_INVALID_OPERAND (8)
+#define CSB_CC_PRIVILEGE (9)
+#define CSB_CC_INTERNAL (10)
+#define CSB_CC_WR_EXTERNAL (12)
+#define CSB_CC_NOSPC (13)
+#define CSB_CC_EXCESSIVE_DDE (14)
+#define CSB_CC_WR_TRANSLATION (15)
+#define CSB_CC_WR_PROTECTION (16)
+#define CSB_CC_UNKNOWN_CODE (17)
+#define CSB_CC_ABORT (18)
+#define CSB_CC_TRANSPORT (20)
+#define CSB_CC_SEGMENTED_DDL (31)
+#define CSB_CC_PROGRESS_POINT (32)
+#define CSB_CC_DDE_OVERFLOW (33)
+#define CSB_CC_SESSION (34)
+#define CSB_CC_PROVISION (36)
+#define CSB_CC_CHAIN (37)
+#define CSB_CC_SEQUENCE (38)
+#define CSB_CC_HW (39)
+
+#define CSB_SIZE (0x10)
+#define CSB_ALIGN CSB_SIZE
+
+struct coprocessor_status_block {
+ u8 flags;
+ u8 cs;
+ u8 cc;
+ u8 ce;
+ __be32 count;
+ __be64 address;
+} __packed __aligned(CSB_ALIGN);
+
+
+/* Chapter 6.5.10 Data-Descriptor List (DDL)
+ * each list contains one or more Data-Descriptor Entries (DDE)
+ */
+
+#define DDE_P (0x8000)
+
+#define DDE_SIZE (0x10)
+#define DDE_ALIGN DDE_SIZE
+
+struct data_descriptor_entry {
+ __be16 flags;
+ u8 count;
+ u8 index;
+ __be32 length;
+ __be64 address;
+} __packed __aligned(DDE_ALIGN);
+
+
+/* Chapter 6.5.2 Coprocessor-Request Block (CRB) */
+
+#define CRB_SIZE (0x80)
+#define CRB_ALIGN (0x100) /* Errata: requires 256 alignment */
+
+/* Coprocessor Status Block field
+ * ADDRESS address of CSB
+ * C CCB is valid
+ * AT 0 = addrs are virtual, 1 = addrs are phys
+ * M enable perf monitor
+ */
+#define CRB_CSB_ADDRESS (0xfffffffffffffff0)
+#define CRB_CSB_C (0x0000000000000008)
+#define CRB_CSB_AT (0x0000000000000002)
+#define CRB_CSB_M (0x0000000000000001)
+
+struct coprocessor_request_block {
+ __be32 ccw;
+ __be32 flags;
+ __be64 csb_addr;
+
+ struct data_descriptor_entry source;
+ struct data_descriptor_entry target;
+
+ struct coprocessor_completion_block ccb;
+
+ u8 reserved[48];
+
+ struct coprocessor_status_block csb;
+} __packed __aligned(CRB_ALIGN);
+
+
+/* RFC02167 Initiate Coprocessor Instructions document
+ * Chapter 8.2.1.1.1 RS
+ * Chapter 8.2.3 Coprocessor Directive
+ * Chapter 8.2.4 Execution
+ *
+ * The CCW must be converted to BE before passing to icswx()
+ */
+
+#define CCW_PS (0xff000000)
+#define CCW_CT (0x00ff0000)
+#define CCW_CD (0x0000ffff)
+#define CCW_CL (0x0000c000)
+
+
+/* RFC02167 Initiate Coprocessor Instructions document
+ * Chapter 8.2.1 Initiate Coprocessor Store Word Indexed (ICSWX)
+ * Chapter 8.2.4.1 Condition Register 0
+ */
+
+#define ICSWX_INITIATED (0x8)
+#define ICSWX_BUSY (0x4)
+#define ICSWX_REJECTED (0x2)
+
+static inline int icswx(__be32 ccw, struct coprocessor_request_block *crb)
+{
+ __be64 ccw_reg = ccw;
+ u32 cr;
+
+ __asm__ __volatile__(
+ PPC_ICSWX(%1,0,%2) "\n"
+ "mfcr %0\n"
+ : "=r" (cr)
+ : "r" (ccw_reg), "r" (crb)
+ : "cr0", "memory");
+
+ return (int)((cr >> 28) & 0xf);
+}
+
+
+#endif /* _ARCH_POWERPC_INCLUDE_ASM_ICSWX_H_ */
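
The new icswx.h only provides the request/status block layouts and the raw icswx() wrapper; coprocessor authorisation (ACOP/PID setup) and interrupt-driven completion are expected to be handled elsewhere. A heavily simplified, hypothetical sketch of driving it by polling the CSB valid bit (the coprocessor type/directive values and the address-translation details are placeholders, not defined by this header):

static int submit_request(struct coprocessor_request_block *crb,
			  u32 ct, u32 cd)
{
	__be32 ccw = cpu_to_be32(((ct << 16) & CCW_CT) | (cd & CCW_CD));

	memset(&crb->csb, 0, sizeof(crb->csb));
	/* placeholder CSB address setup; CRB_CSB_AT mode selection elided */
	crb->csb_addr = cpu_to_be64(__pa(&crb->csb) & CRB_CSB_ADDRESS);

	if (icswx(ccw, crb) != ICSWX_INITIATED)
		return -EBUSY;			/* busy or rejected */

	while (!(READ_ONCE(crb->csb.flags) & CSB_V))
		cpu_relax();			/* wait for the coprocessor */

	return crb->csb.cc == CSB_CC_SUCCESS ? 0 : -EIO;
}
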
diff --git a/kernel/arch/powerpc/include/asm/io.h b/kernel/arch/powerpc/include/asm/io.h
index a8d2ef30d..5879fde56 100644
--- a/kernel/arch/powerpc/include/asm/io.h
+++ b/kernel/arch/powerpc/include/asm/io.h
@@ -721,6 +721,7 @@ extern void __iomem *ioremap_prot(phys_addr_t address, unsigned long size,
unsigned long flags);
extern void __iomem *ioremap_wc(phys_addr_t address, unsigned long size);
#define ioremap_nocache(addr, size) ioremap((addr), (size))
+#define ioremap_uc(addr, size) ioremap((addr), (size))
extern void iounmap(volatile void __iomem *addr);
diff --git a/kernel/arch/powerpc/include/asm/iommu.h b/kernel/arch/powerpc/include/asm/iommu.h
index 1e27d6338..7b87bab09 100644
--- a/kernel/arch/powerpc/include/asm/iommu.h
+++ b/kernel/arch/powerpc/include/asm/iommu.h
@@ -2,17 +2,17 @@
* Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
* Rewrite, cleanup:
* Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
- *
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
+ *
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
@@ -44,6 +44,39 @@
extern int iommu_is_off;
extern int iommu_force_on;
+struct iommu_table_ops {
+ /*
+ * When called with direction==DMA_NONE, it is equal to clear().
+ * uaddr is a linear map address.
+ */
+ int (*set)(struct iommu_table *tbl,
+ long index, long npages,
+ unsigned long uaddr,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs);
+#ifdef CONFIG_IOMMU_API
+ /*
+ * Exchanges existing TCE with new TCE plus direction bits;
+ * returns old TCE and DMA direction mask.
+ * @tce is a physical address.
+ */
+ int (*exchange)(struct iommu_table *tbl,
+ long index,
+ unsigned long *hpa,
+ enum dma_data_direction *direction);
+#endif
+ void (*clear)(struct iommu_table *tbl,
+ long index, long npages);
+ /* get() returns a physical address */
+ unsigned long (*get)(struct iommu_table *tbl, long index);
+ void (*flush)(struct iommu_table *tbl);
+ void (*free)(struct iommu_table *tbl);
+};
+
+/* These are used by VIO */
+extern struct iommu_table_ops iommu_table_lpar_multi_ops;
+extern struct iommu_table_ops iommu_table_pseries_ops;
+
/*
* IOMAP_MAX_ORDER defines the largest contiguous block
* of dma space we can get. IOMAP_MAX_ORDER = 13
@@ -64,6 +97,9 @@ struct iommu_pool {
struct iommu_table {
unsigned long it_busno; /* Bus number this table belongs to */
unsigned long it_size; /* Size of iommu table in entries */
+ unsigned long it_indirect_levels;
+ unsigned long it_level_size;
+ unsigned long it_allocated_size;
unsigned long it_offset; /* Offset into global table */
unsigned long it_base; /* mapped address of tce table */
unsigned long it_index; /* which iommu table this is */
@@ -75,15 +111,16 @@ struct iommu_table {
struct iommu_pool pools[IOMMU_NR_POOLS];
unsigned long *it_map; /* A simple allocation bitmap for now */
unsigned long it_page_shift;/* table iommu page size */
-#ifdef CONFIG_IOMMU_API
- struct iommu_group *it_group;
-#endif
- void (*set_bypass)(struct iommu_table *tbl, bool enable);
-#ifdef CONFIG_PPC_POWERNV
- void *data;
-#endif
+ struct list_head it_group_list;/* List of iommu_table_group_link */
+ unsigned long *it_userspace; /* userspace view of the table */
+ struct iommu_table_ops *it_ops;
};
+#define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \
+ ((tbl)->it_userspace ? \
+ &((tbl)->it_userspace[(entry) - (tbl)->it_offset]) : \
+ NULL)
+
/* Pure 2^n version of get_order */
static inline __attribute_const__
int get_iommu_order(unsigned long size, struct iommu_table *tbl)
@@ -94,16 +131,21 @@ int get_iommu_order(unsigned long size, struct iommu_table *tbl)
struct scatterlist;
-static inline void set_iommu_table_base(struct device *dev, void *base)
+#ifdef CONFIG_PPC64
+
+static inline void set_iommu_table_base(struct device *dev,
+ struct iommu_table *base)
{
- dev->archdata.dma_data.iommu_table_base = base;
+ dev->archdata.iommu_table_base = base;
}
static inline void *get_iommu_table_base(struct device *dev)
{
- return dev->archdata.dma_data.iommu_table_base;
+ return dev->archdata.iommu_table_base;
}
+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
+
/* Frees table for an individual device node */
extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);
@@ -112,14 +154,62 @@ extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);
*/
extern struct iommu_table *iommu_init_table(struct iommu_table * tbl,
int nid);
+#define IOMMU_TABLE_GROUP_MAX_TABLES 2
+
+struct iommu_table_group;
+
+struct iommu_table_group_ops {
+ unsigned long (*get_table_size)(
+ __u32 page_shift,
+ __u64 window_size,
+ __u32 levels);
+ long (*create_table)(struct iommu_table_group *table_group,
+ int num,
+ __u32 page_shift,
+ __u64 window_size,
+ __u32 levels,
+ struct iommu_table **ptbl);
+ long (*set_window)(struct iommu_table_group *table_group,
+ int num,
+ struct iommu_table *tblnew);
+ long (*unset_window)(struct iommu_table_group *table_group,
+ int num);
+ /* Switch ownership from platform code to external user (e.g. VFIO) */
+ void (*take_ownership)(struct iommu_table_group *table_group);
+ /* Switch ownership from external user (e.g. VFIO) back to core */
+ void (*release_ownership)(struct iommu_table_group *table_group);
+};
+
+struct iommu_table_group_link {
+ struct list_head next;
+ struct rcu_head rcu;
+ struct iommu_table_group *table_group;
+};
+
+struct iommu_table_group {
+ /* IOMMU properties */
+ __u32 tce32_start;
+ __u32 tce32_size;
+ __u64 pgsizes; /* Bitmap of supported page sizes */
+ __u32 max_dynamic_windows_supported;
+ __u32 max_levels;
+
+ struct iommu_group *group;
+ struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
+ struct iommu_table_group_ops *ops;
+};
+
#ifdef CONFIG_IOMMU_API
-extern void iommu_register_group(struct iommu_table *tbl,
+
+extern void iommu_register_group(struct iommu_table_group *table_group,
int pci_domain_number, unsigned long pe_num);
extern int iommu_add_device(struct device *dev);
extern void iommu_del_device(struct device *dev);
extern int __init tce_iommu_bus_notifier_init(void);
+extern long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
+ unsigned long *hpa, enum dma_data_direction *direction);
#else
-static inline void iommu_register_group(struct iommu_table *tbl,
+static inline void iommu_register_group(struct iommu_table_group *table_group,
int pci_domain_number,
unsigned long pe_num)
{
@@ -140,13 +230,20 @@ static inline int __init tce_iommu_bus_notifier_init(void)
}
#endif /* !CONFIG_IOMMU_API */
-static inline void set_iommu_table_base_and_group(struct device *dev,
- void *base)
+#else
+
+static inline void *get_iommu_table_base(struct device *dev)
{
- set_iommu_table_base(dev, base);
- iommu_add_device(dev);
+ return NULL;
}
+static inline int dma_iommu_dma_supported(struct device *dev, u64 mask)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PPC64 */
+
extern int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
struct scatterlist *sglist, int nelems,
unsigned long mask,
@@ -197,20 +294,13 @@ extern int iommu_tce_clear_param_check(struct iommu_table *tbl,
unsigned long npages);
extern int iommu_tce_put_param_check(struct iommu_table *tbl,
unsigned long ioba, unsigned long tce);
-extern int iommu_tce_build(struct iommu_table *tbl, unsigned long entry,
- unsigned long hwaddr, enum dma_data_direction direction);
-extern unsigned long iommu_clear_tce(struct iommu_table *tbl,
- unsigned long entry);
-extern int iommu_clear_tces_and_put_pages(struct iommu_table *tbl,
- unsigned long entry, unsigned long pages);
-extern int iommu_put_tce_user_mode(struct iommu_table *tbl,
- unsigned long entry, unsigned long tce);
extern void iommu_flush_tce(struct iommu_table *tbl);
extern int iommu_take_ownership(struct iommu_table *tbl);
extern void iommu_release_ownership(struct iommu_table *tbl);
extern enum dma_data_direction iommu_tce_direction(unsigned long tce);
+extern unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir);
#endif /* __KERNEL__ */
#endif /* _ASM_IOMMU_H */
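
The iommu.h rework replaces the per-machdep tce_build/tce_free hooks (removed from machdep.h below) with a per-table iommu_table_ops, and introduces iommu_table_group for multi-window ownership hand-off to external users such as VFIO. A hypothetical sketch of how a platform might wire its TCE helpers into the new indirection (all my_* names are placeholders with stub bodies):

static int my_tce_build(struct iommu_table *tbl, long index, long npages,
			unsigned long uaddr, enum dma_data_direction direction,
			struct dma_attrs *attrs)
{
	/* program npages TCEs starting at index to translate to uaddr */
	return 0;
}

static void my_tce_free(struct iommu_table *tbl, long index, long npages)
{
	/* invalidate npages TCEs starting at index */
}

static struct iommu_table_ops my_iommu_ops = {
	.set	= my_tce_build,
	.clear	= my_tce_free,
};

static void my_setup_table(struct iommu_table *tbl, int nid)
{
	tbl->it_ops = &my_iommu_ops;	/* install the ops before initialising */
	iommu_init_table(tbl, nid);
}
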
diff --git a/kernel/arch/powerpc/include/asm/jump_label.h b/kernel/arch/powerpc/include/asm/jump_label.h
index efbf9a322..47e155f15 100644
--- a/kernel/arch/powerpc/include/asm/jump_label.h
+++ b/kernel/arch/powerpc/include/asm/jump_label.h
@@ -18,14 +18,29 @@
#define JUMP_ENTRY_TYPE stringify_in_c(FTR_ENTRY_LONG)
#define JUMP_LABEL_NOP_SIZE 4
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
asm_volatile_goto("1:\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
".popsection \n\t"
- : : "i" (key) : : l_yes);
+ : : "i" (&((char *)key)[branch]) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+ asm_volatile_goto("1:\n\t"
+ "b %l[l_yes]\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
+ ".popsection \n\t"
+ : : "i" (&((char *)key)[branch]) : : l_yes);
+
return false;
l_yes:
return true;
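
The jump_label.h hunk adapts powerpc to the two-variant static-key API: arch_static_branch() emits a nop, arch_static_branch_jump() emits an unconditional branch, and the low bit of the key pointer recorded in the jump table encodes which variant to patch back in. A generic, hypothetical consumer of that machinery (not part of this patch):

#include <linux/jump_label.h>
#include <linux/printk.h>

static DEFINE_STATIC_KEY_FALSE(my_feature_key);

static void my_fast_path(void)
{
	/* compiles down to the nop emitted by arch_static_branch() above */
	if (static_branch_unlikely(&my_feature_key))
		pr_info("feature enabled, taking the slow path\n");
}

static void my_enable_feature(void)
{
	static_branch_enable(&my_feature_key);	/* patches the nop into a branch */
}
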
diff --git a/kernel/arch/powerpc/include/asm/kvm_book3s.h b/kernel/arch/powerpc/include/asm/kvm_book3s.h
index b91e74a81..9fac01cb8 100644
--- a/kernel/arch/powerpc/include/asm/kvm_book3s.h
+++ b/kernel/arch/powerpc/include/asm/kvm_book3s.h
@@ -158,6 +158,7 @@ extern pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
bool *writable);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
unsigned long *rmap, long pte_index, int realmode);
+extern void kvmppc_update_rmap_change(unsigned long *rmap, unsigned long psize);
extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
unsigned long pte_index);
void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
@@ -225,12 +226,12 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
return vcpu->arch.cr;
}
-static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.xer = val;
}
-static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
return vcpu->arch.xer;
}
diff --git a/kernel/arch/powerpc/include/asm/kvm_book3s_64.h b/kernel/arch/powerpc/include/asm/kvm_book3s_64.h
index 3536d12eb..2aa79c864 100644
--- a/kernel/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/kernel/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -430,7 +430,7 @@ static inline void note_hpte_modification(struct kvm *kvm,
*/
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
- return rcu_dereference_raw_notrace(kvm->memslots);
+ return rcu_dereference_raw_notrace(kvm->memslots[0]);
}
extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
diff --git a/kernel/arch/powerpc/include/asm/kvm_book3s_asm.h b/kernel/arch/powerpc/include/asm/kvm_book3s_asm.h
index 5bdfb5dd3..72b6225ac 100644
--- a/kernel/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/kernel/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -25,6 +25,12 @@
#define XICS_MFRR 0xc
#define XICS_IPI 2 /* interrupt source # for IPIs */
+/* Maximum number of threads per physical core */
+#define MAX_SMT_THREADS 8
+
+/* Maximum number of subcores per physical core */
+#define MAX_SUBCORES 4
+
#ifdef __ASSEMBLY__
#ifdef CONFIG_KVM_BOOK3S_HANDLER
@@ -65,6 +71,19 @@ kvmppc_resume_\intno:
#else /*__ASSEMBLY__ */
+struct kvmppc_vcore;
+
+/* Struct used for coordinating micro-threading (split-core) mode changes */
+struct kvm_split_mode {
+ unsigned long rpr;
+ unsigned long pmmar;
+ unsigned long ldbar;
+ u8 subcore_size;
+ u8 do_nap;
+ u8 napped[MAX_SMT_THREADS];
+ struct kvmppc_vcore *master_vcs[MAX_SUBCORES];
+};
+
/*
* This struct goes in the PACA on 64-bit processors. It is used
* to store host state that needs to be saved when we enter a guest
@@ -100,6 +119,7 @@ struct kvmppc_host_state {
u64 host_spurr;
u64 host_dscr;
u64 dec_expires;
+ struct kvm_split_mode *kvm_split_mode;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
u64 cfar;
@@ -112,7 +132,7 @@ struct kvmppc_book3s_shadow_vcpu {
bool in_use;
ulong gpr[14];
u32 cr;
- u32 xer;
+ ulong xer;
ulong ctr;
ulong lr;
ulong pc;
diff --git a/kernel/arch/powerpc/include/asm/kvm_booke.h b/kernel/arch/powerpc/include/asm/kvm_booke.h
index 3286f0d6a..bc6e29e4d 100644
--- a/kernel/arch/powerpc/include/asm/kvm_booke.h
+++ b/kernel/arch/powerpc/include/asm/kvm_booke.h
@@ -54,12 +54,12 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
return vcpu->arch.cr;
}
-static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.xer = val;
}
-static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
return vcpu->arch.xer;
}
diff --git a/kernel/arch/powerpc/include/asm/kvm_host.h b/kernel/arch/powerpc/include/asm/kvm_host.h
index a1ddf4080..f8673ff84 100644
--- a/kernel/arch/powerpc/include/asm/kvm_host.h
+++ b/kernel/arch/powerpc/include/asm/kvm_host.h
@@ -44,6 +44,7 @@
#ifdef CONFIG_KVM_MMIO
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#endif
+#define KVM_HALT_POLL_NS_DEFAULT 500000
/* These values are internal and can be increased later */
#define KVM_NR_IRQCHIPS 1
@@ -108,6 +109,7 @@ struct kvm_vcpu_stat {
u32 dec_exits;
u32 ext_intr_exits;
u32 halt_successful_poll;
+ u32 halt_attempted_poll;
u32 halt_wakeup;
u32 dbell_exits;
u32 gdbell_exits;
@@ -205,8 +207,10 @@ struct revmap_entry {
*/
#define KVMPPC_RMAP_LOCK_BIT 63
#define KVMPPC_RMAP_RC_SHIFT 32
+#define KVMPPC_RMAP_CHG_SHIFT 48
#define KVMPPC_RMAP_REFERENCED (HPTE_R_R << KVMPPC_RMAP_RC_SHIFT)
#define KVMPPC_RMAP_CHANGED (HPTE_R_C << KVMPPC_RMAP_RC_SHIFT)
+#define KVMPPC_RMAP_CHG_ORDER (0x3ful << KVMPPC_RMAP_CHG_SHIFT)
#define KVMPPC_RMAP_PRESENT 0x100000000ul
#define KVMPPC_RMAP_INDEX 0xfffffffful
@@ -278,9 +282,11 @@ struct kvmppc_vcore {
u16 last_cpu;
u8 vcore_state;
u8 in_guest;
+ struct kvmppc_vcore *master_vcore;
struct list_head runnable_threads;
+ struct list_head preempt_list;
spinlock_t lock;
- struct swait_head wq;
+ struct swait_queue_head wq;
spinlock_t stoltb_lock; /* protects stolen_tb and preempt_tb */
u64 stolen_tb;
u64 preempt_tb;
@@ -291,8 +297,6 @@ struct kvmppc_vcore {
u32 arch_compat;
ulong pcr;
ulong dpdes; /* doorbell state (POWER8) */
- void *mpp_buffer; /* Micro Partition Prefetch buffer */
- bool mpp_buffer_is_valid;
ulong conferring_threads;
};
@@ -300,12 +304,21 @@ struct kvmppc_vcore {
#define VCORE_EXIT_MAP(vc) ((vc)->entry_exit_map >> 8)
#define VCORE_IS_EXITING(vc) (VCORE_EXIT_MAP(vc) != 0)
-/* Values for vcore_state */
+/* This bit is used when a vcore exit is triggered from outside the vcore */
+#define VCORE_EXIT_REQ 0x10000
+
+/*
+ * Values for vcore_state.
+ * Note that these are arranged such that lower values
+ * (< VCORE_SLEEPING) don't require stolen time accounting
+ * on load/unload, and higher values do.
+ */
#define VCORE_INACTIVE 0
-#define VCORE_SLEEPING 1
-#define VCORE_PREEMPT 2
-#define VCORE_RUNNING 3
-#define VCORE_EXITING 4
+#define VCORE_PREEMPT 1
+#define VCORE_PIGGYBACK 2
+#define VCORE_SLEEPING 3
+#define VCORE_RUNNING 4
+#define VCORE_EXITING 5
/*
* Struct used to manage memory for a virtual processor area
@@ -473,7 +486,7 @@ struct kvm_vcpu_arch {
ulong ciabr;
ulong cfar;
ulong ppr;
- ulong pspb;
+ u32 pspb;
ulong fscr;
ulong shadow_fscr;
ulong ebbhr;
@@ -613,12 +626,13 @@ struct kvm_vcpu_arch {
u8 prodded;
u32 last_inst;
- struct swait_head *wqp;
+ struct swait_queue_head *wqp;
struct kvmppc_vcore *vcore;
int ret;
int trap;
int state;
int ptid;
+ int thread_cpu;
bool timer_running;
wait_queue_head_t cpu_run;
@@ -698,9 +712,11 @@ struct kvm_vcpu_arch {
static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-static inline void kvm_arch_memslots_updated(struct kvm *kvm) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_exit(void) {}
+static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
#endif /* __POWERPC_KVM_HOST_H__ */
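The renumbering of the vcore states in the hunk above is what makes the stolen-time comment work: every state below VCORE_SLEEPING skips the accounting. A minimal user-space sketch of how such a check could look, using the values from this hunk; the helper name is hypothetical and not part of the patch.

#include <assert.h>
#include <stdbool.h>

/* Values copied from the hunk above. */
#define VCORE_INACTIVE  0
#define VCORE_PREEMPT   1
#define VCORE_PIGGYBACK 2
#define VCORE_SLEEPING  3
#define VCORE_RUNNING   4
#define VCORE_EXITING   5

/* Hypothetical helper: with the new ordering a single compare suffices. */
static bool vcore_needs_stolen_time_accounting(int vcore_state)
{
	return vcore_state >= VCORE_SLEEPING;
}

int main(void)
{
	assert(!vcore_needs_stolen_time_accounting(VCORE_PREEMPT));
	assert(vcore_needs_stolen_time_accounting(VCORE_RUNNING));
	return 0;
}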
diff --git a/kernel/arch/powerpc/include/asm/kvm_ppc.h b/kernel/arch/powerpc/include/asm/kvm_ppc.h
index b8475daad..c6ef05bd0 100644
--- a/kernel/arch/powerpc/include/asm/kvm_ppc.h
+++ b/kernel/arch/powerpc/include/asm/kvm_ppc.h
@@ -182,10 +182,11 @@ extern int kvmppc_core_create_memslot(struct kvm *kvm,
unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
- struct kvm_userspace_memory_region *mem);
+ const struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old);
+ const struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
@@ -243,10 +244,11 @@ struct kvmppc_ops {
void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
int (*prepare_memory_region)(struct kvm *kvm,
struct kvm_memory_slot *memslot,
- struct kvm_userspace_memory_region *mem);
+ const struct kvm_userspace_memory_region *mem);
void (*commit_memory_region)(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old);
+ const struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new);
int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
unsigned long end);
diff --git a/kernel/arch/powerpc/include/asm/machdep.h b/kernel/arch/powerpc/include/asm/machdep.h
index ef8899432..3f191f573 100644
--- a/kernel/arch/powerpc/include/asm/machdep.h
+++ b/kernel/arch/powerpc/include/asm/machdep.h
@@ -61,35 +61,15 @@ struct machdep_calls {
unsigned long addr,
unsigned char *hpte_slot_array,
int psize, int ssize, int local);
- /* special for kexec, to be called in real mode, linear mapping is
- * destroyed as well */
+ /*
+ * Special for kexec.
+ * To be called in real mode with interrupts disabled. No locks are
+ * taken, so concurrent access on pre-POWER5 hardware could result
+ * in a deadlock.
+ * The linear mapping is destroyed as well.
+ */
void (*hpte_clear_all)(void);
- int (*tce_build)(struct iommu_table *tbl,
- long index,
- long npages,
- unsigned long uaddr,
- enum dma_data_direction direction,
- struct dma_attrs *attrs);
- void (*tce_free)(struct iommu_table *tbl,
- long index,
- long npages);
- unsigned long (*tce_get)(struct iommu_table *tbl,
- long index);
- void (*tce_flush)(struct iommu_table *tbl);
-
- /* _rm versions are for real mode use only */
- int (*tce_build_rm)(struct iommu_table *tbl,
- long index,
- long npages,
- unsigned long uaddr,
- enum dma_data_direction direction,
- struct dma_attrs *attrs);
- void (*tce_free_rm)(struct iommu_table *tbl,
- long index,
- long npages);
- void (*tce_flush_rm)(struct iommu_table *tbl);
-
void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size,
unsigned long flags, void *caller);
void (*iounmap)(volatile void __iomem *token);
@@ -131,12 +111,6 @@ struct machdep_calls {
/* To setup PHBs when using automatic OF platform driver for PCI */
int (*pci_setup_phb)(struct pci_controller *host);
-#ifdef CONFIG_PCI_MSI
- int (*setup_msi_irqs)(struct pci_dev *dev,
- int nvec, int type);
- void (*teardown_msi_irqs)(struct pci_dev *dev);
-#endif
-
void (*restart)(char *cmd);
void (*halt)(void);
void (*panic)(char *str);
@@ -280,7 +254,7 @@ struct machdep_calls {
#endif
#ifdef CONFIG_ARCH_RANDOM
- int (*get_random_long)(unsigned long *v);
+ int (*get_random_seed)(unsigned long *v);
#endif
};
diff --git a/kernel/arch/powerpc/include/asm/mm-arch-hooks.h b/kernel/arch/powerpc/include/asm/mm-arch-hooks.h
new file mode 100644
index 000000000..f2a2da895
--- /dev/null
+++ b/kernel/arch/powerpc/include/asm/mm-arch-hooks.h
@@ -0,0 +1,28 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_POWERPC_MM_ARCH_HOOKS_H
+#define _ASM_POWERPC_MM_ARCH_HOOKS_H
+
+static inline void arch_remap(struct mm_struct *mm,
+ unsigned long old_start, unsigned long old_end,
+ unsigned long new_start, unsigned long new_end)
+{
+ /*
+ * mremap() doesn't allow moving multiple vmas so we can limit the
+ * check to old_start == vdso_base.
+ */
+ if (old_start == mm->context.vdso_base)
+ mm->context.vdso_base = new_start;
+}
+#define arch_remap arch_remap
+
+#endif /* _ASM_POWERPC_MM_ARCH_HOOKS_H */
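A user-space sketch, not part of the patch, of the invariant arch_remap() maintains together with arch_unmap() (added in the mmu_context.h hunk further down): the cached vdso_base follows the VDSO when it is moved and is cleared when it is unmapped. The context struct here is a stand-in, not the kernel's mm_context_t.

#include <assert.h>

/* Stand-in for the relevant part of mm->context. */
struct mm_context_sketch {
	unsigned long vdso_base;
};

/* Mirrors arch_remap() from the hunk above. */
static void sketch_remap(struct mm_context_sketch *ctx,
			 unsigned long old_start, unsigned long new_start)
{
	if (old_start == ctx->vdso_base)
		ctx->vdso_base = new_start;
}

/* Mirrors arch_unmap() from the mmu_context.h hunk below. */
static void sketch_unmap(struct mm_context_sketch *ctx,
			 unsigned long start, unsigned long end)
{
	if (start <= ctx->vdso_base && ctx->vdso_base < end)
		ctx->vdso_base = 0;
}

int main(void)
{
	struct mm_context_sketch ctx = { .vdso_base = 0x7000 };

	sketch_remap(&ctx, 0x7000, 0x9000);	/* mremap() moved the VDSO */
	assert(ctx.vdso_base == 0x9000);

	sketch_unmap(&ctx, 0x8000, 0xa000);	/* munmap() covered it */
	assert(ctx.vdso_base == 0);
	return 0;
}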
diff --git a/kernel/arch/powerpc/include/asm/mmu-8xx.h b/kernel/arch/powerpc/include/asm/mmu-8xx.h
index 986b9e1e1..f05500a29 100644
--- a/kernel/arch/powerpc/include/asm/mmu-8xx.h
+++ b/kernel/arch/powerpc/include/asm/mmu-8xx.h
@@ -27,6 +27,19 @@
#define MI_Ks 0x80000000 /* Should not be set */
#define MI_Kp 0x40000000 /* Should always be set */
+/*
+ * All pages' PP exec bits are set to 000, which means Execute for Supervisor
+ * and no Execute for User.
+ * Then we use the APG to say whether accesses follow the Page rules, the
+ * "all Supervisor" rules (Exec for all), or the "all User" rules (Exec for
+ * no one). Therefore, we define 4 APG groups; msb is _PAGE_EXEC, lsb is
+ * _PAGE_USER.
+ * 0 (00) => Not User, no exec => 11 (all accesses performed as user)
+ * 1 (01) => User but no exec => 11 (all accesses performed as user)
+ * 2 (10) => Not User, exec => 01 (rights according to page definition)
+ * 3 (11) => User, exec => 00 (all accesses performed as supervisor)
+ */
+#define MI_APG_INIT 0xf4ffffff
+
/* The effective page number register. When read, contains the information
* about the last instruction TLB miss. When MI_RPN is written, bits in
* this register are used to create the TLB entry.
@@ -87,6 +100,19 @@
#define MD_Ks 0x80000000 /* Should not be set */
#define MD_Kp 0x40000000 /* Should always be set */
+/*
+ * All pages' PP data bits are set to either 000 or 011, which means
+ * respectively RW for Supervisor and no access for User, or RO for
+ * Supervisor and no access for User.
+ * Then we use the APG to say whether accesses follow the Page rules or the
+ * "all Supervisor" rules (Access to all).
+ * Therefore, we define 2 APG groups; lsb is _PAGE_USER.
+ * 0 => No user => 01 (all accesses performed according to page definition)
+ * 1 => User => 00 (all accesses performed as supervisor
+ * according to page definition)
+ */
+#define MD_APG_INIT 0x4fffffff
+
/* The effective page number register. When read, contains the information
* about the last instruction TLB miss. When MD_RPN is written, bits in
* this register are used to create the TLB entry.
@@ -145,7 +171,14 @@ typedef struct {
} mm_context_t;
#endif /* !__ASSEMBLY__ */
+#if (PAGE_SHIFT == 12)
#define mmu_virtual_psize MMU_PAGE_4K
+#elif (PAGE_SHIFT == 14)
+#define mmu_virtual_psize MMU_PAGE_16K
+#else
+#error "Unsupported PAGE_SIZE"
+#endif
+
#define mmu_linear_psize MMU_PAGE_8M
#endif /* _ASM_POWERPC_MMU_8XX_H_ */
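The MI_APG_INIT and MD_APG_INIT constants above pack one 2-bit access code per APG group, with group 0 in the two most-significant bits and unused groups set to 0b11. A user-space sketch deriving both values from the tables in the comments; the packing helper is the editor's, not part of the patch.

#include <assert.h>
#include <stdint.h>

/* Pack one 2-bit access code per APG group, group 0 in the top two bits. */
static uint32_t apg_pack(const uint8_t *codes, int ngroups)
{
	uint32_t val = 0;

	for (int i = 0; i < 16; i++) {
		uint8_t code = (i < ngroups) ? codes[i] : 0x3; /* unused: 11 */

		val |= (uint32_t)(code & 0x3) << (30 - 2 * i);
	}
	return val;
}

int main(void)
{
	/* Access codes from the comment tables above. */
	const uint8_t mi_codes[] = { 0x3, 0x3, 0x1, 0x0 };	/* ITLB groups 0-3 */
	const uint8_t md_codes[] = { 0x1, 0x0 };		/* DTLB groups 0-1 */

	assert(apg_pack(mi_codes, 4) == 0xf4ffffff);	/* MI_APG_INIT */
	assert(apg_pack(md_codes, 2) == 0x4fffffff);	/* MD_APG_INIT */
	return 0;
}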
diff --git a/kernel/arch/powerpc/include/asm/mmu-hash64.h b/kernel/arch/powerpc/include/asm/mmu-hash64.h
index 1da6a81ce..ba3342bbd 100644
--- a/kernel/arch/powerpc/include/asm/mmu-hash64.h
+++ b/kernel/arch/powerpc/include/asm/mmu-hash64.h
@@ -14,6 +14,7 @@
#include <asm/asm-compat.h>
#include <asm/page.h>
+#include <asm/bug.h>
/*
* This is necessary to get the definition of PGTABLE_RANGE which we
@@ -536,6 +537,9 @@ typedef struct {
/* for 4K PTE fragment support */
void *pte_frag;
#endif
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+ struct list_head iommu_group_mem_list;
+#endif
} mm_context_t;
diff --git a/kernel/arch/powerpc/include/asm/mmu_context.h b/kernel/arch/powerpc/include/asm/mmu_context.h
index 73382eba0..878c27771 100644
--- a/kernel/arch/powerpc/include/asm/mmu_context.h
+++ b/kernel/arch/powerpc/include/asm/mmu_context.h
@@ -8,7 +8,6 @@
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
-#include <asm-generic/mm_hooks.h>
#include <asm/cputhreads.h>
/*
@@ -16,6 +15,24 @@
*/
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+struct mm_iommu_table_group_mem_t;
+
+extern bool mm_iommu_preregistered(void);
+extern long mm_iommu_get(unsigned long ua, unsigned long entries,
+ struct mm_iommu_table_group_mem_t **pmem);
+extern long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem);
+extern void mm_iommu_init(mm_context_t *ctx);
+extern void mm_iommu_cleanup(mm_context_t *ctx);
+extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
+ unsigned long size);
+extern struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
+ unsigned long entries);
+extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
+ unsigned long ua, unsigned long *hpa);
+extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
+extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
+#endif
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
@@ -109,5 +126,27 @@ static inline void enter_lazy_tlb(struct mm_struct *mm,
#endif
}
+static inline void arch_dup_mmap(struct mm_struct *oldmm,
+ struct mm_struct *mm)
+{
+}
+
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+}
+
+static inline void arch_unmap(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
+ mm->context.vdso_base = 0;
+}
+
+static inline void arch_bprm_mm_init(struct mm_struct *mm,
+ struct vm_area_struct *vma)
+{
+}
+
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */
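A rough sketch of how a VFIO-style caller might use the mm_iommu_* pre-registration API declared above: register a chunk of userspace memory, translate a userspace address to a host physical address, then drop the registration. This assumes a kernel context and that the functions return 0 on success; error handling beyond that is elided and the function name is the editor's.

/* Sketch only; assumes a kernel build with the declarations above. */
static long preregister_and_translate(unsigned long ua, unsigned long entries)
{
	struct mm_iommu_table_group_mem_t *mem;
	unsigned long hpa = 0;
	long ret;

	ret = mm_iommu_get(ua, entries, &mem);	/* pin and account the pages */
	if (ret)
		return ret;

	ret = mm_iommu_ua_to_hpa(mem, ua, &hpa); /* userspace addr -> host phys */

	mm_iommu_put(mem);			/* drop the registration */
	return ret;
}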
diff --git a/kernel/arch/powerpc/include/asm/mpc5121.h b/kernel/arch/powerpc/include/asm/mpc5121.h
index 4a69cd1d5..deaeb0b1f 100644
--- a/kernel/arch/powerpc/include/asm/mpc5121.h
+++ b/kernel/arch/powerpc/include/asm/mpc5121.h
@@ -60,4 +60,63 @@ struct mpc512x_lpc {
int mpc512x_cs_config(unsigned int cs, u32 val);
+/*
+ * SCLPC Module (LPB FIFO)
+ */
+struct mpc512x_lpbfifo {
+ u32 pkt_size; /* SCLPC Packet Size Register */
+ u32 start_addr; /* SCLPC Start Address Register */
+ u32 ctrl; /* SCLPC Control Register */
+ u32 enable; /* SCLPC Enable Register */
+ u32 reserved1;
+ u32 status; /* SCLPC Status Register */
+ u32 bytes_done; /* SCLPC Bytes Done Register */
+ u32 emb_sc; /* EMB Share Counter Register */
+ u32 emb_pc; /* EMB Pause Control Register */
+ u32 reserved2[7];
+ u32 data_word; /* LPC RX/TX FIFO Data Word Register */
+ u32 fifo_status; /* LPC RX/TX FIFO Status Register */
+ u32 fifo_ctrl; /* LPC RX/TX FIFO Control Register */
+ u32 fifo_alarm; /* LPC RX/TX FIFO Alarm Register */
+};
+
+#define MPC512X_SCLPC_START (1 << 31)
+#define MPC512X_SCLPC_CS(x) (((x) & 0x7) << 24)
+#define MPC512X_SCLPC_FLUSH (1 << 17)
+#define MPC512X_SCLPC_READ (1 << 16)
+#define MPC512X_SCLPC_DAI (1 << 8)
+#define MPC512X_SCLPC_BPT(x) ((x) & 0x3f)
+#define MPC512X_SCLPC_RESET (1 << 24)
+#define MPC512X_SCLPC_FIFO_RESET (1 << 16)
+#define MPC512X_SCLPC_ABORT_INT_ENABLE (1 << 9)
+#define MPC512X_SCLPC_NORM_INT_ENABLE (1 << 8)
+#define MPC512X_SCLPC_ENABLE (1 << 0)
+#define MPC512X_SCLPC_SUCCESS (1 << 24)
+#define MPC512X_SCLPC_FIFO_CTRL(x) (((x) & 0x7) << 24)
+#define MPC512X_SCLPC_FIFO_ALARM(x) ((x) & 0x3ff)
+
+enum lpb_dev_portsize {
+ LPB_DEV_PORTSIZE_UNDEFINED = 0,
+ LPB_DEV_PORTSIZE_1_BYTE = 1,
+ LPB_DEV_PORTSIZE_2_BYTES = 2,
+ LPB_DEV_PORTSIZE_4_BYTES = 4,
+ LPB_DEV_PORTSIZE_8_BYTES = 8
+};
+
+enum mpc512x_lpbfifo_req_dir {
+ MPC512X_LPBFIFO_REQ_DIR_READ,
+ MPC512X_LPBFIFO_REQ_DIR_WRITE
+};
+
+struct mpc512x_lpbfifo_request {
+ phys_addr_t dev_phys_addr; /* physical address of some device on LPB */
+ void *ram_virt_addr; /* virtual address of some region in RAM */
+ u32 size;
+ enum lpb_dev_portsize portsize;
+ enum mpc512x_lpbfifo_req_dir dir;
+ void (*callback)(struct mpc512x_lpbfifo_request *);
+};
+
+int mpc512x_lpbfifo_submit(struct mpc512x_lpbfifo_request *req);
+
#endif /* __ASM_POWERPC_MPC5121_H__ */
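A sketch of how a driver could fill in the new LPB FIFO request structure and hand it to mpc512x_lpbfifo_submit(); the device address, buffer, and callback are placeholders, and the sketch assumes a kernel context.

/* Sketch only; a real driver must keep the request alive until the
 * callback runs, hence the static request here. */
static struct mpc512x_lpbfifo_request my_req;

static void my_lpbfifo_done(struct mpc512x_lpbfifo_request *req)
{
	/* Transfer finished; for a read, req->ram_virt_addr holds the data. */
}

static int my_read_from_lpb(phys_addr_t dev_addr, void *buf, u32 len)
{
	my_req.dev_phys_addr = dev_addr;	/* device on the LocalPlus bus */
	my_req.ram_virt_addr = buf;		/* destination buffer in RAM */
	my_req.size = len;
	my_req.portsize = LPB_DEV_PORTSIZE_4_BYTES;
	my_req.dir = MPC512X_LPBFIFO_REQ_DIR_READ;
	my_req.callback = my_lpbfifo_done;

	return mpc512x_lpbfifo_submit(&my_req);
}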
diff --git a/kernel/arch/powerpc/include/asm/mpc52xx_psc.h b/kernel/arch/powerpc/include/asm/mpc52xx_psc.h
index d0ece257d..ec995b289 100644
--- a/kernel/arch/powerpc/include/asm/mpc52xx_psc.h
+++ b/kernel/arch/powerpc/include/asm/mpc52xx_psc.h
@@ -150,7 +150,10 @@
/* Structure of the hardware registers */
struct mpc52xx_psc {
- u8 mode; /* PSC + 0x00 */
+ union {
+ u8 mode; /* PSC + 0x00 */
+ u8 mr2;
+ };
u8 reserved0[3];
union { /* PSC + 0x04 */
u16 status;
@@ -258,8 +261,6 @@ struct mpc52xx_psc_fifo {
#define MPC512x_PSC_FIFO_FULL 0x2
#define MPC512x_PSC_FIFO_ALARM 0x4
#define MPC512x_PSC_FIFO_URERR 0x8
-#define MPC512x_PSC_FIFO_ORERR 0x01
-#define MPC512x_PSC_FIFO_MEMERROR 0x02
struct mpc512x_psc_fifo {
u32 reserved1[10];
diff --git a/kernel/arch/powerpc/include/asm/msi_bitmap.h b/kernel/arch/powerpc/include/asm/msi_bitmap.h
index 97ac3f46a..1ec712555 100644
--- a/kernel/arch/powerpc/include/asm/msi_bitmap.h
+++ b/kernel/arch/powerpc/include/asm/msi_bitmap.h
@@ -19,6 +19,7 @@ struct msi_bitmap {
unsigned long *bitmap;
spinlock_t lock;
unsigned int irq_count;
+ bool bitmap_from_slab;
};
int msi_bitmap_alloc_hwirqs(struct msi_bitmap *bmp, int num);
diff --git a/kernel/arch/powerpc/include/asm/opal-api.h b/kernel/arch/powerpc/include/asm/opal-api.h
index 0321a909e..f8faaaeec 100644
--- a/kernel/arch/powerpc/include/asm/opal-api.h
+++ b/kernel/arch/powerpc/include/asm/opal-api.h
@@ -153,7 +153,12 @@
#define OPAL_FLASH_READ 110
#define OPAL_FLASH_WRITE 111
#define OPAL_FLASH_ERASE 112
-#define OPAL_LAST 112
+#define OPAL_PRD_MSG 113
+#define OPAL_LEDS_GET_INDICATOR 114
+#define OPAL_LEDS_SET_INDICATOR 115
+#define OPAL_CEC_REBOOT2 116
+#define OPAL_CONSOLE_FLUSH 117
+#define OPAL_LAST 117
/* Device tree flags */
@@ -165,6 +170,13 @@
#define OPAL_PM_WINKLE_ENABLED 0x00040000
#define OPAL_PM_SLEEP_ENABLED_ER1 0x00080000 /* with workaround */
+/*
+ * OPAL_CONFIG_CPU_IDLE_STATE parameters
+ */
+#define OPAL_CONFIG_IDLE_FASTSLEEP 1
+#define OPAL_CONFIG_IDLE_UNDO 0
+#define OPAL_CONFIG_IDLE_APPLY 1
+
#ifndef __ASSEMBLY__
/* Other enums */
@@ -332,6 +344,18 @@ enum OpalPciResetState {
OPAL_ASSERT_RESET = 1
};
+enum OpalSlotLedType {
+ OPAL_SLOT_LED_TYPE_ID = 0, /* IDENTIFY LED */
+ OPAL_SLOT_LED_TYPE_FAULT = 1, /* FAULT LED */
+ OPAL_SLOT_LED_TYPE_ATTN = 2, /* System Attention LED */
+ OPAL_SLOT_LED_TYPE_MAX = 3
+};
+
+enum OpalSlotLedState {
+ OPAL_SLOT_LED_STATE_OFF = 0, /* LED is OFF */
+ OPAL_SLOT_LED_STATE_ON = 1 /* LED is ON */
+};
+
/*
* Address cycle types for LPC accesses. These also correspond
* to the content of the first cell of the "reg" property for
@@ -352,6 +376,8 @@ enum opal_msg_type {
OPAL_MSG_SHUTDOWN, /* params[0] = 1 reboot, 0 shutdown */
OPAL_MSG_HMI_EVT,
OPAL_MSG_DPO,
+ OPAL_MSG_PRD,
+ OPAL_MSG_OCC,
OPAL_MSG_TYPE_MAX,
};
@@ -428,6 +454,7 @@ struct OpalMemoryErrorData {
/* HMI interrupt event */
enum OpalHMI_Version {
OpalHMIEvt_V1 = 1,
+ OpalHMIEvt_V2 = 2,
};
enum OpalHMI_Severity {
@@ -458,6 +485,49 @@ enum OpalHMI_ErrType {
OpalHMI_ERROR_CAPP_RECOVERY,
};
+enum OpalHMI_XstopType {
+ CHECKSTOP_TYPE_UNKNOWN = 0,
+ CHECKSTOP_TYPE_CORE = 1,
+ CHECKSTOP_TYPE_NX = 2,
+};
+
+enum OpalHMI_CoreXstopReason {
+ CORE_CHECKSTOP_IFU_REGFILE = 0x00000001,
+ CORE_CHECKSTOP_IFU_LOGIC = 0x00000002,
+ CORE_CHECKSTOP_PC_DURING_RECOV = 0x00000004,
+ CORE_CHECKSTOP_ISU_REGFILE = 0x00000008,
+ CORE_CHECKSTOP_ISU_LOGIC = 0x00000010,
+ CORE_CHECKSTOP_FXU_LOGIC = 0x00000020,
+ CORE_CHECKSTOP_VSU_LOGIC = 0x00000040,
+ CORE_CHECKSTOP_PC_RECOV_IN_MAINT_MODE = 0x00000080,
+ CORE_CHECKSTOP_LSU_REGFILE = 0x00000100,
+ CORE_CHECKSTOP_PC_FWD_PROGRESS = 0x00000200,
+ CORE_CHECKSTOP_LSU_LOGIC = 0x00000400,
+ CORE_CHECKSTOP_PC_LOGIC = 0x00000800,
+ CORE_CHECKSTOP_PC_HYP_RESOURCE = 0x00001000,
+ CORE_CHECKSTOP_PC_HANG_RECOV_FAILED = 0x00002000,
+ CORE_CHECKSTOP_PC_AMBI_HANG_DETECTED = 0x00004000,
+ CORE_CHECKSTOP_PC_DEBUG_TRIG_ERR_INJ = 0x00008000,
+ CORE_CHECKSTOP_PC_SPRD_HYP_ERR_INJ = 0x00010000,
+};
+
+enum OpalHMI_NestAccelXstopReason {
+ NX_CHECKSTOP_SHM_INVAL_STATE_ERR = 0x00000001,
+ NX_CHECKSTOP_DMA_INVAL_STATE_ERR_1 = 0x00000002,
+ NX_CHECKSTOP_DMA_INVAL_STATE_ERR_2 = 0x00000004,
+ NX_CHECKSTOP_DMA_CH0_INVAL_STATE_ERR = 0x00000008,
+ NX_CHECKSTOP_DMA_CH1_INVAL_STATE_ERR = 0x00000010,
+ NX_CHECKSTOP_DMA_CH2_INVAL_STATE_ERR = 0x00000020,
+ NX_CHECKSTOP_DMA_CH3_INVAL_STATE_ERR = 0x00000040,
+ NX_CHECKSTOP_DMA_CH4_INVAL_STATE_ERR = 0x00000080,
+ NX_CHECKSTOP_DMA_CH5_INVAL_STATE_ERR = 0x00000100,
+ NX_CHECKSTOP_DMA_CH6_INVAL_STATE_ERR = 0x00000200,
+ NX_CHECKSTOP_DMA_CH7_INVAL_STATE_ERR = 0x00000400,
+ NX_CHECKSTOP_DMA_CRB_UE = 0x00000800,
+ NX_CHECKSTOP_DMA_CRB_SUE = 0x00001000,
+ NX_CHECKSTOP_PBI_ISN_UE = 0x00002000,
+};
+
struct OpalHMIEvent {
uint8_t version; /* 0x00 */
uint8_t severity; /* 0x01 */
@@ -468,6 +538,23 @@ struct OpalHMIEvent {
__be64 hmer;
/* TFMR register. Valid only for TFAC and TFMR_PARITY error type. */
__be64 tfmr;
+
+ /* version 2 and later */
+ union {
+ /*
+ * checkstop info (Core/NX).
+ * Valid for OpalHMI_ERROR_MALFUNC_ALERT.
+ */
+ struct {
+ uint8_t xstop_type; /* enum OpalHMI_XstopType */
+ uint8_t reserved_1[3];
+ __be32 xstop_reason;
+ union {
+ __be32 pir; /* for CHECKSTOP_TYPE_CORE */
+ __be32 chip_id; /* for CHECKSTOP_TYPE_NX */
+ } u;
+ } xstop_error;
+ } u;
};
enum {
@@ -674,6 +761,34 @@ typedef struct oppanel_line {
__be64 line_len;
} oppanel_line_t;
+enum opal_prd_msg_type {
+ OPAL_PRD_MSG_TYPE_INIT = 0, /* HBRT --> OPAL */
+ OPAL_PRD_MSG_TYPE_FINI, /* HBRT/kernel --> OPAL */
+ OPAL_PRD_MSG_TYPE_ATTN, /* HBRT <-- OPAL */
+ OPAL_PRD_MSG_TYPE_ATTN_ACK, /* HBRT --> OPAL */
+ OPAL_PRD_MSG_TYPE_OCC_ERROR, /* HBRT <-- OPAL */
+ OPAL_PRD_MSG_TYPE_OCC_RESET, /* HBRT <-- OPAL */
+};
+
+struct opal_prd_msg_header {
+ uint8_t type;
+ uint8_t pad[1];
+ __be16 size;
+};
+
+struct opal_prd_msg;
+
+#define OCC_RESET 0
+#define OCC_LOAD 1
+#define OCC_THROTTLE 2
+#define OCC_MAX_THROTTLE_STATUS 5
+
+struct opal_occ_msg {
+ __be64 type;
+ __be64 chip;
+ __be64 throttle_status;
+};
+
/*
* SG entries
*
@@ -730,6 +845,52 @@ struct opal_i2c_request {
__be64 buffer_ra; /* Buffer real address */
};
+/*
+ * EPOW status sharing (OPAL and the host)
+ *
+ * The host passes OPAL a buffer of OPAL_SYSEPOW_MAX 16-bit elements
+ * to fetch the system-wide EPOW status. Each element in the buffer
+ * holds the EPOW status bits for one EPOW subclass as defined here,
+ * so multiple detailed EPOW status bits for a given subclass can be
+ * represented in a single buffer element.
+ */
+
+/* System EPOW type */
+enum OpalSysEpow {
+ OPAL_SYSEPOW_POWER = 0, /* Power EPOW */
+ OPAL_SYSEPOW_TEMP = 1, /* Temperature EPOW */
+ OPAL_SYSEPOW_COOLING = 2, /* Cooling EPOW */
+ OPAL_SYSEPOW_MAX = 3, /* Max EPOW categories */
+};
+
+/* Power EPOW */
+enum OpalSysPower {
+ OPAL_SYSPOWER_UPS = 0x0001, /* System on UPS power */
+ OPAL_SYSPOWER_CHNG = 0x0002, /* System power config change */
+ OPAL_SYSPOWER_FAIL = 0x0004, /* System impending power failure */
+ OPAL_SYSPOWER_INCL = 0x0008, /* System incomplete power */
+};
+
+/* Temperature EPOW */
+enum OpalSysTemp {
+ OPAL_SYSTEMP_AMB = 0x0001, /* System over ambient temperature */
+ OPAL_SYSTEMP_INT = 0x0002, /* System over internal temperature */
+ OPAL_SYSTEMP_HMD = 0x0004, /* System over ambient humidity */
+};
+
+/* Cooling EPOW */
+enum OpalSysCooling {
+ OPAL_SYSCOOL_INSF = 0x0001, /* System insufficient cooling */
+};
+
+/* Argument to OPAL_CEC_REBOOT2() */
+enum {
+ OPAL_REBOOT_NORMAL = 0,
+ OPAL_REBOOT_PLATFORM_ERROR = 1,
+};
+
#endif /* __ASSEMBLY__ */
#endif /* __OPAL_API_H */
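A sketch of how a consumer could decode the EPOW buffer described above, using the two-argument opal_get_epow_status() prototype from the opal.h hunk below. The in/out use of num_epow_classes is an assumption, and the handling is illustrative only.

/* Sketch only; assumes a kernel context with the OPAL declarations. */
static void check_epow(void)
{
	__be16 epow_status[OPAL_SYSEPOW_MAX];
	/* Assumed in/out: tell OPAL how many classes we can accept. */
	__be16 num_classes = cpu_to_be16(OPAL_SYSEPOW_MAX);
	u16 power;

	if (opal_get_epow_status(epow_status, &num_classes) != OPAL_SUCCESS)
		return;

	if (be16_to_cpu(num_classes) <= OPAL_SYSEPOW_POWER)
		return;

	power = be16_to_cpu(epow_status[OPAL_SYSEPOW_POWER]);
	if (power & OPAL_SYSPOWER_UPS)
		pr_info("System is running on UPS power\n");
	if (power & OPAL_SYSPOWER_FAIL)
		pr_warn("Impending power failure reported\n");
}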
diff --git a/kernel/arch/powerpc/include/asm/opal.h b/kernel/arch/powerpc/include/asm/opal.h
index 042af1abf..07a99e638 100644
--- a/kernel/arch/powerpc/include/asm/opal.h
+++ b/kernel/arch/powerpc/include/asm/opal.h
@@ -35,6 +35,7 @@ int64_t opal_console_read(int64_t term_number, __be64 *length,
uint8_t *buffer);
int64_t opal_console_write_buffer_space(int64_t term_number,
__be64 *length);
+int64_t opal_console_flush(int64_t term_number);
int64_t opal_rtc_read(__be32 *year_month_day,
__be64 *hour_minute_second_millisecond);
int64_t opal_rtc_write(uint32_t year_month_day,
@@ -44,6 +45,7 @@ int64_t opal_tpo_write(uint64_t token, uint32_t year_mon_day,
uint32_t hour_min);
int64_t opal_cec_power_down(uint64_t request);
int64_t opal_cec_reboot(void);
+int64_t opal_cec_reboot2(uint32_t reboot_type, char *diag);
int64_t opal_read_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
int64_t opal_write_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
int64_t opal_handle_interrupt(uint64_t isn, __be64 *outstanding_event_mask);
@@ -141,7 +143,8 @@ int64_t opal_pci_fence_phb(uint64_t phb_id);
int64_t opal_pci_reinit(uint64_t phb_id, uint64_t reinit_scope, uint64_t data);
int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action);
int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action);
-int64_t opal_get_epow_status(__be64 *status);
+int64_t opal_get_epow_status(__be16 *epow_status, __be16 *num_epow_classes);
+int64_t opal_get_dpo_status(__be64 *dpo_timeout);
int64_t opal_set_system_attention_led(uint8_t led_action);
int64_t opal_pci_next_error(uint64_t phb_id, __be64 *first_frozen_pe,
__be16 *pci_error_type, __be16 *severity);
@@ -186,6 +189,7 @@ int64_t opal_handle_hmi(void);
int64_t opal_register_dump_region(uint32_t id, uint64_t start, uint64_t end);
int64_t opal_unregister_dump_region(uint32_t id);
int64_t opal_slw_set_reg(uint64_t cpu_pir, uint64_t sprn, uint64_t val);
+int64_t opal_config_cpu_idle_state(uint64_t state, uint64_t flag);
int64_t opal_pci_set_phb_cxl_mode(uint64_t phb_id, uint64_t mode, uint64_t pe_number);
int64_t opal_ipmi_send(uint64_t interface, struct opal_ipmi_msg *msg,
uint64_t msg_len);
@@ -193,6 +197,11 @@ int64_t opal_ipmi_recv(uint64_t interface, struct opal_ipmi_msg *msg,
uint64_t *msg_len);
int64_t opal_i2c_request(uint64_t async_token, uint32_t bus_id,
struct opal_i2c_request *oreq);
+int64_t opal_prd_msg(struct opal_prd_msg *msg);
+int64_t opal_leds_get_ind(char *loc_code, __be64 *led_mask,
+ __be64 *led_value, __be64 *max_led_type);
+int64_t opal_leds_set_ind(uint64_t token, char *loc_code, const u64 led_mask,
+ const u64 led_value, __be64 *max_led_type);
int64_t opal_flash_read(uint64_t id, uint64_t offset, uint64_t buf,
uint64_t size, uint64_t token);
@@ -239,6 +248,10 @@ extern int opal_elog_init(void);
extern void opal_platform_dump_init(void);
extern void opal_sys_param_init(void);
extern void opal_msglog_init(void);
+extern int opal_async_comp_init(void);
+extern int opal_sensor_init(void);
+extern int opal_hmi_handler_init(void);
+extern int opal_event_init(void);
extern int opal_machine_check(struct pt_regs *regs);
extern bool opal_mce_check_early_recovery(struct pt_regs *regs);
@@ -250,6 +263,10 @@ extern int opal_resync_timebase(void);
extern void opal_lpc_init(void);
+extern void opal_kmsg_init(void);
+
+extern int opal_event_request(unsigned int opal_event_nr);
+
struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
unsigned long vmalloc_size);
void opal_free_sg_list(struct opal_sg_list *sg);
diff --git a/kernel/arch/powerpc/include/asm/page.h b/kernel/arch/powerpc/include/asm/page.h
index 69c059887..3140c19c4 100644
--- a/kernel/arch/powerpc/include/asm/page.h
+++ b/kernel/arch/powerpc/include/asm/page.h
@@ -12,6 +12,7 @@
#ifndef __ASSEMBLY__
#include <linux/types.h>
+#include <linux/kernel.h>
#else
#include <asm/types.h>
#endif
@@ -107,12 +108,13 @@ extern long long virt_phys_offset;
#endif
/* See Description below for VIRT_PHYS_OFFSET */
-#ifdef CONFIG_RELOCATABLE_PPC32
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
+#ifdef CONFIG_RELOCATABLE
#define VIRT_PHYS_OFFSET virt_phys_offset
#else
#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
#endif
-
+#endif
#ifdef CONFIG_PPC64
#define MEMORY_START 0UL
@@ -127,9 +129,10 @@ extern long long virt_phys_offset;
#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr)
#endif
-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
+#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
-#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
/*
* On Book-E parts we need __va to parse the device tree and we can't
@@ -204,7 +207,7 @@ extern long long virt_phys_offset;
* On non-Book-E PPC64 PAGE_OFFSET and MEMORY_START are constants so use
* the other definitions for __va & __pa.
*/
-#ifdef CONFIG_BOOKE
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
@@ -240,8 +243,8 @@ extern long long virt_phys_offset;
#endif
/* align addr on a size boundary - adjust address up/down if needed */
-#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
-#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))
+#define _ALIGN_UP(addr, size) __ALIGN_KERNEL(addr, size)
+#define _ALIGN_DOWN(addr, size) ((addr)&(~((typeof(addr))(size)-1)))
/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr,size) _ALIGN_UP(addr,size)
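The switch to a typeof() cast in _ALIGN_DOWN matters when the address is wider than the size argument: with an unsigned 32-bit size, ~((size)-1) zero-extends and wipes the upper half of a 64-bit address. A user-space sketch of the difference; the constants are illustrative only.

#include <assert.h>
#include <stdint.h>

#define OLD_ALIGN_DOWN(addr, size)  ((addr) & (~((size) - 1)))
#define NEW_ALIGN_DOWN(addr, size)  ((addr) & (~((__typeof__(addr))(size) - 1)))

int main(void)
{
	uint64_t addr = 0x123456789abcull;
	unsigned int size = 0x10000;	/* 64K, unsigned 32-bit */

	/* Old form: the zero-extended mask loses the high 32 bits. */
	assert(OLD_ALIGN_DOWN(addr, size) == 0x56780000ull);

	/* New form: the cast widens the mask first, so they survive. */
	assert(NEW_ALIGN_DOWN(addr, size) == 0x123456780000ull);
	return 0;
}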
@@ -278,9 +281,7 @@ extern long long virt_phys_offset;
#ifndef __ASSEMBLY__
-#undef STRICT_MM_TYPECHECKS
-
-#ifdef STRICT_MM_TYPECHECKS
+#ifdef CONFIG_STRICT_MM_TYPECHECKS
/* These are used to make use of C type-checking. */
/* PTE level */
@@ -364,6 +365,20 @@ typedef struct { signed long pd; } hugepd_t;
#ifdef CONFIG_HUGETLB_PAGE
#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64K_PAGES
+/*
+ * With a 64k page size we have hugepage ptes in the pgd and pmd entries, so
+ * we don't need to set up a hugepage directory for them; our pte and page
+ * directory formats allow this. But to avoid errors when implementing new
+ * features, we disable hugepd for 64K and enable a debug version here so we
+ * catch wrong usage.
+ */
+#ifdef CONFIG_DEBUG_VM
+extern int hugepd_ok(hugepd_t hpd);
+#else
+#define hugepd_ok(x) (0)
+#endif
+#else
static inline int hugepd_ok(hugepd_t hpd)
{
/*
@@ -372,6 +387,7 @@ static inline int hugepd_ok(hugepd_t hpd)
*/
return (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
}
+#endif
#else
static inline int hugepd_ok(hugepd_t hpd)
{
diff --git a/kernel/arch/powerpc/include/asm/pci-bridge.h b/kernel/arch/powerpc/include/asm/pci-bridge.h
index 1811c44bf..37fc53587 100644
--- a/kernel/arch/powerpc/include/asm/pci-bridge.h
+++ b/kernel/arch/powerpc/include/asm/pci-bridge.h
@@ -27,9 +27,24 @@ struct pci_controller_ops {
* allow assignment/enabling of the device. */
bool (*enable_device_hook)(struct pci_dev *);
+ void (*disable_device)(struct pci_dev *);
+
+ void (*release_device)(struct pci_dev *);
+
/* Called during PCI resource reassignment */
resource_size_t (*window_alignment)(struct pci_bus *, unsigned long type);
void (*reset_secondary_bus)(struct pci_dev *dev);
+
+#ifdef CONFIG_PCI_MSI
+ int (*setup_msi_irqs)(struct pci_dev *dev,
+ int nvec, int type);
+ void (*teardown_msi_irqs)(struct pci_dev *dev);
+#endif
+
+ int (*dma_set_mask)(struct pci_dev *dev, u64 dma_mask);
+ u64 (*dma_get_required_mask)(struct pci_dev *dev);
+
+ void (*shutdown)(struct pci_controller *);
};
/*
@@ -185,7 +200,7 @@ struct pci_dn {
struct pci_dn *parent;
struct pci_controller *phb; /* for pci devices */
- struct iommu_table *iommu_table; /* for phb's or bridges */
+ struct iommu_table_group *table_group; /* for phb's or bridges */
struct device_node *node; /* back-pointer to the device_node */
int pci_ext_config_space; /* for pci devices */
diff --git a/kernel/arch/powerpc/include/asm/pci.h b/kernel/arch/powerpc/include/asm/pci.h
index 4aef8d660..3453bd8dc 100644
--- a/kernel/arch/powerpc/include/asm/pci.h
+++ b/kernel/arch/powerpc/include/asm/pci.h
@@ -13,9 +13,9 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
#include <asm/machdep.h>
-#include <asm/scatterlist.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
@@ -71,36 +71,6 @@ extern struct dma_map_ops *get_pci_dma_ops(void);
*/
#define PCI_DISABLE_MWI
-#ifdef CONFIG_PCI
-static inline void pci_dma_burst_advice(struct pci_dev *pdev,
- enum pci_dma_burst_strategy *strat,
- unsigned long *strategy_parameter)
-{
- unsigned long cacheline_size;
- u8 byte;
-
- pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
- if (byte == 0)
- cacheline_size = 1024;
- else
- cacheline_size = (int) byte * 4;
-
- *strat = PCI_DMA_BURST_MULTIPLE;
- *strategy_parameter = cacheline_size;
-}
-#endif
-
-#else /* 32-bit */
-
-#ifdef CONFIG_PCI
-static inline void pci_dma_burst_advice(struct pci_dev *pdev,
- enum pci_dma_burst_strategy *strat,
- unsigned long *strategy_parameter)
-{
- *strat = PCI_DMA_BURST_INFINITY;
- *strategy_parameter = ~0UL;
-}
-#endif
#endif /* CONFIG_PPC64 */
extern int pci_domain_nr(struct pci_bus *bus);
diff --git a/kernel/arch/powerpc/include/asm/pgtable-ppc32.h b/kernel/arch/powerpc/include/asm/pgtable-ppc32.h
index 64b52b1cf..9c326565d 100644
--- a/kernel/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/kernel/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -170,24 +170,6 @@ static inline unsigned long pte_update(pte_t *p,
#ifdef PTE_ATOMIC_UPDATES
unsigned long old, tmp;
-#ifdef CONFIG_PPC_8xx
- unsigned long tmp2;
-
- __asm__ __volatile__("\
-1: lwarx %0,0,%4\n\
- andc %1,%0,%5\n\
- or %1,%1,%6\n\
- /* 0x200 == Extended encoding, bit 22 */ \
- /* Bit 22 has to be 1 when _PAGE_USER is unset and _PAGE_RO is set */ \
- rlwimi %1,%1,32-1,0x200\n /* get _PAGE_RO */ \
- rlwinm %3,%1,32-2,0x200\n /* get _PAGE_USER */ \
- andc %1,%1,%3\n\
- stwcx. %1,0,%4\n\
- bne- 1b"
- : "=&r" (old), "=&r" (tmp), "=m" (*p), "=&r" (tmp2)
- : "r" (p), "r" (clr), "r" (set), "m" (*p)
- : "cc" );
-#else /* CONFIG_PPC_8xx */
__asm__ __volatile__("\
1: lwarx %0,0,%3\n\
andc %1,%0,%4\n\
@@ -198,7 +180,6 @@ static inline unsigned long pte_update(pte_t *p,
: "=&r" (old), "=&r" (tmp), "=m" (*p)
: "r" (p), "r" (clr), "r" (set), "m" (*p)
: "cc" );
-#endif /* CONFIG_PPC_8xx */
#else /* PTE_ATOMIC_UPDATES */
unsigned long old = pte_val(*p);
*p = __pte((old & ~clr) | set);
diff --git a/kernel/arch/powerpc/include/asm/pgtable-ppc64.h b/kernel/arch/powerpc/include/asm/pgtable-ppc64.h
index 88d27e325..3245f2d96 100644
--- a/kernel/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/kernel/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -118,7 +118,7 @@
*/
#ifndef __real_pte
-#ifdef STRICT_MM_TYPECHECKS
+#ifdef CONFIG_STRICT_MM_TYPECHECKS
#define __real_pte(e,p) ((real_pte_t){(e)})
#define __rpte_to_pte(r) ((r).pte)
#else
@@ -134,23 +134,11 @@
#define pte_iterate_hashed_end() } while(0)
-#ifdef CONFIG_PPC_HAS_HASH_64K
/*
* We expect this to be called only for user addresses or kernel virtual
* addresses other than the linear mapping.
*/
-#define pte_pagesize_index(mm, addr, pte) \
- ({ \
- unsigned int psize; \
- if (is_kernel_addr(addr)) \
- psize = MMU_PAGE_4K; \
- else \
- psize = get_slice_psize(mm, addr); \
- psize; \
- })
-#else
#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K
-#endif
#endif /* __real_pte */
@@ -359,11 +347,27 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/* Encode and de-code a swap entry */
-#define __swp_type(entry) (((entry).val >> 1) & 0x3f)
-#define __swp_offset(entry) ((entry).val >> 8)
-#define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)})
-#define __pte_to_swp_entry(pte) ((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT})
-#define __swp_entry_to_pte(x) ((pte_t) { (x).val << PTE_RPN_SHIFT })
+#define MAX_SWAPFILES_CHECK() do { \
+ BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
+ /* \
+ * Don't have overlapping bits with _PAGE_HPTEFLAGS \
+ * We filter HPTEFLAGS on set_pte. \
+ */ \
+ BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
+ } while (0)
+/*
+ * On the pte we don't need to handle RADIX_TREE_EXCEPTIONAL_SHIFT;
+ */
+#define SWP_TYPE_BITS 5
+#define __swp_type(x) (((x).val >> _PAGE_BIT_SWAP_TYPE) \
+ & ((1UL << SWP_TYPE_BITS) - 1))
+#define __swp_offset(x) ((x).val >> PTE_RPN_SHIFT)
+#define __swp_entry(type, offset) ((swp_entry_t) { \
+ ((type) << _PAGE_BIT_SWAP_TYPE) \
+ | ((offset) << PTE_RPN_SHIFT) })
+
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
+#define __swp_entry_to_pte(x) __pte((x).val)
void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);
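The new swap-entry layout keeps a 5-bit type field starting at _PAGE_BIT_SWAP_TYPE and puts the offset above PTE_RPN_SHIFT, and MAX_SWAPFILES_CHECK() build-checks that the type field cannot overlap _PAGE_HPTEFLAGS. A user-space sketch of the packing, using 2 for _PAGE_BIT_SWAP_TYPE (as in the pte-* hunks further down) and 12 as a stand-in for PTE_RPN_SHIFT.

#include <assert.h>
#include <stdint.h>

/* Stand-in values; the real ones come from the pte-* headers. */
#define SWAP_TYPE_BIT	2
#define RPN_SHIFT	12
#define TYPE_BITS	5

static uint64_t swp_entry(unsigned int type, uint64_t offset)
{
	return ((uint64_t)type << SWAP_TYPE_BIT) | (offset << RPN_SHIFT);
}

static unsigned int swp_type(uint64_t entry)
{
	return (entry >> SWAP_TYPE_BIT) & ((1UL << TYPE_BITS) - 1);
}

static uint64_t swp_offset(uint64_t entry)
{
	return entry >> RPN_SHIFT;
}

int main(void)
{
	uint64_t e = swp_entry(9, 0x1234);

	assert(swp_type(e) == 9);	/* type comes back untouched */
	assert(swp_offset(e) == 0x1234);/* so does the offset */
	return 0;
}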
@@ -433,9 +437,9 @@ static inline char *get_hpte_slot_array(pmd_t *pmdp)
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, unsigned long old_pmd);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
@@ -475,6 +479,14 @@ static inline int pmd_trans_splitting(pmd_t pmd)
}
extern int has_transparent_hugepage(void);
+#else
+static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp,
+ unsigned long old_pmd)
+{
+
+ WARN(1, "%s called with THP disabled\n", __func__);
+}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int pmd_large(pmd_t pmd)
@@ -565,13 +577,9 @@ extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
-extern pmd_t pmdp_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pmd_t *pmdp);
-
-#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
-extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
- pmd_t *pmdp);
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+extern pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp);
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
@@ -588,6 +596,10 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
+extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+#define pmdp_collapse_flush pmdp_collapse_flush
+
#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pgtable);
diff --git a/kernel/arch/powerpc/include/asm/pgtable.h b/kernel/arch/powerpc/include/asm/pgtable.h
index 11a38635d..b64b4212b 100644
--- a/kernel/arch/powerpc/include/asm/pgtable.h
+++ b/kernel/arch/powerpc/include/asm/pgtable.h
@@ -169,6 +169,17 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
* cases, and 32-bit non-hash with 32-bit PTEs.
*/
*ptep = pte;
+
+#ifdef CONFIG_PPC_BOOK3E_64
+ /*
+ * With hardware tablewalk, a sync is needed to ensure that
+ * subsequent accesses see the PTE we just wrote. Unlike userspace
+ * mappings, we can't tolerate spurious faults, so make sure
+ * the new PTE will be seen the first time.
+ */
+ if (is_kernel_addr(addr))
+ mb();
+#endif
#endif
}
@@ -248,15 +259,15 @@ extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
#define has_transparent_hugepage() 0
#endif
pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
- unsigned *shift);
+ bool *is_thp, unsigned *shift);
static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
- unsigned *shift)
+ bool *is_thp, unsigned *shift)
{
if (!arch_irqs_disabled()) {
pr_info("%s called with irq enabled\n", __func__);
dump_stack();
}
- return __find_linux_pte_or_hugepte(pgdir, ea, shift);
+ return __find_linux_pte_or_hugepte(pgdir, ea, is_thp, shift);
}
#endif /* __ASSEMBLY__ */
diff --git a/kernel/arch/powerpc/include/asm/pnv-pci.h b/kernel/arch/powerpc/include/asm/pnv-pci.h
index f9b498292..6f77f71ee 100644
--- a/kernel/arch/powerpc/include/asm/pnv-pci.h
+++ b/kernel/arch/powerpc/include/asm/pnv-pci.h
@@ -11,7 +11,7 @@
#define _ASM_PNV_PCI_H
#include <linux/pci.h>
-#include <misc/cxl.h>
+#include <misc/cxl-base.h>
int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode);
int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
diff --git a/kernel/arch/powerpc/include/asm/ppc-opcode.h b/kernel/arch/powerpc/include/asm/ppc-opcode.h
index 5c93f691b..7ab04fc59 100644
--- a/kernel/arch/powerpc/include/asm/ppc-opcode.h
+++ b/kernel/arch/powerpc/include/asm/ppc-opcode.h
@@ -136,10 +136,11 @@
#define PPC_INST_DCBAL 0x7c2005ec
#define PPC_INST_DCBZL 0x7c2007ec
#define PPC_INST_ICBT 0x7c00002c
+#define PPC_INST_ICSWX 0x7c00032d
+#define PPC_INST_ICSWEPX 0x7c00076d
#define PPC_INST_ISEL 0x7c00001e
#define PPC_INST_ISEL_MASK 0xfc00003e
#define PPC_INST_LDARX 0x7c0000a8
-#define PPC_INST_LOGMPP 0x7c0007e4
#define PPC_INST_LSWI 0x7c0004aa
#define PPC_INST_LSWX 0x7c00042a
#define PPC_INST_LWARX 0x7c000028
@@ -283,20 +284,6 @@
#define __PPC_EH(eh) 0
#endif
-/* POWER8 Micro Partition Prefetch (MPP) parameters */
-/* Address mask is common for LOGMPP instruction and MPPR SPR */
-#define PPC_MPPE_ADDRESS_MASK 0xffffffffc000
-
-/* Bits 60 and 61 of MPP SPR should be set to one of the following */
-/* Aborting the fetch is indeed setting 00 in the table size bits */
-#define PPC_MPPR_FETCH_ABORT (0x0ULL << 60)
-#define PPC_MPPR_FETCH_WHOLE_TABLE (0x2ULL << 60)
-
-/* Bits 54 and 55 of register for LOGMPP instruction should be set to: */
-#define PPC_LOGMPP_LOG_L2 (0x02ULL << 54)
-#define PPC_LOGMPP_LOG_L2L3 (0x01ULL << 54)
-#define PPC_LOGMPP_LOG_ABORT (0x03ULL << 54)
-
/* Deal with instructions that older assemblers aren't aware of */
#define PPC_DCBAL(a, b) stringify_in_c(.long PPC_INST_DCBAL | \
__PPC_RA(a) | __PPC_RB(b))
@@ -305,8 +292,6 @@
#define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LDARX | \
___PPC_RT(t) | ___PPC_RA(a) | \
___PPC_RB(b) | __PPC_EH(eh))
-#define PPC_LOGMPP(b) stringify_in_c(.long PPC_INST_LOGMPP | \
- __PPC_RB(b))
#define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LWARX | \
___PPC_RT(t) | ___PPC_RA(a) | \
___PPC_RB(b) | __PPC_EH(eh))
@@ -403,4 +388,15 @@
#define MFTMR(tmr, r) stringify_in_c(.long PPC_INST_MFTMR | \
TMRN(tmr) | ___PPC_RT(r))
+/* Coprocessor instructions */
+#define PPC_ICSWX(s, a, b) stringify_in_c(.long PPC_INST_ICSWX | \
+ ___PPC_RS(s) | \
+ ___PPC_RA(a) | \
+ ___PPC_RB(b))
+#define PPC_ICSWEPX(s, a, b) stringify_in_c(.long PPC_INST_ICSWEPX | \
+ ___PPC_RS(s) | \
+ ___PPC_RA(a) | \
+ ___PPC_RB(b))
+
+
#endif /* _ASM_POWERPC_PPC_OPCODE_H */
diff --git a/kernel/arch/powerpc/include/asm/ppc-pci.h b/kernel/arch/powerpc/include/asm/ppc-pci.h
index 4122a86d6..ca0c5bff7 100644
--- a/kernel/arch/powerpc/include/asm/ppc-pci.h
+++ b/kernel/arch/powerpc/include/asm/ppc-pci.h
@@ -61,6 +61,7 @@ int rtas_write_config(struct pci_dn *, int where, int size, u32 val);
int rtas_read_config(struct pci_dn *, int where, int size, u32 *val);
void eeh_pe_state_mark(struct eeh_pe *pe, int state);
void eeh_pe_state_clear(struct eeh_pe *pe, int state);
+void eeh_pe_state_mark_with_cfg(struct eeh_pe *pe, int state);
void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode);
void eeh_sysfs_add_device(struct pci_dev *pdev);
diff --git a/kernel/arch/powerpc/include/asm/processor.h b/kernel/arch/powerpc/include/asm/processor.h
index bf117d8fb..5afea361b 100644
--- a/kernel/arch/powerpc/include/asm/processor.h
+++ b/kernel/arch/powerpc/include/asm/processor.h
@@ -264,7 +264,6 @@ struct thread_struct {
u64 tm_tfhar; /* Transaction fail handler addr */
u64 tm_texasr; /* Transaction exception & summary */
u64 tm_tfiar; /* Transaction fail instr address reg */
- unsigned long tm_orig_msr; /* Thread's MSR on ctx switch */
struct pt_regs ckpt_regs; /* Checkpointed registers */
unsigned long tm_tar;
@@ -295,6 +294,15 @@ struct thread_struct {
#endif
#ifdef CONFIG_PPC64
unsigned long dscr;
+ /*
+ * dscr_inherit indicates that the process has explicitly changed the
+ * DSCR register value for itself, so the kernel won't use the default
+ * CPU DSCR value from the PACA structure during context switch any
+ * more. Once this flag is set, the behaviour is also inherited by all
+ * children of this process from that point onwards.
+ */
int dscr_inherit;
unsigned long ppr; /* used to save/restore SMT priority */
#endif
diff --git a/kernel/arch/powerpc/include/asm/pte-8xx.h b/kernel/arch/powerpc/include/asm/pte-8xx.h
index 97bae64af..a0e2ba960 100644
--- a/kernel/arch/powerpc/include/asm/pte-8xx.h
+++ b/kernel/arch/powerpc/include/asm/pte-8xx.h
@@ -34,35 +34,32 @@
#define _PAGE_SPECIAL 0x0008 /* SW entry, forced to 0 by the TLB miss */
#define _PAGE_DIRTY 0x0100 /* C: page changed */
-/* These 4 software bits must be masked out when the entry is loaded
- * into the TLB, 1 SW bit left(0x0080).
+/* These 4 software bits must be masked out when the L2 entry is loaded
+ * into the TLB.
*/
-#define _PAGE_GUARDED 0x0010 /* software: guarded access */
-#define _PAGE_ACCESSED 0x0020 /* software: page referenced */
-#define _PAGE_WRITETHRU 0x0040 /* software: caching is write through */
+#define _PAGE_GUARDED 0x0010 /* Copied to L1 G entry in DTLB */
+#define _PAGE_USER 0x0020 /* Copied to L1 APG lsb */
+#define _PAGE_EXEC 0x0040 /* Copied to L1 APG */
+#define _PAGE_WRITETHRU 0x0080 /* software: caching is write through */
+#define _PAGE_ACCESSED 0x0800 /* software: page referenced */
-/* Setting any bits in the nibble with the follow two controls will
- * require a TLB exception handler change. It is assumed unused bits
- * are always zero.
- */
-#define _PAGE_RO 0x0400 /* lsb PP bits */
-#define _PAGE_USER 0x0800 /* msb PP bits */
-/* set when _PAGE_USER is unset and _PAGE_RO is set */
-#define _PAGE_KNLRO 0x0200
+#define _PAGE_RO 0x0600 /* Supervisor RO, User no access */
#define _PMD_PRESENT 0x0001
#define _PMD_BAD 0x0ff0
#define _PMD_PAGE_MASK 0x000c
#define _PMD_PAGE_8M 0x000c
-#define _PTE_NONE_MASK _PAGE_KNLRO
-
/* Until my rework is finished, 8xx still needs atomic PTE updates */
#define PTE_ATOMIC_UPDATES 1
/* We need to add _PAGE_SHARED to kernel pages */
-#define _PAGE_KERNEL_RO (_PAGE_SHARED | _PAGE_RO | _PAGE_KNLRO)
-#define _PAGE_KERNEL_ROX (_PAGE_EXEC | _PAGE_RO | _PAGE_KNLRO)
+#define _PAGE_KERNEL_RO (_PAGE_SHARED | _PAGE_RO)
+#define _PAGE_KERNEL_ROX (_PAGE_SHARED | _PAGE_RO | _PAGE_EXEC)
+#define _PAGE_KERNEL_RW (_PAGE_SHARED | _PAGE_DIRTY | _PAGE_RW | \
+ _PAGE_HWWRITE)
+#define _PAGE_KERNEL_RWX (_PAGE_SHARED | _PAGE_DIRTY | _PAGE_RW | \
+ _PAGE_HWWRITE | _PAGE_EXEC)
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PTE_8xx_H */
diff --git a/kernel/arch/powerpc/include/asm/pte-book3e.h b/kernel/arch/powerpc/include/asm/pte-book3e.h
index 91a704952..8d8473278 100644
--- a/kernel/arch/powerpc/include/asm/pte-book3e.h
+++ b/kernel/arch/powerpc/include/asm/pte-book3e.h
@@ -11,6 +11,7 @@
/* Architected bits */
#define _PAGE_PRESENT 0x000001 /* software: pte contains a translation */
#define _PAGE_SW1 0x000002
+#define _PAGE_BIT_SWAP_TYPE 2
#define _PAGE_BAP_SR 0x000004
#define _PAGE_BAP_UR 0x000008
#define _PAGE_BAP_SW 0x000010
diff --git a/kernel/arch/powerpc/include/asm/pte-common.h b/kernel/arch/powerpc/include/asm/pte-common.h
index c5a755ef7..71537a319 100644
--- a/kernel/arch/powerpc/include/asm/pte-common.h
+++ b/kernel/arch/powerpc/include/asm/pte-common.h
@@ -85,10 +85,8 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
* 64-bit PTEs
*/
#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
-#define PTE_RPN_MAX (1ULL << (64 - PTE_RPN_SHIFT))
#define PTE_RPN_MASK (~((1ULL<<PTE_RPN_SHIFT)-1))
#else
-#define PTE_RPN_MAX (1UL << (32 - PTE_RPN_SHIFT))
#define PTE_RPN_MASK (~((1UL<<PTE_RPN_SHIFT)-1))
#endif
@@ -111,7 +109,8 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
* the processor might need it for DMA coherency.
*/
#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
-#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU)
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU) || \
+ defined(CONFIG_PPC_E500MC)
#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT)
#else
#define _PAGE_BASE (_PAGE_BASE_NC)
diff --git a/kernel/arch/powerpc/include/asm/pte-hash64.h b/kernel/arch/powerpc/include/asm/pte-hash64.h
index fc852f7e7..ef612c160 100644
--- a/kernel/arch/powerpc/include/asm/pte-hash64.h
+++ b/kernel/arch/powerpc/include/asm/pte-hash64.h
@@ -16,6 +16,7 @@
*/
#define _PAGE_PRESENT 0x0001 /* software: pte contains a translation */
#define _PAGE_USER 0x0002 /* matches one of the PP bits */
+#define _PAGE_BIT_SWAP_TYPE 2
#define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */
#define _PAGE_GUARDED 0x0008
/* We can derive Memory coherence from _PAGE_NO_CACHE */
diff --git a/kernel/arch/powerpc/include/asm/qe_ic.h b/kernel/arch/powerpc/include/asm/qe_ic.h
index 25784cc95..1e155ca6d 100644
--- a/kernel/arch/powerpc/include/asm/qe_ic.h
+++ b/kernel/arch/powerpc/include/asm/qe_ic.h
@@ -59,14 +59,14 @@ enum qe_ic_grp_id {
#ifdef CONFIG_QUICC_ENGINE
void qe_ic_init(struct device_node *node, unsigned int flags,
- void (*low_handler)(unsigned int irq, struct irq_desc *desc),
- void (*high_handler)(unsigned int irq, struct irq_desc *desc));
+ void (*low_handler)(struct irq_desc *desc),
+ void (*high_handler)(struct irq_desc *desc));
unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic);
unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic);
#else
static inline void qe_ic_init(struct device_node *node, unsigned int flags,
- void (*low_handler)(unsigned int irq, struct irq_desc *desc),
- void (*high_handler)(unsigned int irq, struct irq_desc *desc))
+ void (*low_handler)(struct irq_desc *desc),
+ void (*high_handler)(struct irq_desc *desc))
{}
static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
{ return 0; }
@@ -78,8 +78,7 @@ void qe_ic_set_highest_priority(unsigned int virq, int high);
int qe_ic_set_priority(unsigned int virq, unsigned int priority);
int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high);
-static inline void qe_ic_cascade_low_ipic(unsigned int irq,
- struct irq_desc *desc)
+static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc)
{
struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
@@ -88,8 +87,7 @@ static inline void qe_ic_cascade_low_ipic(unsigned int irq,
generic_handle_irq(cascade_irq);
}
-static inline void qe_ic_cascade_high_ipic(unsigned int irq,
- struct irq_desc *desc)
+static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc)
{
struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
@@ -98,8 +96,7 @@ static inline void qe_ic_cascade_high_ipic(unsigned int irq,
generic_handle_irq(cascade_irq);
}
-static inline void qe_ic_cascade_low_mpic(unsigned int irq,
- struct irq_desc *desc)
+static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc)
{
struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
@@ -111,8 +108,7 @@ static inline void qe_ic_cascade_low_mpic(unsigned int irq,
chip->irq_eoi(&desc->irq_data);
}
-static inline void qe_ic_cascade_high_mpic(unsigned int irq,
- struct irq_desc *desc)
+static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc)
{
struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
@@ -124,8 +120,7 @@ static inline void qe_ic_cascade_high_mpic(unsigned int irq,
chip->irq_eoi(&desc->irq_data);
}
-static inline void qe_ic_cascade_muxed_mpic(unsigned int irq,
- struct irq_desc *desc)
+static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
{
struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq;
diff --git a/kernel/arch/powerpc/include/asm/reg.h b/kernel/arch/powerpc/include/asm/reg.h
index af56b5c6c..2220f7a60 100644
--- a/kernel/arch/powerpc/include/asm/reg.h
+++ b/kernel/arch/powerpc/include/asm/reg.h
@@ -108,6 +108,7 @@
#define MSR_TS_T __MASK(MSR_TS_T_LG) /* Transaction Transactional */
#define MSR_TS_MASK (MSR_TS_T | MSR_TS_S) /* Transaction State bits */
#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
+#define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK) /* Reserved */
#define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T)
#define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S)
@@ -226,7 +227,6 @@
#define CTRL_TE 0x00c00000 /* thread enable */
#define CTRL_RUNLATCH 0x1
#define SPRN_DAWR 0xB4
-#define SPRN_MPPR 0xB8 /* Micro Partition Prefetch Register */
#define SPRN_RPR 0xBA /* Relative Priority Register */
#define SPRN_CIABR 0xBB
#define CIABR_PRIV 0x3
@@ -1193,8 +1193,7 @@
#ifdef CONFIG_PPC_BOOK3S_64
#define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
: : "r" (v) : "memory")
-#define mtmsrd(v) __mtmsrd((v), 0)
-#define mtmsr(v) mtmsrd(v)
+#define mtmsr(v) __mtmsrd((v), 0)
#else
#define mtmsr(v) asm volatile("mtmsr %0" : \
: "r" ((unsigned long)(v)) \
@@ -1281,6 +1280,15 @@ struct pt_regs;
extern void ppc_save_regs(struct pt_regs *regs);
+static inline void update_power8_hid0(unsigned long hid0)
+{
+ /*
+ * The HID0 update on Power8 should at the very least be
+ * preceded by a SYNC instruction followed by an ISYNC
+ * instruction.
+ */
+ asm volatile("sync; mtspr %0,%1; isync":: "i"(SPRN_HID0), "r"(hid0));
+}
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_REG_H */
diff --git a/kernel/arch/powerpc/include/asm/reg_booke.h b/kernel/arch/powerpc/include/asm/reg_booke.h
index 16547efa2..2fef74b47 100644
--- a/kernel/arch/powerpc/include/asm/reg_booke.h
+++ b/kernel/arch/powerpc/include/asm/reg_booke.h
@@ -742,6 +742,12 @@
#define MMUBE1_VBE4 0x00000002
#define MMUBE1_VBE5 0x00000001
+#define TMRN_TMCFG0 16 /* Thread Management Configuration Register 0 */
+#define TMRN_TMCFG0_NPRIBITS 0x003f0000 /* Bits of thread priority */
+#define TMRN_TMCFG0_NPRIBITS_SHIFT 16
+#define TMRN_TMCFG0_NATHRD 0x00003f00 /* Number of active threads */
+#define TMRN_TMCFG0_NATHRD_SHIFT 8
+#define TMRN_TMCFG0_NTHRD 0x0000003f /* Number of threads */
#define TMRN_IMSR0 0x120 /* Initial MSR Register 0 (e6500) */
#define TMRN_IMSR1 0x121 /* Initial MSR Register 1 (e6500) */
#define TMRN_INIA0 0x140 /* Next Instruction Address Register 0 */
diff --git a/kernel/arch/powerpc/include/asm/spinlock.h b/kernel/arch/powerpc/include/asm/spinlock.h
index 4dbe072ee..523673d75 100644
--- a/kernel/arch/powerpc/include/asm/spinlock.h
+++ b/kernel/arch/powerpc/include/asm/spinlock.h
@@ -28,8 +28,6 @@
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
-#define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */
-
#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
diff --git a/kernel/arch/powerpc/include/asm/spu_csa.h b/kernel/arch/powerpc/include/asm/spu_csa.h
index a40fd4912..51f80b41c 100644
--- a/kernel/arch/powerpc/include/asm/spu_csa.h
+++ b/kernel/arch/powerpc/include/asm/spu_csa.h
@@ -241,12 +241,6 @@ struct spu_priv2_collapsed {
*/
struct spu_state {
struct spu_lscsa *lscsa;
-#ifdef CONFIG_SPU_FS_64K_LS
- int use_big_pages;
- /* One struct page per 64k page */
-#define SPU_LSCSA_NUM_BIG_PAGES (sizeof(struct spu_lscsa) / 0x10000)
- struct page *lscsa_pages[SPU_LSCSA_NUM_BIG_PAGES];
-#endif
struct spu_problem_collapsed prob;
struct spu_priv1_collapsed priv1;
struct spu_priv2_collapsed priv2;
diff --git a/kernel/arch/powerpc/include/asm/synch.h b/kernel/arch/powerpc/include/asm/synch.h
index e682a7143..c50868681 100644
--- a/kernel/arch/powerpc/include/asm/synch.h
+++ b/kernel/arch/powerpc/include/asm/synch.h
@@ -44,7 +44,7 @@ static inline void isync(void)
MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
#define PPC_ACQUIRE_BARRIER "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
#define PPC_RELEASE_BARRIER stringify_in_c(LWSYNC) "\n"
-#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
+#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(sync) "\n"
#define PPC_ATOMIC_EXIT_BARRIER "\n" stringify_in_c(sync) "\n"
#else
#define PPC_ACQUIRE_BARRIER
diff --git a/kernel/arch/powerpc/include/asm/syscall.h b/kernel/arch/powerpc/include/asm/syscall.h
index ff21b7a2f..ab9f3f0a8 100644
--- a/kernel/arch/powerpc/include/asm/syscall.h
+++ b/kernel/arch/powerpc/include/asm/syscall.h
@@ -22,10 +22,15 @@
extern const unsigned long sys_call_table[];
#endif /* CONFIG_FTRACE_SYSCALLS */
-static inline long syscall_get_nr(struct task_struct *task,
- struct pt_regs *regs)
+static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
{
- return TRAP(regs) == 0xc00 ? regs->gpr[0] : -1L;
+ /*
+ * Note that we are returning an int here. That means 0xffffffff, i.e.
+ * 32-bit negative 1, will be interpreted as -1 on a 64-bit kernel.
+ * This is important for seccomp so that compat tasks can set r0 = -1
+ * to reject the syscall.
+ */
+ return TRAP(regs) == 0xc00 ? regs->gpr[0] : -1;
}
static inline void syscall_rollback(struct task_struct *task,
@@ -34,12 +39,6 @@ static inline void syscall_rollback(struct task_struct *task,
regs->gpr[3] = regs->orig_gpr3;
}
-static inline long syscall_get_error(struct task_struct *task,
- struct pt_regs *regs)
-{
- return (regs->ccr & 0x10000000) ? -regs->gpr[3] : 0;
-}
-
static inline long syscall_get_return_value(struct task_struct *task,
struct pt_regs *regs)
{
@@ -50,9 +49,15 @@ static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
+ /*
+ * In the general case it's not obvious that we must deal with CCR
+ * here, as the syscall exit path will also do that for us. However
+ * there are some places, e.g. the signal code, which check the CCR to
+ * decide if the value in r3 is actually an error.
+ */
if (error) {
regs->ccr |= 0x10000000L;
- regs->gpr[3] = -error;
+ regs->gpr[3] = error;
} else {
regs->ccr &= ~0x10000000L;
regs->gpr[3] = val;
@@ -64,19 +69,22 @@ static inline void syscall_get_arguments(struct task_struct *task,
unsigned int i, unsigned int n,
unsigned long *args)
{
+ unsigned long val, mask = -1UL;
+
BUG_ON(i + n > 6);
-#ifdef CONFIG_PPC64
- if (test_tsk_thread_flag(task, TIF_32BIT)) {
- /*
- * Zero-extend 32-bit argument values. The high bits are
- * garbage ignored by the actual syscall dispatch.
- */
- while (n-- > 0)
- args[n] = (u32) regs->gpr[3 + i + n];
- return;
- }
+
+#ifdef CONFIG_COMPAT
+ if (test_tsk_thread_flag(task, TIF_32BIT))
+ mask = 0xffffffff;
#endif
- memcpy(args, &regs->gpr[3 + i], n * sizeof(args[0]));
+ while (n--) {
+ if (n == 0 && i == 0)
+ val = regs->orig_gpr3;
+ else
+ val = regs->gpr[3 + i + n];
+
+ args[n] = val & mask;
+ }
}
static inline void syscall_set_arguments(struct task_struct *task,
@@ -86,6 +94,10 @@ static inline void syscall_set_arguments(struct task_struct *task,
{
BUG_ON(i + n > 6);
memcpy(&regs->gpr[3 + i], args, n * sizeof(args[0]));
+
+ /* Also copy the first argument into orig_gpr3 */
+ if (i == 0 && n > 0)
+ regs->orig_gpr3 = args[0];
}
static inline int syscall_get_arch(void)
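A user-space sketch of what the reworked syscall_get_arguments() logic does for a compat task: arguments are read from gpr[3..8], with orig_gpr3 standing in for the first one, and masked down to 32 bits so stale upper halves never leak to seccomp or tracers. The regs layout here is a stand-in, not the kernel's pt_regs.

#include <assert.h>
#include <stdint.h>

/* Stand-in for the few pt_regs fields the helper touches. */
struct regs_sketch {
	uint64_t gpr[32];
	uint64_t orig_gpr3;
};

static void get_args_sketch(const struct regs_sketch *regs, int compat,
			    unsigned int i, unsigned int n,
			    unsigned long *args)
{
	unsigned long mask = compat ? 0xffffffffUL : ~0UL;

	while (n--) {
		unsigned long val;

		if (n == 0 && i == 0)
			val = regs->orig_gpr3;	/* original first argument */
		else
			val = regs->gpr[3 + i + n];

		args[n] = val & mask;
	}
}

int main(void)
{
	struct regs_sketch regs = { { 0 }, 0 };
	unsigned long args[6];

	regs.orig_gpr3 = 0xdeadbeef12345678ULL;	/* garbage in the upper half */
	regs.gpr[4]    = 0xcafef00d87654321ULL;

	get_args_sketch(&regs, 1 /* compat */, 0, 2, args);
	assert(args[0] == 0x12345678UL);
	assert(args[1] == 0x87654321UL);
	return 0;
}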
diff --git a/kernel/arch/powerpc/include/asm/systbl.h b/kernel/arch/powerpc/include/asm/systbl.h
index f1863a138..5654ece02 100644
--- a/kernel/arch/powerpc/include/asm/systbl.h
+++ b/kernel/arch/powerpc/include/asm/systbl.h
@@ -358,7 +358,7 @@ SYSCALL_SPU(setns)
COMPAT_SYS(process_vm_readv)
COMPAT_SYS(process_vm_writev)
SYSCALL(finit_module)
-SYSCALL(ni_syscall) /* sys_kcmp */
+SYSCALL(kcmp) /* sys_kcmp */
SYSCALL_SPU(sched_setattr)
SYSCALL_SPU(sched_getattr)
SYSCALL_SPU(renameat2)
@@ -368,3 +368,18 @@ SYSCALL_SPU(memfd_create)
SYSCALL_SPU(bpf)
COMPAT_SYS(execveat)
PPC64ONLY(switch_endian)
+SYSCALL_SPU(userfaultfd)
+SYSCALL_SPU(membarrier)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(mlock2)
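The 15 rows appended above (userfaultfd, membarrier, twelve reserved ni_syscall slots, mlock2) match the __NR_syscalls bump from 364 to 379 later in this diff. As a hedged illustration of why the placeholder rows matter, here is a small self-contained sketch of the X-macro pattern systbl.h is built around: the file is included with SYSCALL() and friends redefined, so every row becomes one table slot and ni_syscall rows keep later entries at fixed syscall numbers. All symbols below are demo names, not the kernel's.

/*
 * Self-contained sketch of the systbl.h X-macro idea (demo names only):
 * each row expands to one table slot, so reserved ni_syscall rows keep a
 * later entry such as mlock2 at a fixed index.
 */
#include <stdio.h>

typedef long (*syscall_fn)(void);

static long sys_ni_syscall(void)  { return -38; }	/* -ENOSYS */
static long sys_membarrier(void)  { return 0; }
static long sys_mlock2(void)      { return 0; }

#define SYSCALL(name) sys_##name,
static const syscall_fn demo_table[] = {
	SYSCALL(membarrier)	/* slot 0 of this demo table        */
	SYSCALL(ni_syscall)	/* reserved: still returns -ENOSYS  */
	SYSCALL(ni_syscall)	/* reserved                         */
	SYSCALL(mlock2)		/* lands at a fixed slot, here 3    */
};
#undef SYSCALL

int main(void)
{
	printf("reserved slot -> %ld, mlock2 slot -> %ld\n",
	       demo_table[1](), demo_table[3]());
	return 0;
}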
diff --git a/kernel/arch/powerpc/include/asm/topology.h b/kernel/arch/powerpc/include/asm/topology.h
index 5f1048eaa..8b3b46b7b 100644
--- a/kernel/arch/powerpc/include/asm/topology.h
+++ b/kernel/arch/powerpc/include/asm/topology.h
@@ -87,7 +87,7 @@ static inline int prrn_is_enabled(void)
#include <asm/smp.h>
#define topology_physical_package_id(cpu) (cpu_to_chip_id(cpu))
-#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
+#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_core_id(cpu) (cpu_to_core_id(cpu))
#endif
diff --git a/kernel/arch/powerpc/include/asm/trace.h b/kernel/arch/powerpc/include/asm/trace.h
index c15da6073..8e86b48d0 100644
--- a/kernel/arch/powerpc/include/asm/trace.h
+++ b/kernel/arch/powerpc/include/asm/trace.h
@@ -144,6 +144,26 @@ TRACE_EVENT_FN(opal_exit,
);
#endif
+TRACE_EVENT(hash_fault,
+
+ TP_PROTO(unsigned long addr, unsigned long access, unsigned long trap),
+ TP_ARGS(addr, access, trap),
+ TP_STRUCT__entry(
+ __field(unsigned long, addr)
+ __field(unsigned long, access)
+ __field(unsigned long, trap)
+ ),
+
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->access = access;
+ __entry->trap = trap;
+ ),
+
+ TP_printk("hash fault with addr 0x%lx and access = 0x%lx trap = 0x%lx",
+ __entry->addr, __entry->access, __entry->trap)
+);
+
#endif /* _TRACE_POWERPC_H */
#undef TRACE_INCLUDE_PATH
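A TRACE_EVENT(hash_fault, ...) definition like the one added above generates a trace_hash_fault() helper that fault-handling code can fire. The sketch below is hedged: only the tracepoint name and its three arguments come from the patch; the surrounding function, its name, and the sample values are illustrative, and this is kernel-context code rather than a standalone program.

/*
 * Hedged sketch only: TRACE_EVENT(hash_fault, ...) generates a
 * trace_hash_fault() call.  The caller shown here is illustrative,
 * not taken from the patch.
 */
#include <asm/trace.h>

static void demo_report_hash_fault(unsigned long ea, unsigned long access,
				   unsigned long trap)
{
	trace_hash_fault(ea, access, trap);
	/*
	 * With the event enabled, ftrace emits the TP_printk format above,
	 * e.g.:
	 *   hash fault with addr 0x7fff9a2b0000 and access = 0x100 trap = 0x300
	 */
}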
diff --git a/kernel/arch/powerpc/include/asm/trace_clock.h b/kernel/arch/powerpc/include/asm/trace_clock.h
new file mode 100644
index 000000000..cf1ee75ca
--- /dev/null
+++ b/kernel/arch/powerpc/include/asm/trace_clock.h
@@ -0,0 +1,19 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2015 Naveen N. Rao, IBM Corporation
+ */
+
+#ifndef _ASM_PPC_TRACE_CLOCK_H
+#define _ASM_PPC_TRACE_CLOCK_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+extern u64 notrace trace_clock_ppc_tb(void);
+
+#define ARCH_TRACE_CLOCKS { trace_clock_ppc_tb, "ppc-tb", 0 },
+
+#endif /* _ASM_PPC_TRACE_CLOCK_H */
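The new header registers trace_clock_ppc_tb() with ftrace under the name "ppc-tb" via ARCH_TRACE_CLOCKS. As a hedged usage sketch, selecting it at runtime is just a write to the tracefs trace_clock file; the clock name comes from the header above, while the path below is the conventional debugfs mount point and is an assumption, not something stated in the patch.

/*
 * Hedged usage sketch: select the "ppc-tb" ftrace clock registered by
 * ARCH_TRACE_CLOCKS.  The tracefs path is the conventional location and
 * an assumption here.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/tracing/trace_clock", "w");

	if (!f) {
		perror("trace_clock");
		return 1;
	}
	fputs("ppc-tb", f);	/* name comes from ARCH_TRACE_CLOCKS */
	fclose(f);
	return 0;
}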
diff --git a/kernel/arch/powerpc/include/asm/tsi108_pci.h b/kernel/arch/powerpc/include/asm/tsi108_pci.h
index 5653d7cc3..ae59d5b67 100644
--- a/kernel/arch/powerpc/include/asm/tsi108_pci.h
+++ b/kernel/arch/powerpc/include/asm/tsi108_pci.h
@@ -39,7 +39,7 @@
extern int tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary);
extern void tsi108_pci_int_init(struct device_node *node);
-extern void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc);
+extern void tsi108_irq_cascade(struct irq_desc *desc);
extern void tsi108_clear_pci_cfg_error(void);
#endif /* _ASM_POWERPC_TSI108_PCI_H */
diff --git a/kernel/arch/powerpc/include/asm/uaccess.h b/kernel/arch/powerpc/include/asm/uaccess.h
index a0c071d24..2a8ebae09 100644
--- a/kernel/arch/powerpc/include/asm/uaccess.h
+++ b/kernel/arch/powerpc/include/asm/uaccess.h
@@ -265,7 +265,7 @@ do { \
({ \
long __gu_err; \
unsigned long __gu_val; \
- const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
if (!is_kernel_addr((unsigned long)__gu_addr)) \
might_fault(); \
@@ -279,7 +279,7 @@ do { \
({ \
long __gu_err; \
long long __gu_val; \
- const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
if (!is_kernel_addr((unsigned long)__gu_addr)) \
might_fault(); \
@@ -293,7 +293,7 @@ do { \
({ \
long __gu_err = -EFAULT; \
unsigned long __gu_val = 0; \
- const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
might_fault(); \
if (access_ok(VERIFY_READ, __gu_addr, (size))) \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
@@ -305,7 +305,7 @@ do { \
({ \
long __gu_err; \
unsigned long __gu_val; \
- const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
diff --git a/kernel/arch/powerpc/include/asm/unistd.h b/kernel/arch/powerpc/include/asm/unistd.h
index f4f8b667d..4b6b8ace1 100644
--- a/kernel/arch/powerpc/include/asm/unistd.h
+++ b/kernel/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>
-#define __NR_syscalls 364
+#define __NR_syscalls 379
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
diff --git a/kernel/arch/powerpc/include/asm/vio.h b/kernel/arch/powerpc/include/asm/vio.h
index 4f9b7ca07..84286ec77 100644
--- a/kernel/arch/powerpc/include/asm/vio.h
+++ b/kernel/arch/powerpc/include/asm/vio.h
@@ -19,9 +19,9 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mod_devicetable.h>
+#include <linux/scatterlist.h>
#include <asm/hvcall.h>
-#include <asm/scatterlist.h>
/*
* Architecture-specific constants for drivers to
diff --git a/kernel/arch/powerpc/include/asm/word-at-a-time.h b/kernel/arch/powerpc/include/asm/word-at-a-time.h
index 5b3a903ad..e4396a7d0 100644
--- a/kernel/arch/powerpc/include/asm/word-at-a-time.h
+++ b/kernel/arch/powerpc/include/asm/word-at-a-time.h
@@ -40,6 +40,11 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
return (val + c->high_bits) & ~rhs;
}
+static inline unsigned long zero_bytemask(unsigned long mask)
+{
+ return ~1ul << __fls(mask);
+}
+
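The big-endian zero_bytemask() added just above turns a "which byte was zero" mark into a mask covering only the bytes that precede the first zero byte of the word. The userspace sketch below exercises that arithmetic; the zero-byte detection it uses is the textbook expression rather than this header's exact constants, and __fls() is emulated with a GCC builtin purely for the demo.

/*
 * Userspace sketch of the new big-endian zero_bytemask(): given a mask
 * with the 0x80 bit set in the zero byte(s) of a word, it yields a mask
 * keeping only the bytes before the first zero.  The detection below is
 * the textbook expression, not necessarily this header's constants.
 */
#include <stdio.h>

#define REPEAT_BYTE(x) ((~0UL / 0xff) * (x))

static unsigned long demo_fls(unsigned long x)
{
	return 63 - __builtin_clzl(x);		/* index of highest set bit */
}

static unsigned long zero_bytemask(unsigned long mask)
{
	return ~1UL << demo_fls(mask);		/* expression from the patch */
}

int main(void)
{
	/* Big-endian register image of the bytes 'a' 'b' '\0' 'c' 'd' 'e' 'f' 'g' */
	unsigned long val  = 0x6162006364656667UL;

	/* Textbook zero-byte detector: 0x80 set in each byte that is zero */
	unsigned long mask = (val - REPEAT_BYTE(0x01)) & ~val & REPEAT_BYTE(0x80);

	printf("mask          = %#018lx\n", mask);		  /* 0x0000800000000000 */
	printf("zero_bytemask = %#018lx\n", zero_bytemask(mask)); /* 0xffff000000000000 */
	return 0;				/* only 'a' and 'b' survive the mask */
}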
#else
#ifdef CONFIG_64BIT