path: root/kernel/include/asm-generic
author     José Pekkarinen <jose.pekkarinen@nokia.com>   2016-04-11 10:41:07 +0300
committer  José Pekkarinen <jose.pekkarinen@nokia.com>   2016-04-13 08:17:18 +0300
commit     e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree       d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/include/asm-generic
parent     f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. The kernel sources are
taken from kernel.org, and the rt patch from the rt wiki download page. During
the rebase, the following patch collided: Force tick interrupt and get rid of
softirq magic (I70131fb85). The colliding hunks have been dropped because their
logic was already present in the source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/include/asm-generic')
-rw-r--r--  kernel/include/asm-generic/asm-offsets.h             |   1
-rw-r--r--  kernel/include/asm-generic/atomic-long.h             | 298
-rw-r--r--  kernel/include/asm-generic/atomic.h                  |  15
-rw-r--r--  kernel/include/asm-generic/atomic64.h                |   4
-rw-r--r--  kernel/include/asm-generic/barrier.h                 |  36
-rw-r--r--  kernel/include/asm-generic/bitops/count_zeros.h      |  57
-rw-r--r--  kernel/include/asm-generic/cmpxchg.h                 |  14
-rw-r--r--  kernel/include/asm-generic/cputime_nsecs.h           |   5
-rw-r--r--  kernel/include/asm-generic/dma-mapping-common.h      | 118
-rw-r--r--  kernel/include/asm-generic/early_ioremap.h           |   8
-rw-r--r--  kernel/include/asm-generic/fixmap.h                  |   3
-rw-r--r--  kernel/include/asm-generic/gpio.h                    |   5
-rw-r--r--  kernel/include/asm-generic/io-64-nonatomic-hi-lo.h   |  34
-rw-r--r--  kernel/include/asm-generic/io-64-nonatomic-lo-hi.h   |  34
-rw-r--r--  kernel/include/asm-generic/io.h                      |  47
-rw-r--r--  kernel/include/asm-generic/iomap.h                   |   4
-rw-r--r--  kernel/include/asm-generic/memory_model.h            |   6
-rw-r--r--  kernel/include/asm-generic/mm-arch-hooks.h           |  16
-rw-r--r--  kernel/include/asm-generic/mutex-dec.h               |   8
-rw-r--r--  kernel/include/asm-generic/mutex-xchg.h              |  10
-rw-r--r--  kernel/include/asm-generic/pci-dma-compat.h          |   6
-rw-r--r--  kernel/include/asm-generic/pci.h                     |  13
-rw-r--r--  kernel/include/asm-generic/pci_iomap.h               |  14
-rw-r--r--  kernel/include/asm-generic/pgtable.h                 |  85
-rw-r--r--  kernel/include/asm-generic/preempt.h                 |  14
-rw-r--r--  kernel/include/asm-generic/qrwlock.h                 |  78
-rw-r--r--  kernel/include/asm-generic/qrwlock_types.h           |   4
-rw-r--r--  kernel/include/asm-generic/qspinlock.h               | 139
-rw-r--r--  kernel/include/asm-generic/qspinlock_types.h         |  79
-rw-r--r--  kernel/include/asm-generic/rtc.h                     |  29
-rw-r--r--  kernel/include/asm-generic/rwsem.h                   |  21
-rw-r--r--  kernel/include/asm-generic/scatterlist.h             |  34
-rw-r--r--  kernel/include/asm-generic/tlb.h                     |   2
-rw-r--r--  kernel/include/asm-generic/uaccess.h                 |  10
-rw-r--r--  kernel/include/asm-generic/vmlinux.lds.h             |  16
-rw-r--r--  kernel/include/asm-generic/word-at-a-time.h          |  80
36 files changed, 862 insertions(+), 485 deletions(-)
diff --git a/kernel/include/asm-generic/asm-offsets.h b/kernel/include/asm-generic/asm-offsets.h
new file mode 100644
index 000000000..d370ee36a
--- /dev/null
+++ b/kernel/include/asm-generic/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/kernel/include/asm-generic/atomic-long.h b/kernel/include/asm-generic/atomic-long.h
index b7babf020..eb1973bad 100644
--- a/kernel/include/asm-generic/atomic-long.h
+++ b/kernel/include/asm-generic/atomic-long.h
@@ -23,236 +23,168 @@
typedef atomic64_t atomic_long_t;
#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
+#define ATOMIC_LONG_PFX(x) atomic64 ## x
-static inline long atomic_long_read(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return (long)atomic64_read(v);
-}
-
-static inline void atomic_long_set(atomic_long_t *l, long i)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- atomic64_set(v, i);
-}
-
-static inline void atomic_long_inc(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- atomic64_inc(v);
-}
-
-static inline void atomic_long_dec(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- atomic64_dec(v);
-}
-
-static inline void atomic_long_add(long i, atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- atomic64_add(i, v);
-}
-
-static inline void atomic_long_sub(long i, atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- atomic64_sub(i, v);
-}
-
-static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return atomic64_sub_and_test(i, v);
-}
-
-static inline int atomic_long_dec_and_test(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return atomic64_dec_and_test(v);
-}
-
-static inline int atomic_long_inc_and_test(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return atomic64_inc_and_test(v);
-}
-
-static inline int atomic_long_add_negative(long i, atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return atomic64_add_negative(i, v);
-}
-
-static inline long atomic_long_add_return(long i, atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return (long)atomic64_add_return(i, v);
-}
-
-static inline long atomic_long_sub_return(long i, atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return (long)atomic64_sub_return(i, v);
-}
-
-static inline long atomic_long_inc_return(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return (long)atomic64_inc_return(v);
-}
-
-static inline long atomic_long_dec_return(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return (long)atomic64_dec_return(v);
-}
-
-static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return (long)atomic64_add_unless(v, a, u);
-}
-
-#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))
-
-#define atomic_long_cmpxchg(l, old, new) \
- (atomic64_cmpxchg((atomic64_t *)(l), (old), (new)))
-#define atomic_long_xchg(v, new) \
- (atomic64_xchg((atomic64_t *)(v), (new)))
-
-#else /* BITS_PER_LONG == 64 */
+#else
typedef atomic_t atomic_long_t;
#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
-static inline long atomic_long_read(atomic_long_t *l)
-{
- atomic_t *v = (atomic_t *)l;
-
- return (long)atomic_read(v);
-}
-
-static inline void atomic_long_set(atomic_long_t *l, long i)
-{
- atomic_t *v = (atomic_t *)l;
-
- atomic_set(v, i);
-}
+#define ATOMIC_LONG_PFX(x) atomic ## x
+
+#endif
+
+#define ATOMIC_LONG_READ_OP(mo) \
+static inline long atomic_long_read##mo(const atomic_long_t *l) \
+{ \
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
+ \
+ return (long)ATOMIC_LONG_PFX(_read##mo)(v); \
+}
+ATOMIC_LONG_READ_OP()
+ATOMIC_LONG_READ_OP(_acquire)
+
+#undef ATOMIC_LONG_READ_OP
+
+#define ATOMIC_LONG_SET_OP(mo) \
+static inline void atomic_long_set##mo(atomic_long_t *l, long i) \
+{ \
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
+ \
+ ATOMIC_LONG_PFX(_set##mo)(v, i); \
+}
+ATOMIC_LONG_SET_OP()
+ATOMIC_LONG_SET_OP(_release)
+
+#undef ATOMIC_LONG_SET_OP
+
+#define ATOMIC_LONG_ADD_SUB_OP(op, mo) \
+static inline long \
+atomic_long_##op##_return##mo(long i, atomic_long_t *l) \
+{ \
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
+ \
+ return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(i, v); \
+}
+ATOMIC_LONG_ADD_SUB_OP(add,)
+ATOMIC_LONG_ADD_SUB_OP(add, _relaxed)
+ATOMIC_LONG_ADD_SUB_OP(add, _acquire)
+ATOMIC_LONG_ADD_SUB_OP(add, _release)
+ATOMIC_LONG_ADD_SUB_OP(sub,)
+ATOMIC_LONG_ADD_SUB_OP(sub, _relaxed)
+ATOMIC_LONG_ADD_SUB_OP(sub, _acquire)
+ATOMIC_LONG_ADD_SUB_OP(sub, _release)
+
+#undef ATOMIC_LONG_ADD_SUB_OP
+
+#define atomic_long_cmpxchg_relaxed(l, old, new) \
+ (ATOMIC_LONG_PFX(_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \
+ (old), (new)))
+#define atomic_long_cmpxchg_acquire(l, old, new) \
+ (ATOMIC_LONG_PFX(_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \
+ (old), (new)))
+#define atomic_long_cmpxchg_release(l, old, new) \
+ (ATOMIC_LONG_PFX(_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \
+ (old), (new)))
+#define atomic_long_cmpxchg(l, old, new) \
+ (ATOMIC_LONG_PFX(_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), (old), (new)))
+
+#define atomic_long_xchg_relaxed(v, new) \
+ (ATOMIC_LONG_PFX(_xchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
+#define atomic_long_xchg_acquire(v, new) \
+ (ATOMIC_LONG_PFX(_xchg_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
+#define atomic_long_xchg_release(v, new) \
+ (ATOMIC_LONG_PFX(_xchg_release)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
+#define atomic_long_xchg(v, new) \
+ (ATOMIC_LONG_PFX(_xchg)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
static inline void atomic_long_inc(atomic_long_t *l)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- atomic_inc(v);
+ ATOMIC_LONG_PFX(_inc)(v);
}
static inline void atomic_long_dec(atomic_long_t *l)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- atomic_dec(v);
+ ATOMIC_LONG_PFX(_dec)(v);
}
-static inline void atomic_long_add(long i, atomic_long_t *l)
-{
- atomic_t *v = (atomic_t *)l;
-
- atomic_add(i, v);
+#define ATOMIC_LONG_OP(op) \
+static inline void \
+atomic_long_##op(long i, atomic_long_t *l) \
+{ \
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
+ \
+ ATOMIC_LONG_PFX(_##op)(i, v); \
}
-static inline void atomic_long_sub(long i, atomic_long_t *l)
-{
- atomic_t *v = (atomic_t *)l;
+ATOMIC_LONG_OP(add)
+ATOMIC_LONG_OP(sub)
+ATOMIC_LONG_OP(and)
+ATOMIC_LONG_OP(or)
+ATOMIC_LONG_OP(xor)
+ATOMIC_LONG_OP(andnot)
- atomic_sub(i, v);
-}
+#undef ATOMIC_LONG_OP
static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- return atomic_sub_and_test(i, v);
+ return ATOMIC_LONG_PFX(_sub_and_test)(i, v);
}
static inline int atomic_long_dec_and_test(atomic_long_t *l)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- return atomic_dec_and_test(v);
+ return ATOMIC_LONG_PFX(_dec_and_test)(v);
}
static inline int atomic_long_inc_and_test(atomic_long_t *l)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- return atomic_inc_and_test(v);
+ return ATOMIC_LONG_PFX(_inc_and_test)(v);
}
static inline int atomic_long_add_negative(long i, atomic_long_t *l)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- return atomic_add_negative(i, v);
+ return ATOMIC_LONG_PFX(_add_negative)(i, v);
}
-static inline long atomic_long_add_return(long i, atomic_long_t *l)
-{
- atomic_t *v = (atomic_t *)l;
-
- return (long)atomic_add_return(i, v);
+#define ATOMIC_LONG_INC_DEC_OP(op, mo) \
+static inline long \
+atomic_long_##op##_return##mo(atomic_long_t *l) \
+{ \
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
+ \
+ return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(v); \
}
+ATOMIC_LONG_INC_DEC_OP(inc,)
+ATOMIC_LONG_INC_DEC_OP(inc, _relaxed)
+ATOMIC_LONG_INC_DEC_OP(inc, _acquire)
+ATOMIC_LONG_INC_DEC_OP(inc, _release)
+ATOMIC_LONG_INC_DEC_OP(dec,)
+ATOMIC_LONG_INC_DEC_OP(dec, _relaxed)
+ATOMIC_LONG_INC_DEC_OP(dec, _acquire)
+ATOMIC_LONG_INC_DEC_OP(dec, _release)
-static inline long atomic_long_sub_return(long i, atomic_long_t *l)
-{
- atomic_t *v = (atomic_t *)l;
-
- return (long)atomic_sub_return(i, v);
-}
-
-static inline long atomic_long_inc_return(atomic_long_t *l)
-{
- atomic_t *v = (atomic_t *)l;
-
- return (long)atomic_inc_return(v);
-}
-
-static inline long atomic_long_dec_return(atomic_long_t *l)
-{
- atomic_t *v = (atomic_t *)l;
-
- return (long)atomic_dec_return(v);
-}
+#undef ATOMIC_LONG_INC_DEC_OP
static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
{
- atomic_t *v = (atomic_t *)l;
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
- return (long)atomic_add_unless(v, a, u);
+ return (long)ATOMIC_LONG_PFX(_add_unless)(v, a, u);
}
-#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l))
-
-#define atomic_long_cmpxchg(l, old, new) \
- (atomic_cmpxchg((atomic_t *)(l), (old), (new)))
-#define atomic_long_xchg(v, new) \
- (atomic_xchg((atomic_t *)(v), (new)))
-
-#endif /* BITS_PER_LONG == 64 */
+#define atomic_long_inc_not_zero(l) \
+ ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))
#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
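
The rewrite above collapses the long list of hand-written wrappers into
token-pasting macros: ATOMIC_LONG_PFX() selects the atomic_t or atomic64_t
backend, and each memory-order suffix (_acquire, _release, _relaxed) is pasted
onto the backend's function name. A minimal standalone sketch of the same
pattern, using illustrative fake_atomic64_* stand-ins rather than the real
kernel types:

    #include <stdio.h>

    typedef struct { long counter; } fake_atomic64_t;

    /* the kernel macro picks "atomic" or "atomic64"; this sketch fakes 64-bit */
    #define ATOMIC_LONG_PFX(x)  fake_atomic64 ## x

    static long fake_atomic64_read(const fake_atomic64_t *v) { return v->counter; }
    static long fake_atomic64_read_acquire(const fake_atomic64_t *v) { return v->counter; }

    #define ATOMIC_LONG_READ_OP(mo)                                     \
    static long atomic_long_read##mo(const ATOMIC_LONG_PFX(_t) *l)      \
    {                                                                   \
        return ATOMIC_LONG_PFX(_read##mo)(l);                           \
    }
    ATOMIC_LONG_READ_OP()           /* generates atomic_long_read()         */
    ATOMIC_LONG_READ_OP(_acquire)   /* generates atomic_long_read_acquire() */

    int main(void)
    {
        fake_atomic64_t v = { .counter = 42 };

        printf("%ld %ld\n", atomic_long_read(&v), atomic_long_read_acquire(&v));
        return 0;
    }
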
diff --git a/kernel/include/asm-generic/atomic.h b/kernel/include/asm-generic/atomic.h
index 1973ad2b1..74f1a3704 100644
--- a/kernel/include/asm-generic/atomic.h
+++ b/kernel/include/asm-generic/atomic.h
@@ -98,15 +98,16 @@ ATOMIC_OP_RETURN(add, +)
ATOMIC_OP_RETURN(sub, -)
#endif
-#ifndef atomic_clear_mask
+#ifndef atomic_and
ATOMIC_OP(and, &)
-#define atomic_clear_mask(i, v) atomic_and(~(i), (v))
#endif
-#ifndef atomic_set_mask
-#define CONFIG_ARCH_HAS_ATOMIC_OR
+#ifndef atomic_or
ATOMIC_OP(or, |)
-#define atomic_set_mask(i, v) atomic_or((i), (v))
+#endif
+
+#ifndef atomic_xor
+ATOMIC_OP(xor, ^)
#endif
#undef ATOMIC_OP_RETURN
@@ -126,7 +127,7 @@ ATOMIC_OP(or, |)
* Atomically reads the value of @v.
*/
#ifndef atomic_read
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
+#define atomic_read(v) READ_ONCE((v)->counter)
#endif
/**
@@ -136,7 +137,7 @@ ATOMIC_OP(or, |)
*
* Atomically sets the value of @v to @i.
*/
-#define atomic_set(v, i) (((v)->counter) = (i))
+#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#include <linux/irqflags.h>
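
atomic_read() and atomic_set() above now go through READ_ONCE()/WRITE_ONCE()
instead of ACCESS_ONCE() and a plain assignment, so the compiler cannot tear,
cache or re-fuse the access. A rough user-space sketch of what those accessors
amount to; the MY_* names are illustrative, not the kernel implementation:

    #include <stdio.h>

    /* volatile casts stop the compiler from optimising the access away */
    #define MY_READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
    #define MY_WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

    typedef struct { int counter; } my_atomic_t;

    #define my_atomic_read(v)   MY_READ_ONCE((v)->counter)
    #define my_atomic_set(v, i) MY_WRITE_ONCE((v)->counter, (i))

    int main(void)
    {
        my_atomic_t a;

        my_atomic_set(&a, 7);
        printf("%d\n", my_atomic_read(&a));
        return 0;
    }
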
diff --git a/kernel/include/asm-generic/atomic64.h b/kernel/include/asm-generic/atomic64.h
index 30ad9c86c..d48e78cca 100644
--- a/kernel/include/asm-generic/atomic64.h
+++ b/kernel/include/asm-generic/atomic64.h
@@ -32,6 +32,10 @@ extern long long atomic64_##op##_return(long long a, atomic64_t *v);
ATOMIC64_OPS(add)
ATOMIC64_OPS(sub)
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
+
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
diff --git a/kernel/include/asm-generic/barrier.h b/kernel/include/asm-generic/barrier.h
index f5c40b0fa..b42afada1 100644
--- a/kernel/include/asm-generic/barrier.h
+++ b/kernel/include/asm-generic/barrier.h
@@ -55,19 +55,45 @@
#endif
#ifdef CONFIG_SMP
+
+#ifndef smp_mb
#define smp_mb() mb()
+#endif
+
+#ifndef smp_rmb
#define smp_rmb() rmb()
+#endif
+
+#ifndef smp_wmb
#define smp_wmb() wmb()
+#endif
+
+#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends() read_barrier_depends()
-#else
+#endif
+
+#else /* !CONFIG_SMP */
+
+#ifndef smp_mb
#define smp_mb() barrier()
+#endif
+
+#ifndef smp_rmb
#define smp_rmb() barrier()
+#endif
+
+#ifndef smp_wmb
#define smp_wmb() barrier()
+#endif
+
+#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends() do { } while (0)
#endif
-#ifndef set_mb
-#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
+#endif /* CONFIG_SMP */
+
+#ifndef smp_store_mb
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
#endif
#ifndef smp_mb__before_atomic
@@ -82,12 +108,12 @@
do { \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
- ACCESS_ONCE(*p) = (v); \
+ WRITE_ONCE(*p, v); \
} while (0)
#define smp_load_acquire(p) \
({ \
- typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
___p1; \
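
The smp_store_release()/smp_load_acquire() pair above attaches one-way
ordering to a single store and load: the release store publishes everything
written before it, and the acquire load sees those writes once it observes the
flag. A user-space analogue with C11 atomics rather than the kernel macros
(run sequentially here only to keep the sketch self-contained; in practice the
two sides run on different CPUs):

    #include <stdatomic.h>
    #include <stdio.h>

    static int data;
    static atomic_int ready;

    static void producer(void)
    {
        data = 42;                                               /* plain store */
        atomic_store_explicit(&ready, 1, memory_order_release);  /* publish     */
    }

    static void consumer(void)
    {
        if (atomic_load_explicit(&ready, memory_order_acquire))  /* acquire     */
            printf("data = %d\n", data);                         /* sees 42     */
    }

    int main(void)
    {
        producer();
        consumer();
        return 0;
    }
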
diff --git a/kernel/include/asm-generic/bitops/count_zeros.h b/kernel/include/asm-generic/bitops/count_zeros.h
deleted file mode 100644
index 97520d21f..000000000
--- a/kernel/include/asm-generic/bitops/count_zeros.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* Count leading and trailing zeros functions
- *
- * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#ifndef _ASM_GENERIC_BITOPS_COUNT_ZEROS_H_
-#define _ASM_GENERIC_BITOPS_COUNT_ZEROS_H_
-
-#include <asm/bitops.h>
-
-/**
- * count_leading_zeros - Count the number of zeros from the MSB back
- * @x: The value
- *
- * Count the number of leading zeros from the MSB going towards the LSB in @x.
- *
- * If the MSB of @x is set, the result is 0.
- * If only the LSB of @x is set, then the result is BITS_PER_LONG-1.
- * If @x is 0 then the result is COUNT_LEADING_ZEROS_0.
- */
-static inline int count_leading_zeros(unsigned long x)
-{
- if (sizeof(x) == 4)
- return BITS_PER_LONG - fls(x);
- else
- return BITS_PER_LONG - fls64(x);
-}
-
-#define COUNT_LEADING_ZEROS_0 BITS_PER_LONG
-
-/**
- * count_trailing_zeros - Count the number of zeros from the LSB forwards
- * @x: The value
- *
- * Count the number of trailing zeros from the LSB going towards the MSB in @x.
- *
- * If the LSB of @x is set, the result is 0.
- * If only the MSB of @x is set, then the result is BITS_PER_LONG-1.
- * If @x is 0 then the result is COUNT_TRAILING_ZEROS_0.
- */
-static inline int count_trailing_zeros(unsigned long x)
-{
-#define COUNT_TRAILING_ZEROS_0 (-1)
-
- if (sizeof(x) == 4)
- return ffs(x);
- else
- return (x != 0) ? __ffs(x) : COUNT_TRAILING_ZEROS_0;
-}
-
-#endif /* _ASM_GENERIC_BITOPS_COUNT_ZEROS_H_ */
diff --git a/kernel/include/asm-generic/cmpxchg.h b/kernel/include/asm-generic/cmpxchg.h
index 811fb1e9b..e5f9080e8 100644
--- a/kernel/include/asm-generic/cmpxchg.h
+++ b/kernel/include/asm-generic/cmpxchg.h
@@ -79,23 +79,23 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
}
}
-#define xchg(ptr, x) \
- ((__typeof__(*(ptr))) __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
+#define xchg(ptr, x) ({ \
+ ((__typeof__(*(ptr))) \
+ __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
+})
#endif /* xchg */
/*
* Atomic compare and exchange.
- *
- * Do not define __HAVE_ARCH_CMPXCHG because we want to use it to check whether
- * a cmpxchg primitive faster than repeated local irq save/restore exists.
*/
#include <asm-generic/cmpxchg-local.h>
#ifndef cmpxchg_local
-#define cmpxchg_local(ptr, o, n) \
+#define cmpxchg_local(ptr, o, n) ({ \
((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
- (unsigned long)(n), sizeof(*(ptr))))
+ (unsigned long)(n), sizeof(*(ptr)))); \
+})
#endif
#ifndef cmpxchg64_local
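
Wrapping xchg() and cmpxchg_local() in ({ ... }) turns each macro body into a
GCC statement expression, so the multi-statement expansion still behaves as a
single expression with a value. A standalone illustration of the construct (a
compiler extension the kernel already depends on; swap_and_return_old() is an
illustrative name, not a kernel API):

    #include <stdio.h>

    #define swap_and_return_old(ptr, newval) ({         \
        __typeof__(*(ptr)) __old = *(ptr);              \
        *(ptr) = (newval);                              \
        __old;  /* value of the whole expression */     \
    })

    int main(void)
    {
        int x = 3;
        int old = swap_and_return_old(&x, 7);   /* old == 3, x == 7 */

        printf("old=%d new=%d\n", old, x);
        return 0;
    }
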
diff --git a/kernel/include/asm-generic/cputime_nsecs.h b/kernel/include/asm-generic/cputime_nsecs.h
index 041948589..0f1c6f315 100644
--- a/kernel/include/asm-generic/cputime_nsecs.h
+++ b/kernel/include/asm-generic/cputime_nsecs.h
@@ -75,7 +75,7 @@ typedef u64 __nocast cputime64_t;
*/
static inline cputime_t timespec_to_cputime(const struct timespec *val)
{
- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
+ u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
return (__force cputime_t) ret;
}
static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
@@ -91,7 +91,8 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
*/
static inline cputime_t timeval_to_cputime(const struct timeval *val)
{
- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
+ u64 ret = (u64)val->tv_sec * NSEC_PER_SEC +
+ val->tv_usec * NSEC_PER_USEC;
return (__force cputime_t) ret;
}
static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
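
The (u64) casts above matter on 32-bit kernels, where tv_sec is a 32-bit
quantity: without the cast the multiplication by NSEC_PER_SEC is performed in
32 bits and wraps before the result is widened. A standalone demonstration
with fixed-width types, assuming the usual 32-bit unsigned int:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t tv_sec = 5;    /* stands in for a 32-bit tv_sec */

        uint64_t wrong = (uint64_t)(tv_sec * 1000000000U); /* wraps: 705032704 */
        uint64_t right = (uint64_t)tv_sec * 1000000000U;   /* 5000000000       */

        printf("wrong=%llu right=%llu\n",
               (unsigned long long)wrong, (unsigned long long)right);
        return 0;
    }
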
diff --git a/kernel/include/asm-generic/dma-mapping-common.h b/kernel/include/asm-generic/dma-mapping-common.h
index 940d5ec12..b1bc954ec 100644
--- a/kernel/include/asm-generic/dma-mapping-common.h
+++ b/kernel/include/asm-generic/dma-mapping-common.h
@@ -6,6 +6,7 @@
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
+#include <asm-generic/dma-coherent.h>
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
size_t size,
@@ -237,4 +238,121 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
+#ifndef arch_dma_alloc_attrs
+#define arch_dma_alloc_attrs(dev, flag) (true)
+#endif
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ void *cpu_addr;
+
+ BUG_ON(!ops);
+
+ if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
+ return cpu_addr;
+
+ if (!arch_dma_alloc_attrs(&dev, &flag))
+ return NULL;
+ if (!ops->alloc)
+ return NULL;
+
+ cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
+ debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+ return cpu_addr;
+}
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!ops);
+ WARN_ON(irqs_disabled());
+
+ if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
+ return;
+
+ if (!ops->free)
+ return;
+
+ debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+ ops->free(dev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
+}
+
+static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+ return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
+}
+
+static inline void dma_free_noncoherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+ dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+}
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ debug_dma_mapping_error(dev, dma_addr);
+
+ if (get_dma_ops(dev)->mapping_error)
+ return get_dma_ops(dev)->mapping_error(dev, dma_addr);
+
+#ifdef DMA_ERROR_CODE
+ return dma_addr == DMA_ERROR_CODE;
+#else
+ return 0;
+#endif
+}
+
+#ifndef HAVE_ARCH_DMA_SUPPORTED
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (!ops)
+ return 0;
+ if (!ops->dma_supported)
+ return 1;
+ return ops->dma_supported(dev, mask);
+}
+#endif
+
+#ifndef HAVE_ARCH_DMA_SET_MASK
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (ops->set_dma_mask)
+ return ops->set_dma_mask(dev, mask);
+
+ if (!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+ *dev->dma_mask = mask;
+ return 0;
+}
+#endif
+
#endif
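
With dma_alloc_attrs()/dma_free_attrs() now implemented here, the familiar
dma_alloc_coherent()/dma_free_coherent() calls become thin generic wrappers. A
hedged sketch of typical driver usage, assuming a kernel-module context; the
device pointer, BUF_SIZE and example_setup() are illustrative placeholders and
not part of this patch:

    #include <linux/dma-mapping.h>

    #define BUF_SIZE 4096

    static int example_setup(struct device *mydev)
    {
        dma_addr_t dma_handle;
        void *cpu_addr;

        cpu_addr = dma_alloc_coherent(mydev, BUF_SIZE, &dma_handle, GFP_KERNEL);
        if (!cpu_addr)
            return -ENOMEM;

        /* program the device with dma_handle, touch the buffer via cpu_addr */

        dma_free_coherent(mydev, BUF_SIZE, cpu_addr, dma_handle);
        return 0;
    }
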
diff --git a/kernel/include/asm-generic/early_ioremap.h b/kernel/include/asm-generic/early_ioremap.h
index a5de55c04..734ad4db3 100644
--- a/kernel/include/asm-generic/early_ioremap.h
+++ b/kernel/include/asm-generic/early_ioremap.h
@@ -11,6 +11,8 @@ extern void __iomem *early_ioremap(resource_size_t phys_addr,
unsigned long size);
extern void *early_memremap(resource_size_t phys_addr,
unsigned long size);
+extern void *early_memremap_ro(resource_size_t phys_addr,
+ unsigned long size);
extern void early_iounmap(void __iomem *addr, unsigned long size);
extern void early_memunmap(void *addr, unsigned long size);
@@ -33,6 +35,12 @@ extern void early_ioremap_setup(void);
*/
extern void early_ioremap_reset(void);
+/*
+ * Early copy from unmapped memory to kernel mapped memory.
+ */
+extern void copy_from_early_mem(void *dest, phys_addr_t src,
+ unsigned long size);
+
#else
static inline void early_ioremap_init(void) { }
static inline void early_ioremap_setup(void) { }
diff --git a/kernel/include/asm-generic/fixmap.h b/kernel/include/asm-generic/fixmap.h
index f23174fb9..1cbb8338e 100644
--- a/kernel/include/asm-generic/fixmap.h
+++ b/kernel/include/asm-generic/fixmap.h
@@ -46,6 +46,9 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
#ifndef FIXMAP_PAGE_NORMAL
#define FIXMAP_PAGE_NORMAL PAGE_KERNEL
#endif
+#if !defined(FIXMAP_PAGE_RO) && defined(PAGE_KERNEL_RO)
+#define FIXMAP_PAGE_RO PAGE_KERNEL_RO
+#endif
#ifndef FIXMAP_PAGE_NOCACHE
#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NOCACHE
#endif
diff --git a/kernel/include/asm-generic/gpio.h b/kernel/include/asm-generic/gpio.h
index 9bb0d1172..40ec1433f 100644
--- a/kernel/include/asm-generic/gpio.h
+++ b/kernel/include/asm-generic/gpio.h
@@ -128,11 +128,6 @@ static inline int gpio_export_link(struct device *dev, const char *name,
return gpiod_export_link(dev, name, gpio_to_desc(gpio));
}
-static inline int gpio_sysfs_set_active_low(unsigned gpio, int value)
-{
- return gpiod_sysfs_set_active_low(gpio_to_desc(gpio), value);
-}
-
static inline void gpio_unexport(unsigned gpio)
{
gpiod_unexport(gpio_to_desc(gpio));
diff --git a/kernel/include/asm-generic/io-64-nonatomic-hi-lo.h b/kernel/include/asm-generic/io-64-nonatomic-hi-lo.h
index 2e29d13fc..32b73abce 100644
--- a/kernel/include/asm-generic/io-64-nonatomic-hi-lo.h
+++ b/kernel/include/asm-generic/io-64-nonatomic-hi-lo.h
@@ -1,32 +1,2 @@
-#ifndef _ASM_IO_64_NONATOMIC_HI_LO_H_
-#define _ASM_IO_64_NONATOMIC_HI_LO_H_
-
-#include <linux/io.h>
-#include <asm-generic/int-ll64.h>
-
-static inline __u64 hi_lo_readq(const volatile void __iomem *addr)
-{
- const volatile u32 __iomem *p = addr;
- u32 low, high;
-
- high = readl(p + 1);
- low = readl(p);
-
- return low + ((u64)high << 32);
-}
-
-static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr)
-{
- writel(val >> 32, addr + 4);
- writel(val, addr);
-}
-
-#ifndef readq
-#define readq hi_lo_readq
-#endif
-
-#ifndef writeq
-#define writeq hi_lo_writeq
-#endif
-
-#endif /* _ASM_IO_64_NONATOMIC_HI_LO_H_ */
+/* XXX: delete asm-generic/io-64-nonatomic-hi-lo.h after converting new users */
+#include <linux/io-64-nonatomic-hi-lo.h>
diff --git a/kernel/include/asm-generic/io-64-nonatomic-lo-hi.h b/kernel/include/asm-generic/io-64-nonatomic-lo-hi.h
index 0efacff0a..55a627c37 100644
--- a/kernel/include/asm-generic/io-64-nonatomic-lo-hi.h
+++ b/kernel/include/asm-generic/io-64-nonatomic-lo-hi.h
@@ -1,32 +1,2 @@
-#ifndef _ASM_IO_64_NONATOMIC_LO_HI_H_
-#define _ASM_IO_64_NONATOMIC_LO_HI_H_
-
-#include <linux/io.h>
-#include <asm-generic/int-ll64.h>
-
-static inline __u64 lo_hi_readq(const volatile void __iomem *addr)
-{
- const volatile u32 __iomem *p = addr;
- u32 low, high;
-
- low = readl(p);
- high = readl(p + 1);
-
- return low + ((u64)high << 32);
-}
-
-static inline void lo_hi_writeq(__u64 val, volatile void __iomem *addr)
-{
- writel(val, addr);
- writel(val >> 32, addr + 4);
-}
-
-#ifndef readq
-#define readq lo_hi_readq
-#endif
-
-#ifndef writeq
-#define writeq lo_hi_writeq
-#endif
-
-#endif /* _ASM_IO_64_NONATOMIC_LO_HI_H_ */
+/* XXX: delete asm-generic/io-64-nonatomic-lo-hi.h after converting new users */
+#include <linux/io-64-nonatomic-lo-hi.h>
diff --git a/kernel/include/asm-generic/io.h b/kernel/include/asm-generic/io.h
index 9db042304..eed3bbe88 100644
--- a/kernel/include/asm-generic/io.h
+++ b/kernel/include/asm-generic/io.h
@@ -736,6 +736,35 @@ static inline void *phys_to_virt(unsigned long address)
}
#endif
+/**
+ * DOC: ioremap() and ioremap_*() variants
+ *
+ * If you have an IOMMU your architecture is expected to have both ioremap()
+ * and iounmap() implemented otherwise the asm-generic helpers will provide a
+ * direct mapping.
+ *
+ * There are ioremap_*() call variants; if you have no IOMMU they all
+ * naturally default to a direct mapping, and you can override these defaults.
+ * If you have an IOMMU you are highly encouraged to provide your own
+ * ioremap variant implementation, as there currently is no safe,
+ * architecture-agnostic default. To avoid possible improper behaviour, the
+ * default asm-generic ioremap_*() variants all return NULL when an IOMMU is
+ * available. If you've defined your own ioremap_*() variant you must then
+ * define that variant to itself to avoid the default NULL return.
+ */
+
+#ifdef CONFIG_MMU
+
+#ifndef ioremap_uc
+#define ioremap_uc ioremap_uc
+static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
+{
+ return NULL;
+}
+#endif
+
+#else /* !CONFIG_MMU */
+
/*
* Change "struct page" to physical address.
*
@@ -743,7 +772,6 @@ static inline void *phys_to_virt(unsigned long address)
* you'll need to provide your own definitions.
*/
-#ifndef CONFIG_MMU
#ifndef ioremap
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
@@ -769,6 +797,14 @@ static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size)
}
#endif
+#ifndef ioremap_uc
+#define ioremap_uc ioremap_uc
+static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
+{
+ return ioremap_nocache(offset, size);
+}
+#endif
+
#ifndef ioremap_wc
#define ioremap_wc ioremap_wc
static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
@@ -777,8 +813,17 @@ static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
}
#endif
+#ifndef ioremap_wt
+#define ioremap_wt ioremap_wt
+static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
+{
+ return ioremap_nocache(offset, size);
+}
+#endif
+
#ifndef iounmap
#define iounmap iounmap
+
static inline void iounmap(void __iomem *addr)
{
}
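
The DOC block and the ioremap_uc()/ioremap_wt() fallbacks above rely on the
kernel's define-to-itself idiom: an architecture that supplies its own variant
also defines the macro to its own name, and the generic header only installs a
fallback under #ifndef. A minimal standalone illustration of the idiom (my_op
is an illustrative name):

    #include <stdio.h>

    /* "arch" header: provides its own my_op() and claims the name */
    #define my_op my_op
    static int my_op(void) { return 1; }

    /* "generic" header: only installs a fallback when the arch did not */
    #ifndef my_op
    #define my_op my_op
    static int my_op(void) { return 0; }
    #endif

    int main(void)
    {
        printf("%d\n", my_op());    /* prints 1: the arch override wins */
        return 0;
    }
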
diff --git a/kernel/include/asm-generic/iomap.h b/kernel/include/asm-generic/iomap.h
index 1b4101164..d8f8622fa 100644
--- a/kernel/include/asm-generic/iomap.h
+++ b/kernel/include/asm-generic/iomap.h
@@ -66,6 +66,10 @@ extern void ioport_unmap(void __iomem *);
#define ioremap_wc ioremap_nocache
#endif
+#ifndef ARCH_HAS_IOREMAP_WT
+#define ioremap_wt ioremap_nocache
+#endif
+
#ifdef CONFIG_PCI
/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
diff --git a/kernel/include/asm-generic/memory_model.h b/kernel/include/asm-generic/memory_model.h
index 14909b0b9..4b4b056a6 100644
--- a/kernel/include/asm-generic/memory_model.h
+++ b/kernel/include/asm-generic/memory_model.h
@@ -69,6 +69,12 @@
})
#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
+/*
+ * Convert a physical address to a Page Frame Number and back
+ */
+#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT))
+#define __pfn_to_phys(pfn) PFN_PHYS(pfn)
+
#define page_to_pfn __page_to_pfn
#define pfn_to_page __pfn_to_page
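
The new __phys_to_pfn()/__pfn_to_phys() helpers are plain shifts by
PAGE_SHIFT. A standalone illustration assuming a 4 KiB page size, with
PFN_PHYS() open-coded for the sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12                               /* 4 KiB pages assumed */
    #define PFN_PHYS(pfn)        ((uint64_t)(pfn) << PAGE_SHIFT)
    #define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT))
    #define __pfn_to_phys(pfn)   PFN_PHYS(pfn)

    int main(void)
    {
        uint64_t paddr = 0x12345678ULL;
        unsigned long pfn = __phys_to_pfn(paddr);       /* 0x12345 */

        printf("pfn=0x%lx phys=0x%llx\n", pfn,
               (unsigned long long)__pfn_to_phys(pfn)); /* 0x12345000 */
        return 0;
    }
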
diff --git a/kernel/include/asm-generic/mm-arch-hooks.h b/kernel/include/asm-generic/mm-arch-hooks.h
new file mode 100644
index 000000000..5ff0e5193
--- /dev/null
+++ b/kernel/include/asm-generic/mm-arch-hooks.h
@@ -0,0 +1,16 @@
+/*
+ * Architecture specific mm hooks
+ */
+
+#ifndef _ASM_GENERIC_MM_ARCH_HOOKS_H
+#define _ASM_GENERIC_MM_ARCH_HOOKS_H
+
+/*
+ * This file should be included through arch/../include/asm/Kbuild for
+ * the architecture which doesn't need specific mm hooks.
+ *
+ * In that case, the generic hooks defined in include/linux/mm-arch-hooks.h
+ * are used.
+ */
+
+#endif /* _ASM_GENERIC_MM_ARCH_HOOKS_H */
diff --git a/kernel/include/asm-generic/mutex-dec.h b/kernel/include/asm-generic/mutex-dec.h
index d4f9fb4e5..fd694cfd6 100644
--- a/kernel/include/asm-generic/mutex-dec.h
+++ b/kernel/include/asm-generic/mutex-dec.h
@@ -20,7 +20,7 @@
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
- if (unlikely(atomic_dec_return(count) < 0))
+ if (unlikely(atomic_dec_return_acquire(count) < 0))
fail_fn(count);
}
@@ -35,7 +35,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
static inline int
__mutex_fastpath_lock_retval(atomic_t *count)
{
- if (unlikely(atomic_dec_return(count) < 0))
+ if (unlikely(atomic_dec_return_acquire(count) < 0))
return -1;
return 0;
}
@@ -56,7 +56,7 @@ __mutex_fastpath_lock_retval(atomic_t *count)
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
- if (unlikely(atomic_inc_return(count) <= 0))
+ if (unlikely(atomic_inc_return_release(count) <= 0))
fail_fn(count);
}
@@ -80,7 +80,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
- if (likely(atomic_cmpxchg(count, 1, 0) == 1))
+ if (likely(atomic_cmpxchg_acquire(count, 1, 0) == 1))
return 1;
return 0;
}
diff --git a/kernel/include/asm-generic/mutex-xchg.h b/kernel/include/asm-generic/mutex-xchg.h
index f169ec064..a6b4a7bd6 100644
--- a/kernel/include/asm-generic/mutex-xchg.h
+++ b/kernel/include/asm-generic/mutex-xchg.h
@@ -31,7 +31,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
* to ensure that any waiting tasks are woken up by the
* unlock slow path.
*/
- if (likely(atomic_xchg(count, -1) != 1))
+ if (likely(atomic_xchg_acquire(count, -1) != 1))
fail_fn(count);
}
@@ -46,7 +46,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
static inline int
__mutex_fastpath_lock_retval(atomic_t *count)
{
- if (unlikely(atomic_xchg(count, 0) != 1))
+ if (unlikely(atomic_xchg_acquire(count, 0) != 1))
if (likely(atomic_xchg(count, -1) != 1))
return -1;
return 0;
@@ -67,7 +67,7 @@ __mutex_fastpath_lock_retval(atomic_t *count)
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
- if (unlikely(atomic_xchg(count, 1) != 0))
+ if (unlikely(atomic_xchg_release(count, 1) != 0))
fail_fn(count);
}
@@ -91,7 +91,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
- int prev = atomic_xchg(count, 0);
+ int prev = atomic_xchg_acquire(count, 0);
if (unlikely(prev < 0)) {
/*
@@ -105,7 +105,7 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
* owner's unlock path needlessly, but that's not a problem
* in practice. ]
*/
- prev = atomic_xchg(count, prev);
+ prev = atomic_xchg_acquire(count, prev);
if (prev < 0)
prev = 0;
}
diff --git a/kernel/include/asm-generic/pci-dma-compat.h b/kernel/include/asm-generic/pci-dma-compat.h
index c110843fc..eafce7b6f 100644
--- a/kernel/include/asm-generic/pci-dma-compat.h
+++ b/kernel/include/asm-generic/pci-dma-compat.h
@@ -6,12 +6,6 @@
#include <linux/dma-mapping.h>
-static inline int
-pci_dma_supported(struct pci_dev *hwdev, u64 mask)
-{
- return dma_supported(hwdev == NULL ? NULL : &hwdev->dev, mask);
-}
-
static inline void *
pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
dma_addr_t *dma_handle)
diff --git a/kernel/include/asm-generic/pci.h b/kernel/include/asm-generic/pci.h
index e80a0495e..f24bc519b 100644
--- a/kernel/include/asm-generic/pci.h
+++ b/kernel/include/asm-generic/pci.h
@@ -6,19 +6,6 @@
#ifndef _ASM_GENERIC_PCI_H
#define _ASM_GENERIC_PCI_H
-static inline struct resource *
-pcibios_select_root(struct pci_dev *pdev, struct resource *res)
-{
- struct resource *root = NULL;
-
- if (res->flags & IORESOURCE_IO)
- root = &ioport_resource;
- if (res->flags & IORESOURCE_MEM)
- root = &iomem_resource;
-
- return root;
-}
-
#ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
diff --git a/kernel/include/asm-generic/pci_iomap.h b/kernel/include/asm-generic/pci_iomap.h
index 7389c8711..b1e17fcee 100644
--- a/kernel/include/asm-generic/pci_iomap.h
+++ b/kernel/include/asm-generic/pci_iomap.h
@@ -15,9 +15,13 @@ struct pci_dev;
#ifdef CONFIG_PCI
/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+extern void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max);
extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
unsigned long offset,
unsigned long maxlen);
+extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
+ unsigned long offset,
+ unsigned long maxlen);
/* Create a virtual mapping cookie for a port on a given PCI device.
* Do not call this directly, it exists to make it easier for architectures
* to override */
@@ -34,12 +38,22 @@ static inline void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned lon
return NULL;
}
+static inline void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max)
+{
+ return NULL;
+}
static inline void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
unsigned long offset,
unsigned long maxlen)
{
return NULL;
}
+static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
+ unsigned long offset,
+ unsigned long maxlen)
+{
+ return NULL;
+}
#endif
#endif /* __ASM_GENERIC_IO_H */
diff --git a/kernel/include/asm-generic/pgtable.h b/kernel/include/asm-generic/pgtable.h
index 39f1d6a2b..14b0ff32f 100644
--- a/kernel/include/asm-generic/pgtable.h
+++ b/kernel/include/asm-generic/pgtable.h
@@ -30,9 +30,19 @@ extern int ptep_set_access_flags(struct vm_area_struct *vma,
#endif
#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp,
pmd_t entry, int dirty);
+#else
+static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty)
+{
+ BUILD_BUG();
+ return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
@@ -64,12 +74,12 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
return r;
}
-#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+#else
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long address,
pmd_t *pmdp)
{
- BUG();
+ BUILD_BUG();
return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -81,8 +91,21 @@ int ptep_clear_flush_young(struct vm_area_struct *vma,
#endif
#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
-int pmdp_clear_flush_young(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+#else
+/*
+ * Although it is relevant to THP only, this API is called from generic rmap
+ * code under PageTransHuge(), hence it needs a dummy implementation for !THP.
+ */
+static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ BUILD_BUG();
+ return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
@@ -96,11 +119,11 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
}
#endif
-#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR
+#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
- unsigned long address,
- pmd_t *pmdp)
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long address,
+ pmd_t *pmdp)
{
pmd_t pmd = *pmdp;
pmd_clear(pmdp);
@@ -109,13 +132,13 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
-#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL
+#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
+static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
unsigned long address, pmd_t *pmdp,
int full)
{
- return pmdp_get_and_clear(mm, address, pmdp);
+ return pmdp_huge_get_and_clear(mm, address, pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
@@ -152,8 +175,8 @@ extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
pte_t *ptep);
#endif
-#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
-extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
+#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
+extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
unsigned long address,
pmd_t *pmdp);
#endif
@@ -175,11 +198,11 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
pmd_t old_pmd = *pmdp;
set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
}
-#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+#else
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
unsigned long address, pmd_t *pmdp)
{
- BUG();
+ BUILD_BUG();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
@@ -189,6 +212,22 @@ extern void pmdp_splitting_flush(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
#endif
+#ifndef pmdp_collapse_flush
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+#else
+static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ BUILD_BUG();
+ return *pmdp;
+}
+#define pmdp_collapse_flush pmdp_collapse_flush
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pgtable);
@@ -232,7 +271,7 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
- BUG();
+ BUILD_BUG();
return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -262,6 +301,10 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
#define pgprot_writecombine pgprot_noncached
#endif
+#ifndef pgprot_writethrough
+#define pgprot_writethrough pgprot_noncached
+#endif
+
#ifndef pgprot_device
#define pgprot_device pgprot_noncached
#endif
@@ -462,6 +505,16 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
return pmd;
}
+static inline pte_t pte_clear_soft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
return pte;
diff --git a/kernel/include/asm-generic/preempt.h b/kernel/include/asm-generic/preempt.h
index eb6f9e6c3..5d8ffa3e6 100644
--- a/kernel/include/asm-generic/preempt.h
+++ b/kernel/include/asm-generic/preempt.h
@@ -24,7 +24,7 @@ static __always_inline void preempt_count_set(int pc)
* must be macros to avoid header recursion hell
*/
#define init_task_preempt_count(p) do { \
- task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
+ task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
} while (0)
#define init_idle_preempt_count(p, cpu) do { \
@@ -71,19 +71,17 @@ static __always_inline bool __preempt_count_dec_and_test(void)
/*
* Returns true when we need to resched and can (barring IRQ state).
*/
-static __always_inline bool should_resched(void)
+static __always_inline bool should_resched(int preempt_offset)
{
- return unlikely(!preempt_count() && tif_need_resched());
+ return unlikely(preempt_count() == preempt_offset &&
+ tif_need_resched());
}
#ifdef CONFIG_PREEMPT
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
-
-#ifdef CONFIG_CONTEXT_TRACKING
-extern asmlinkage void preempt_schedule_context(void);
-#define __preempt_schedule_context() preempt_schedule_context()
-#endif
+extern asmlinkage void preempt_schedule_notrace(void);
+#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPT */
#endif /* __ASM_PREEMPT_H */
diff --git a/kernel/include/asm-generic/qrwlock.h b/kernel/include/asm-generic/qrwlock.h
index 6383d54bf..54a8e65e1 100644
--- a/kernel/include/asm-generic/qrwlock.h
+++ b/kernel/include/asm-generic/qrwlock.h
@@ -36,39 +36,39 @@
/*
* External function declarations
*/
-extern void queue_read_lock_slowpath(struct qrwlock *lock);
-extern void queue_write_lock_slowpath(struct qrwlock *lock);
+extern void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts);
+extern void queued_write_lock_slowpath(struct qrwlock *lock);
/**
- * queue_read_can_lock- would read_trylock() succeed?
+ * queued_read_can_lock- would read_trylock() succeed?
* @lock: Pointer to queue rwlock structure
*/
-static inline int queue_read_can_lock(struct qrwlock *lock)
+static inline int queued_read_can_lock(struct qrwlock *lock)
{
return !(atomic_read(&lock->cnts) & _QW_WMASK);
}
/**
- * queue_write_can_lock- would write_trylock() succeed?
+ * queued_write_can_lock- would write_trylock() succeed?
* @lock: Pointer to queue rwlock structure
*/
-static inline int queue_write_can_lock(struct qrwlock *lock)
+static inline int queued_write_can_lock(struct qrwlock *lock)
{
return !atomic_read(&lock->cnts);
}
/**
- * queue_read_trylock - try to acquire read lock of a queue rwlock
+ * queued_read_trylock - try to acquire read lock of a queue rwlock
* @lock : Pointer to queue rwlock structure
* Return: 1 if lock acquired, 0 if failed
*/
-static inline int queue_read_trylock(struct qrwlock *lock)
+static inline int queued_read_trylock(struct qrwlock *lock)
{
u32 cnts;
cnts = atomic_read(&lock->cnts);
if (likely(!(cnts & _QW_WMASK))) {
- cnts = (u32)atomic_add_return(_QR_BIAS, &lock->cnts);
+ cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
if (likely(!(cnts & _QW_WMASK)))
return 1;
atomic_sub(_QR_BIAS, &lock->cnts);
@@ -77,11 +77,11 @@ static inline int queue_read_trylock(struct qrwlock *lock)
}
/**
- * queue_write_trylock - try to acquire write lock of a queue rwlock
+ * queued_write_trylock - try to acquire write lock of a queue rwlock
* @lock : Pointer to queue rwlock structure
* Return: 1 if lock acquired, 0 if failed
*/
-static inline int queue_write_trylock(struct qrwlock *lock)
+static inline int queued_write_trylock(struct qrwlock *lock)
{
u32 cnts;
@@ -89,78 +89,70 @@ static inline int queue_write_trylock(struct qrwlock *lock)
if (unlikely(cnts))
return 0;
- return likely(atomic_cmpxchg(&lock->cnts,
- cnts, cnts | _QW_LOCKED) == cnts);
+ return likely(atomic_cmpxchg_acquire(&lock->cnts,
+ cnts, cnts | _QW_LOCKED) == cnts);
}
/**
- * queue_read_lock - acquire read lock of a queue rwlock
+ * queued_read_lock - acquire read lock of a queue rwlock
* @lock: Pointer to queue rwlock structure
*/
-static inline void queue_read_lock(struct qrwlock *lock)
+static inline void queued_read_lock(struct qrwlock *lock)
{
u32 cnts;
- cnts = atomic_add_return(_QR_BIAS, &lock->cnts);
+ cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
if (likely(!(cnts & _QW_WMASK)))
return;
/* The slowpath will decrement the reader count, if necessary. */
- queue_read_lock_slowpath(lock);
+ queued_read_lock_slowpath(lock, cnts);
}
/**
- * queue_write_lock - acquire write lock of a queue rwlock
+ * queued_write_lock - acquire write lock of a queue rwlock
* @lock : Pointer to queue rwlock structure
*/
-static inline void queue_write_lock(struct qrwlock *lock)
+static inline void queued_write_lock(struct qrwlock *lock)
{
/* Optimize for the unfair lock case where the fair flag is 0. */
- if (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0)
+ if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
return;
- queue_write_lock_slowpath(lock);
+ queued_write_lock_slowpath(lock);
}
/**
- * queue_read_unlock - release read lock of a queue rwlock
+ * queued_read_unlock - release read lock of a queue rwlock
* @lock : Pointer to queue rwlock structure
*/
-static inline void queue_read_unlock(struct qrwlock *lock)
+static inline void queued_read_unlock(struct qrwlock *lock)
{
/*
* Atomically decrement the reader count
*/
- smp_mb__before_atomic();
- atomic_sub(_QR_BIAS, &lock->cnts);
+ (void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
}
-#ifndef queue_write_unlock
/**
- * queue_write_unlock - release write lock of a queue rwlock
+ * queued_write_unlock - release write lock of a queue rwlock
* @lock : Pointer to queue rwlock structure
*/
-static inline void queue_write_unlock(struct qrwlock *lock)
+static inline void queued_write_unlock(struct qrwlock *lock)
{
- /*
- * If the writer field is atomic, it can be cleared directly.
- * Otherwise, an atomic subtraction will be used to clear it.
- */
- smp_mb__before_atomic();
- atomic_sub(_QW_LOCKED, &lock->cnts);
+ smp_store_release((u8 *)&lock->cnts, 0);
}
-#endif
/*
* Remapping rwlock architecture specific functions to the corresponding
* queue rwlock functions.
*/
-#define arch_read_can_lock(l) queue_read_can_lock(l)
-#define arch_write_can_lock(l) queue_write_can_lock(l)
-#define arch_read_lock(l) queue_read_lock(l)
-#define arch_write_lock(l) queue_write_lock(l)
-#define arch_read_trylock(l) queue_read_trylock(l)
-#define arch_write_trylock(l) queue_write_trylock(l)
-#define arch_read_unlock(l) queue_read_unlock(l)
-#define arch_write_unlock(l) queue_write_unlock(l)
+#define arch_read_can_lock(l) queued_read_can_lock(l)
+#define arch_write_can_lock(l) queued_write_can_lock(l)
+#define arch_read_lock(l) queued_read_lock(l)
+#define arch_write_lock(l) queued_write_lock(l)
+#define arch_read_trylock(l) queued_read_trylock(l)
+#define arch_write_trylock(l) queued_write_trylock(l)
+#define arch_read_unlock(l) queued_read_unlock(l)
+#define arch_write_unlock(l) queued_write_unlock(l)
#endif /* __ASM_GENERIC_QRWLOCK_H */
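
The queued_read_trylock() fast path above speculatively adds the reader bias
with acquire ordering and backs the addition out if a writer bit is observed.
A user-space analogue using C11 atomics; the my_* names are not kernel code
and the _QW_WMASK/_QR_BIAS values are copied here for illustration only:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define _QW_WMASK 0x1ffU    /* writer byte + waiting-writer bit */
    #define _QR_BIAS  0x200U    /* one reader                       */

    struct myqrwlock { atomic_uint cnts; };

    static bool my_read_trylock(struct myqrwlock *lock)
    {
        unsigned int cnts = atomic_load(&lock->cnts);

        if (!(cnts & _QW_WMASK)) {
            cnts = atomic_fetch_add_explicit(&lock->cnts, _QR_BIAS,
                                             memory_order_acquire) + _QR_BIAS;
            if (!(cnts & _QW_WMASK))
                return true;                             /* reader count taken */
            atomic_fetch_sub(&lock->cnts, _QR_BIAS);     /* writer seen: back out */
        }
        return false;
    }

    int main(void)
    {
        struct myqrwlock lock = { .cnts = 0 };

        printf("read lock acquired: %d\n", my_read_trylock(&lock));
        return 0;
    }
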
diff --git a/kernel/include/asm-generic/qrwlock_types.h b/kernel/include/asm-generic/qrwlock_types.h
index 4d76f24df..0abc6b606 100644
--- a/kernel/include/asm-generic/qrwlock_types.h
+++ b/kernel/include/asm-generic/qrwlock_types.h
@@ -10,12 +10,12 @@
typedef struct qrwlock {
atomic_t cnts;
- arch_spinlock_t lock;
+ arch_spinlock_t wait_lock;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { \
.cnts = ATOMIC_INIT(0), \
- .lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+ .wait_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
}
#endif /* __ASM_GENERIC_QRWLOCK_TYPES_H */
diff --git a/kernel/include/asm-generic/qspinlock.h b/kernel/include/asm-generic/qspinlock.h
new file mode 100644
index 000000000..e2aadbc71
--- /dev/null
+++ b/kernel/include/asm-generic/qspinlock.h
@@ -0,0 +1,139 @@
+/*
+ * Queued spinlock
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
+ *
+ * Authors: Waiman Long <waiman.long@hp.com>
+ */
+#ifndef __ASM_GENERIC_QSPINLOCK_H
+#define __ASM_GENERIC_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+
+/**
+ * queued_spin_is_locked - is the spinlock locked?
+ * @lock: Pointer to queued spinlock structure
+ * Return: 1 if it is locked, 0 otherwise
+ */
+static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
+{
+ return atomic_read(&lock->val);
+}
+
+/**
+ * queued_spin_value_unlocked - is the spinlock structure unlocked?
+ * @lock: queued spinlock structure
+ * Return: 1 if it is unlocked, 0 otherwise
+ *
+ * N.B. Whenever there are tasks waiting for the lock, it is considered
+ * locked wrt the lockref code, to keep the lockref code from stealing the
+ * lock and changing things underneath it. This also allows some
+ * optimizations to be applied without conflict with lockref.
+ */
+static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
+{
+ return !atomic_read(&lock.val);
+}
+
+/**
+ * queued_spin_is_contended - check if the lock is contended
+ * @lock : Pointer to queued spinlock structure
+ * Return: 1 if lock contended, 0 otherwise
+ */
+static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
+{
+ return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
+}
+/**
+ * queued_spin_trylock - try to acquire the queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ * Return: 1 if lock acquired, 0 if failed
+ */
+static __always_inline int queued_spin_trylock(struct qspinlock *lock)
+{
+ if (!atomic_read(&lock->val) &&
+ (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) == 0))
+ return 1;
+ return 0;
+}
+
+extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+
+/**
+ * queued_spin_lock - acquire a queued spinlock
+ * @lock: Pointer to queued spinlock structure
+ */
+static __always_inline void queued_spin_lock(struct qspinlock *lock)
+{
+ u32 val;
+
+ val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
+ if (likely(val == 0))
+ return;
+ queued_spin_lock_slowpath(lock, val);
+}
+
+#ifndef queued_spin_unlock
+/**
+ * queued_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ */
+static __always_inline void queued_spin_unlock(struct qspinlock *lock)
+{
+ /*
+ * smp_mb__before_atomic() in order to guarantee release semantics
+ */
+	smp_mb__before_atomic();
+ atomic_sub(_Q_LOCKED_VAL, &lock->val);
+}
+#endif
+
+/**
+ * queued_spin_unlock_wait - wait until current lock holder releases the lock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * There is a very slight possibility of live-lock if the lockers keep coming
+ * and the waiter is just unfortunate enough to not see any unlock state.
+ */
+static inline void queued_spin_unlock_wait(struct qspinlock *lock)
+{
+ while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
+ cpu_relax();
+}
+
+#ifndef virt_spin_lock
+static __always_inline bool virt_spin_lock(struct qspinlock *lock)
+{
+ return false;
+}
+#endif
+
+/*
+ * Initializer
+ */
+#define __ARCH_SPIN_LOCK_UNLOCKED { ATOMIC_INIT(0) }
+
+/*
+ * Remapping spinlock architecture specific functions to the corresponding
+ * queued spinlock functions.
+ */
+#define arch_spin_is_locked(l) queued_spin_is_locked(l)
+#define arch_spin_is_contended(l) queued_spin_is_contended(l)
+#define arch_spin_value_unlocked(l) queued_spin_value_unlocked(l)
+#define arch_spin_lock(l) queued_spin_lock(l)
+#define arch_spin_trylock(l) queued_spin_trylock(l)
+#define arch_spin_unlock(l) queued_spin_unlock(l)
+#define arch_spin_lock_flags(l, f) queued_spin_lock(l)
+#define arch_spin_unlock_wait(l) queued_spin_unlock_wait(l)
+
+#endif /* __ASM_GENERIC_QSPINLOCK_H */
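
For the new queued spinlock the uncontended case is a single compare-and-swap
of 0 to _Q_LOCKED_VAL; only when that fails does queued_spin_lock() fall into
the MCS-style slow path. A user-space analogue using C11 atomics, with the
slow path replaced by a plain spin purely for illustration (my_* names are not
kernel code):

    #include <stdatomic.h>
    #include <stdio.h>

    #define _Q_LOCKED_VAL 1U

    struct myqspinlock { atomic_uint val; };

    static void my_spin_lock(struct myqspinlock *lock)
    {
        unsigned int expected = 0;

        if (atomic_compare_exchange_strong(&lock->val, &expected, _Q_LOCKED_VAL))
            return;                     /* fast path: lock was free */

        /* stand-in for queued_spin_lock_slowpath(): no MCS queue here */
        do {
            expected = 0;
        } while (!atomic_compare_exchange_weak(&lock->val, &expected,
                                               _Q_LOCKED_VAL));
    }

    static void my_spin_unlock(struct myqspinlock *lock)
    {
        atomic_store_explicit(&lock->val, 0, memory_order_release);
    }

    int main(void)
    {
        struct myqspinlock lock = { .val = 0 };

        my_spin_lock(&lock);
        printf("locked: %u\n", atomic_load(&lock.val));     /* 1 */
        my_spin_unlock(&lock);
        printf("unlocked: %u\n", atomic_load(&lock.val));   /* 0 */
        return 0;
    }
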
diff --git a/kernel/include/asm-generic/qspinlock_types.h b/kernel/include/asm-generic/qspinlock_types.h
new file mode 100644
index 000000000..85f888e86
--- /dev/null
+++ b/kernel/include/asm-generic/qspinlock_types.h
@@ -0,0 +1,79 @@
+/*
+ * Queued spinlock
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
+ *
+ * Authors: Waiman Long <waiman.long@hp.com>
+ */
+#ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H
+#define __ASM_GENERIC_QSPINLOCK_TYPES_H
+
+/*
+ * Including atomic.h with PARAVIRT on will cause compilation errors because
+ * of recursive header file inclusion via paravirt_types.h. So don't include
+ * it if PARAVIRT is on.
+ */
+#ifndef CONFIG_PARAVIRT
+#include <linux/types.h>
+#include <linux/atomic.h>
+#endif
+
+typedef struct qspinlock {
+ atomic_t val;
+} arch_spinlock_t;
+
+/*
+ * Bitfields in the atomic value:
+ *
+ * When NR_CPUS < 16K
+ * 0- 7: locked byte
+ * 8: pending
+ * 9-15: not used
+ * 16-17: tail index
+ * 18-31: tail cpu (+1)
+ *
+ * When NR_CPUS >= 16K
+ * 0- 7: locked byte
+ * 8: pending
+ * 9-10: tail index
+ * 11-31: tail cpu (+1)
+ */
+#define _Q_SET_MASK(type) (((1U << _Q_ ## type ## _BITS) - 1)\
+ << _Q_ ## type ## _OFFSET)
+#define _Q_LOCKED_OFFSET 0
+#define _Q_LOCKED_BITS 8
+#define _Q_LOCKED_MASK _Q_SET_MASK(LOCKED)
+
+#define _Q_PENDING_OFFSET (_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
+#if CONFIG_NR_CPUS < (1U << 14)
+#define _Q_PENDING_BITS 8
+#else
+#define _Q_PENDING_BITS 1
+#endif
+#define _Q_PENDING_MASK _Q_SET_MASK(PENDING)
+
+#define _Q_TAIL_IDX_OFFSET (_Q_PENDING_OFFSET + _Q_PENDING_BITS)
+#define _Q_TAIL_IDX_BITS 2
+#define _Q_TAIL_IDX_MASK _Q_SET_MASK(TAIL_IDX)
+
+#define _Q_TAIL_CPU_OFFSET (_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
+#define _Q_TAIL_CPU_BITS (32 - _Q_TAIL_CPU_OFFSET)
+#define _Q_TAIL_CPU_MASK _Q_SET_MASK(TAIL_CPU)
+
+#define _Q_TAIL_OFFSET _Q_TAIL_IDX_OFFSET
+#define _Q_TAIL_MASK (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
+
+#define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET)
+#define _Q_PENDING_VAL (1U << _Q_PENDING_OFFSET)
+
+#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */
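
A quick standalone check of the bit layout documented above for the
NR_CPUS < 16K case: the locked byte occupies bits 0-7, the pending byte bits
8-15, the tail index bits 16-17 and the tail CPU bits 18-31. The macros are
copied so the computed masks can be compared against the comment:

    #include <stdio.h>

    #define _Q_SET_MASK(type)   (((1U << _Q_ ## type ## _BITS) - 1)\
                                 << _Q_ ## type ## _OFFSET)
    #define _Q_LOCKED_OFFSET    0
    #define _Q_LOCKED_BITS      8
    #define _Q_PENDING_OFFSET   (_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
    #define _Q_PENDING_BITS     8   /* NR_CPUS < 16K */
    #define _Q_TAIL_IDX_OFFSET  (_Q_PENDING_OFFSET + _Q_PENDING_BITS)
    #define _Q_TAIL_IDX_BITS    2
    #define _Q_TAIL_CPU_OFFSET  (_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
    #define _Q_TAIL_CPU_BITS    (32 - _Q_TAIL_CPU_OFFSET)

    int main(void)
    {
        printf("locked   0x%08x\n", _Q_SET_MASK(LOCKED));   /* 0x000000ff */
        printf("pending  0x%08x\n", _Q_SET_MASK(PENDING));  /* 0x0000ff00 */
        printf("tail idx 0x%08x\n", _Q_SET_MASK(TAIL_IDX)); /* 0x00030000 */
        printf("tail cpu 0x%08x\n", _Q_SET_MASK(TAIL_CPU)); /* 0xfffc0000 */
        return 0;
    }
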
diff --git a/kernel/include/asm-generic/rtc.h b/kernel/include/asm-generic/rtc.h
index fa86f240c..4e3b65583 100644
--- a/kernel/include/asm-generic/rtc.h
+++ b/kernel/include/asm-generic/rtc.h
@@ -16,6 +16,9 @@
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/delay.h>
+#ifdef CONFIG_ACPI
+#include <linux/acpi.h>
+#endif
#define RTC_PIE 0x40 /* periodic interrupt enable */
#define RTC_AIE 0x20 /* alarm interrupt enable */
@@ -46,6 +49,7 @@ static inline unsigned int __get_rtc_time(struct rtc_time *time)
{
unsigned char ctrl;
unsigned long flags;
+ unsigned char century = 0;
#ifdef CONFIG_MACH_DECSTATION
unsigned int real_year;
@@ -79,6 +83,11 @@ static inline unsigned int __get_rtc_time(struct rtc_time *time)
#ifdef CONFIG_MACH_DECSTATION
real_year = CMOS_READ(RTC_DEC_YEAR);
#endif
+#ifdef CONFIG_ACPI
+ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+ acpi_gbl_FADT.century)
+ century = CMOS_READ(acpi_gbl_FADT.century);
+#endif
ctrl = CMOS_READ(RTC_CONTROL);
spin_unlock_irqrestore(&rtc_lock, flags);
@@ -90,12 +99,16 @@ static inline unsigned int __get_rtc_time(struct rtc_time *time)
time->tm_mday = bcd2bin(time->tm_mday);
time->tm_mon = bcd2bin(time->tm_mon);
time->tm_year = bcd2bin(time->tm_year);
+ century = bcd2bin(century);
}
#ifdef CONFIG_MACH_DECSTATION
time->tm_year += real_year - 72;
#endif
+ if (century)
+ time->tm_year += (century - 19) * 100;
+
/*
* Account for differences between how the RTC uses the values
* and how they are defined in a struct rtc_time;
@@ -122,6 +135,7 @@ static inline int __set_rtc_time(struct rtc_time *time)
#ifdef CONFIG_MACH_DECSTATION
unsigned int real_yrs, leap_yr;
#endif
+ unsigned char century = 0;
yrs = time->tm_year;
mon = time->tm_mon + 1; /* tm_mon starts at zero */
@@ -150,6 +164,15 @@ static inline int __set_rtc_time(struct rtc_time *time)
yrs = 73;
}
#endif
+
+#ifdef CONFIG_ACPI
+ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+ acpi_gbl_FADT.century) {
+ century = (yrs + 1900) / 100;
+ yrs %= 100;
+ }
+#endif
+
/* These limits and adjustments are independent of
* whether the chip is in binary mode or not.
*/
@@ -169,6 +192,7 @@ static inline int __set_rtc_time(struct rtc_time *time)
day = bin2bcd(day);
mon = bin2bcd(mon);
yrs = bin2bcd(yrs);
+ century = bin2bcd(century);
}
save_control = CMOS_READ(RTC_CONTROL);
@@ -185,6 +209,11 @@ static inline int __set_rtc_time(struct rtc_time *time)
CMOS_WRITE(hrs, RTC_HOURS);
CMOS_WRITE(min, RTC_MINUTES);
CMOS_WRITE(sec, RTC_SECONDS);
+#ifdef CONFIG_ACPI
+ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+ acpi_gbl_FADT.century)
+ CMOS_WRITE(century, acpi_gbl_FADT.century);
+#endif
CMOS_WRITE(save_control, RTC_CONTROL);
CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
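
The century handling added to __get_rtc_time()/__set_rtc_time() above reduces to simple arithmetic on tm_year (years since 1900). A minimal userspace sketch of just that arithmetic, with invented variable names and the CMOS register accesses left out:

#include <stdio.h>

int main(void)
{
	/* Read path: the CMOS year register holds only two digits; the ACPI
	 * century register supplies the rest (e.g. 20 for year 20xx). */
	int century = 20, two_digit_year = 16;
	int tm_year = two_digit_year + (century - 19) * 100;	/* 116 == 2016 - 1900 */

	/* Write path: split a tm_year back into century and two-digit year. */
	int yrs = tm_year;
	int wr_century = (yrs + 1900) / 100;	/* 20 */
	yrs %= 100;				/* 16 */

	printf("read : tm_year=%d (year %d)\n", tm_year, tm_year + 1900);
	printf("write: century=%d yrs=%d\n", wr_century, yrs);
	return 0;
}
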
diff --git a/kernel/include/asm-generic/rwsem.h b/kernel/include/asm-generic/rwsem.h
index d48bf5a95..d6d5dc98d 100644
--- a/kernel/include/asm-generic/rwsem.h
+++ b/kernel/include/asm-generic/rwsem.h
@@ -33,7 +33,7 @@
*/
static inline void __down_read(struct rw_semaphore *sem)
{
- if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
+ if (unlikely(atomic_long_inc_return_acquire((atomic_long_t *)&sem->count) <= 0))
rwsem_down_read_failed(sem);
}
@@ -42,7 +42,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
long tmp;
while ((tmp = sem->count) >= 0) {
- if (tmp == cmpxchg(&sem->count, tmp,
+ if (tmp == cmpxchg_acquire(&sem->count, tmp,
tmp + RWSEM_ACTIVE_READ_BIAS)) {
return 1;
}
@@ -57,7 +57,7 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
long tmp;
- tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
+ tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
(atomic_long_t *)&sem->count);
if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
rwsem_down_write_failed(sem);
@@ -72,7 +72,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
{
long tmp;
- tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+ tmp = cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
RWSEM_ACTIVE_WRITE_BIAS);
return tmp == RWSEM_UNLOCKED_VALUE;
}
@@ -84,7 +84,7 @@ static inline void __up_read(struct rw_semaphore *sem)
{
long tmp;
- tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
+ tmp = atomic_long_dec_return_release((atomic_long_t *)&sem->count);
if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
rwsem_wake(sem);
}
@@ -94,7 +94,7 @@ static inline void __up_read(struct rw_semaphore *sem)
*/
static inline void __up_write(struct rw_semaphore *sem)
{
- if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
+ if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
(atomic_long_t *)&sem->count) < 0))
rwsem_wake(sem);
}
@@ -114,7 +114,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
{
long tmp;
- tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
+ /*
+ * When downgrading from exclusive to shared ownership,
+ * anything inside the write-locked region cannot leak
+ * into the read side. In contrast, anything in the
+ * read-locked region is ok to be re-ordered into the
+ * write side. As such, rely on RELEASE semantics.
+ */
+ tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS,
(atomic_long_t *)&sem->count);
if (tmp < 0)
rwsem_downgrade_wake(sem);
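
The pattern behind the rwsem changes above is the usual one for lock fastpaths: the operation that acquires ownership needs ACQUIRE ordering, the one that drops it needs RELEASE, and downgrade needs only RELEASE because writes done under the write lock must not leak past the downgrade into the read side. A toy userspace sketch of that acquire/release split using C11 atomics (not the kernel's atomic_long_* primitives, and an exclusive lock rather than a full rwsem):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_long toy_lock = ATOMIC_VAR_INIT(0);	/* 0 = unlocked, 1 = locked */

static bool toy_trylock(void)
{
	long expected = 0;
	/* ACQUIRE on success: accesses inside the critical section cannot
	 * be reordered before the lock is seen as taken. */
	return atomic_compare_exchange_strong_explicit(&toy_lock, &expected, 1,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void toy_unlock(void)
{
	/* RELEASE: accesses inside the critical section cannot be reordered
	 * past the store that publishes the unlock. */
	atomic_store_explicit(&toy_lock, 0, memory_order_release);
}
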
diff --git a/kernel/include/asm-generic/scatterlist.h b/kernel/include/asm-generic/scatterlist.h
deleted file mode 100644
index 5de07355f..000000000
--- a/kernel/include/asm-generic/scatterlist.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef __ASM_GENERIC_SCATTERLIST_H
-#define __ASM_GENERIC_SCATTERLIST_H
-
-#include <linux/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
-#endif
- unsigned long page_link;
- unsigned int offset;
- unsigned int length;
- dma_addr_t dma_address;
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
- unsigned int dma_length;
-#endif
-};
-
-/*
- * These macros should be used after a dma_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns, or alternatively stop on the first sg_dma_len(sg) which
- * is 0.
- */
-#define sg_dma_address(sg) ((sg)->dma_address)
-
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
-#define sg_dma_len(sg) ((sg)->dma_length)
-#else
-#define sg_dma_len(sg) ((sg)->length)
-#endif
-
-#endif /* __ASM_GENERIC_SCATTERLIST_H */
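
The sg_dma_address()/sg_dma_len() accessors and the usage rule described in the deleted comment survive in linux/scatterlist.h; only the redundant asm-generic copy goes away. A hypothetical driver fragment following that rule (names invented, assumes <linux/dma-mapping.h> and <linux/scatterlist.h>):

static int example_map_and_walk(struct device *dev, struct scatterlist *sgl,
				int count)
{
	struct scatterlist *sg;
	int nents, i;

	nents = dma_map_sg(dev, sgl, count, DMA_TO_DEVICE);
	if (!nents)
		return -EIO;

	/* Walk only the nents entries dma_map_sg() returned. */
	for_each_sg(sgl, sg, nents, i) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		/* program addr/len into a hardware descriptor here */
		(void)addr;
		(void)len;
	}

	dma_unmap_sg(dev, sgl, count, DMA_TO_DEVICE);
	return 0;
}
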
diff --git a/kernel/include/asm-generic/tlb.h b/kernel/include/asm-generic/tlb.h
index db284bff2..9dbb739ca 100644
--- a/kernel/include/asm-generic/tlb.h
+++ b/kernel/include/asm-generic/tlb.h
@@ -5,7 +5,7 @@
* Copyright 2001 Red Hat, Inc.
* Based on code from mm/memory.c Copyright Linus Torvalds and others.
*
- * Copyright 2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright 2011 Red Hat, Inc., Peter Zijlstra
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/kernel/include/asm-generic/uaccess.h b/kernel/include/asm-generic/uaccess.h
index 72d880383..1bfa60295 100644
--- a/kernel/include/asm-generic/uaccess.h
+++ b/kernel/include/asm-generic/uaccess.h
@@ -163,9 +163,10 @@ static inline __must_check long __copy_to_user(void __user *to,
#define put_user(x, ptr) \
({ \
+ void *__p = (ptr); \
might_fault(); \
- access_ok(VERIFY_WRITE, ptr, sizeof(*ptr)) ? \
- __put_user(x, ptr) : \
+ access_ok(VERIFY_WRITE, __p, sizeof(*ptr)) ? \
+ __put_user((x), ((__typeof__(*(ptr)) *)__p)) : \
-EFAULT; \
})
@@ -225,9 +226,10 @@ extern int __put_user_bad(void) __attribute__((noreturn));
#define get_user(x, ptr) \
({ \
+ const void *__p = (ptr); \
might_fault(); \
- access_ok(VERIFY_READ, ptr, sizeof(*ptr)) ? \
- __get_user(x, ptr) : \
+ access_ok(VERIFY_READ, __p, sizeof(*ptr)) ? \
+ __get_user((x), (__typeof__(*(ptr)) *)__p) : \
-EFAULT; \
})
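
Both macros above now capture ptr in a local (__p) so the pointer expression is evaluated exactly once, then cast back to the right type for __get_user()/__put_user(); previously a ptr with side effects could be evaluated more than once, by access_ok() and again by the inner helper. A minimal userspace sketch of the same evaluate-once idiom (hypothetical macro, using GCC statement expressions and __typeof__ as the kernel does):

#include <stdio.h>

#define get_val(x, ptr)						\
({								\
	__typeof__(ptr) __p = (ptr);	/* evaluated once */	\
	(x) = *__p;						\
	0;							\
})

int main(void)
{
	int buf[2] = { 10, 20 };
	int *cursor = buf;
	int val;

	/* cursor++ takes effect exactly once, even though the macro
	 * refers to the pointer again through __p. */
	get_val(val, cursor++);
	printf("val=%d, cursor advanced by %td\n", val, cursor - buf);
	return 0;
}
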
diff --git a/kernel/include/asm-generic/vmlinux.lds.h b/kernel/include/asm-generic/vmlinux.lds.h
index 8bd374d3c..c4bd0e2c1 100644
--- a/kernel/include/asm-generic/vmlinux.lds.h
+++ b/kernel/include/asm-generic/vmlinux.lds.h
@@ -181,6 +181,16 @@
#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)
#define EARLYCON_OF_TABLES() OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon)
+#ifdef CONFIG_ACPI
+#define ACPI_PROBE_TABLE(name) \
+ . = ALIGN(8); \
+ VMLINUX_SYMBOL(__##name##_acpi_probe_table) = .; \
+ *(__##name##_acpi_probe_table) \
+ VMLINUX_SYMBOL(__##name##_acpi_probe_table_end) = .;
+#else
+#define ACPI_PROBE_TABLE(name)
+#endif
+
#define KERNEL_DTB() \
STRUCT_ALIGN(); \
VMLINUX_SYMBOL(__dtb_start) = .; \
@@ -412,12 +422,10 @@
* during the second ld pass when generating System.map */
#define TEXT_TEXT \
ALIGN_FUNCTION(); \
- *(.text.hot) \
- *(.text .text.fixup) \
+ *(.text.hot .text .text.fixup .text.unlikely) \
*(.ref.text) \
MEM_KEEP(init.text) \
MEM_KEEP(exit.text) \
- *(.text.unlikely)
/* sched.text is aligned to function alignment to ensure we have the same
@@ -516,6 +524,8 @@
CPUIDLE_METHOD_OF_TABLES() \
KERNEL_DTB() \
IRQCHIP_OF_MATCH_TABLE() \
+ ACPI_PROBE_TABLE(irqchip) \
+ ACPI_PROBE_TABLE(clksrc) \
EARLYCON_TABLE() \
EARLYCON_OF_TABLES()
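
ACPI_PROBE_TABLE(name) above only carves out a linker section bracketed by __<name>_acpi_probe_table and __<name>_acpi_probe_table_end; entries land in it from driver code via section attributes (in the kernel this is wrapped by helpers such as ACPI_DECLARE_PROBE_ENTRY()). A rough, hypothetical illustration of the underlying pattern with invented names:

struct demo_probe_entry {
	const char *id;
	int (*probe)(void);
};

/* Bounds emitted by the linker script, as ACPI_PROBE_TABLE(demo) would. */
extern const struct demo_probe_entry __demo_acpi_probe_table[];
extern const struct demo_probe_entry __demo_acpi_probe_table_end[];

/* Driver side: drop an entry into the named section. */
#define DEMO_ACPI_DECLARE(sym, _id, _probe)				\
	static const struct demo_probe_entry __demo_entry_##sym	\
	__attribute__((used, section("__demo_acpi_probe_table")))	\
	= { .id = _id, .probe = _probe }

/* Core side: walk everything the linker collected between the bounds. */
static void demo_probe_all(void)
{
	const struct demo_probe_entry *e;

	for (e = __demo_acpi_probe_table; e < __demo_acpi_probe_table_end; e++)
		e->probe();
}
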
diff --git a/kernel/include/asm-generic/word-at-a-time.h b/kernel/include/asm-generic/word-at-a-time.h
index 94f9ea8ab..011dde083 100644
--- a/kernel/include/asm-generic/word-at-a-time.h
+++ b/kernel/include/asm-generic/word-at-a-time.h
@@ -1,15 +1,10 @@
#ifndef _ASM_WORD_AT_A_TIME_H
#define _ASM_WORD_AT_A_TIME_H
-/*
- * This says "generic", but it's actually big-endian only.
- * Little-endian can use more efficient versions of these
- * interfaces, see for example
- * arch/x86/include/asm/word-at-a-time.h
- * for those.
- */
-
#include <linux/kernel.h>
+#include <asm/byteorder.h>
+
+#ifdef __BIG_ENDIAN
struct word_at_a_time {
const unsigned long high_bits, low_bits;
@@ -53,4 +48,73 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
#define zero_bytemask(mask) (~1ul << __fls(mask))
#endif
+#else
+
+/*
+ * The optimal byte mask counting is probably going to be something
+ * that is architecture-specific. If you have a reliably fast
+ * bit count instruction, that might be better than the multiply
+ * and shift, for example.
+ */
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Jan Achrenius on G+: microoptimized version of
+ * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
+ * that works for the bytemasks without having to
+ * mask them first.
+ */
+static inline long count_masked_bytes(unsigned long mask)
+{
+ return mask*0x0001020304050608ul >> 56;
+}
+
+#else /* 32-bit case */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+ /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+ long a = (0x0ff0001+mask) >> 23;
+ /* Fix the 1 for 00 case */
+ return a & mask;
+}
+
+#endif
+
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+ unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+ return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ return count_masked_bytes(mask);
+}
+
+#endif /* __BIG_ENDIAN */
+
#endif /* _ASM_WORD_AT_A_TIME_H */
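
Putting the little-endian helpers together: has_zero() flags zero bytes, create_zero_mask() turns the lowest flag into a byte mask, and find_zero()/count_masked_bytes() convert that mask into a byte index. A standalone userspace sketch of the 64-bit path (assumes a little-endian machine with 64-bit long; constants copied from above, REPEAT_BYTE open-coded):

#include <stdio.h>

#define REPEAT_BYTE(x)	((~0ul / 0xff) * (x))

static unsigned long has_zero(unsigned long a)
{
	return (a - REPEAT_BYTE(0x01)) & ~a & REPEAT_BYTE(0x80);
}

static unsigned long create_zero_mask(unsigned long bits)
{
	bits = (bits - 1) & ~bits;
	return bits >> 7;
}

static long find_zero(unsigned long mask)
{
	return mask * 0x0001020304050608ul >> 56;
}

int main(void)
{
	/* "abc\0defg" loaded as one little-endian word: zero byte at index 3. */
	unsigned long w = 0x6766656400636261ul;
	unsigned long bits = has_zero(w);

	if (bits)
		printf("first zero byte at index %ld\n",
		       find_zero(create_zero_mask(bits)));
	return 0;
}
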