Diffstat (limited to 'kernel/include/asm-generic')
-rw-r--r--  kernel/include/asm-generic/bitops/lock.h     | 14
-rw-r--r--  kernel/include/asm-generic/futex.h           |  8
-rw-r--r--  kernel/include/asm-generic/preempt.h         |  4
-rw-r--r--  kernel/include/asm-generic/qspinlock.h       | 32
-rw-r--r--  kernel/include/asm-generic/siginfo.h         | 15
-rw-r--r--  kernel/include/asm-generic/uaccess.h         | 20
-rw-r--r--  kernel/include/asm-generic/vmlinux.lds.h     |  4
7 files changed, 51 insertions, 46 deletions
diff --git a/kernel/include/asm-generic/bitops/lock.h b/kernel/include/asm-generic/bitops/lock.h
index c30266e94..8ef0ccbf8 100644
--- a/kernel/include/asm-generic/bitops/lock.h
+++ b/kernel/include/asm-generic/bitops/lock.h
@@ -29,16 +29,16 @@ do { \
* @nr: the bit to set
* @addr: the address to start counting from
*
- * This operation is like clear_bit_unlock, however it is not atomic.
- * It does provide release barrier semantics so it can be used to unlock
- * a bit lock, however it would only be used if no other CPU can modify
- * any bits in the memory until the lock is released (a good example is
- * if the bit lock itself protects access to the other bits in the word).
+ * A weaker form of clear_bit_unlock() as used by __bit_lock_unlock(). If all
+ * the bits in the word are protected by this lock some archs can use weaker
+ * ops to safely unlock.
+ *
+ * See for example x86's implementation.
*/
#define __clear_bit_unlock(nr, addr) \
do { \
- smp_mb(); \
- __clear_bit(nr, addr); \
+ smp_mb__before_atomic(); \
+ clear_bit(nr, addr); \
} while (0)
#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
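Note: the generic fallback now pairs smp_mb__before_atomic() with the atomic clear_bit(), so a concurrent atomic RMW on another bit of the same word cannot be lost. A minimal usage sketch of the bit-lock pattern the new comment describes (the flags word and function names are invented for illustration):

static unsigned long hypothetical_flags;	/* bit 0 is the lock, the remaining bits are the state it protects */

static void hypothetical_bit_lock(void)
{
	while (test_and_set_bit_lock(0, &hypothetical_flags))
		cpu_relax();			/* spin until the lock bit is observed clear */
}

static void hypothetical_bit_unlock(void)
{
	/*
	 * Allowed because every writer of hypothetical_flags takes bit 0
	 * first; release semantics are enough to publish the other bits.
	 */
	__clear_bit_unlock(0, &hypothetical_flags);
}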
diff --git a/kernel/include/asm-generic/futex.h b/kernel/include/asm-generic/futex.h
index e56272c91..bf2d34c9d 100644
--- a/kernel/include/asm-generic/futex.h
+++ b/kernel/include/asm-generic/futex.h
@@ -108,11 +108,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 val;
preempt_disable();
- if (unlikely(get_user(val, uaddr) != 0))
+ if (unlikely(get_user(val, uaddr) != 0)) {
+ preempt_enable();
return -EFAULT;
+ }
- if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
+ if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
+ preempt_enable();
return -EFAULT;
+ }
*uval = val;
preempt_enable();
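Note: with the two added preempt_enable() calls, the error paths no longer return with preemption disabled. The resulting shape of the function, with lines outside the hunk paraphrased rather than quoted:

static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
			      u32 oldval, u32 newval)
{
	u32 val;

	preempt_disable();
	if (unlikely(get_user(val, uaddr) != 0)) {
		preempt_enable();	/* previously skipped on this early return */
		return -EFAULT;
	}
	if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
		preempt_enable();	/* same imbalance on the put_user() failure path */
		return -EFAULT;
	}
	*uval = val;
	preempt_enable();

	return 0;
}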
diff --git a/kernel/include/asm-generic/preempt.h b/kernel/include/asm-generic/preempt.h
index 5d8ffa3e6..c1cde3577 100644
--- a/kernel/include/asm-generic/preempt.h
+++ b/kernel/include/asm-generic/preempt.h
@@ -7,10 +7,10 @@
static __always_inline int preempt_count(void)
{
- return current_thread_info()->preempt_count;
+ return READ_ONCE(current_thread_info()->preempt_count);
}
-static __always_inline int *preempt_count_ptr(void)
+static __always_inline volatile int *preempt_count_ptr(void)
{
return &current_thread_info()->preempt_count;
}
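Note: READ_ONCE() and the volatile-qualified pointer keep the compiler from caching preempt_count across re-reads. An illustrative (hypothetical) polling loop that would break with a hoisted load:

static void hypothetical_wait_until_preemptible(void)
{
	/*
	 * Without READ_ONCE() the compiler could lift the load out of the
	 * loop and spin on a stale value forever.
	 */
	while (READ_ONCE(current_thread_info()->preempt_count))
		cpu_relax();
}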
diff --git a/kernel/include/asm-generic/qspinlock.h b/kernel/include/asm-generic/qspinlock.h
index e2aadbc71..1885fc44b 100644
--- a/kernel/include/asm-generic/qspinlock.h
+++ b/kernel/include/asm-generic/qspinlock.h
@@ -21,14 +21,33 @@
#include <asm-generic/qspinlock_types.h>
/**
+ * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * There is a very slight possibility of live-lock if the lockers keep coming
+ * and the waiter is just unfortunate enough to not see any unlock state.
+ */
+#ifndef queued_spin_unlock_wait
+extern void queued_spin_unlock_wait(struct qspinlock *lock);
+#endif
+
+/**
* queued_spin_is_locked - is the spinlock locked?
* @lock: Pointer to queued spinlock structure
* Return: 1 if it is locked, 0 otherwise
*/
+#ifndef queued_spin_is_locked
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
+ /*
+ * See queued_spin_unlock_wait().
+ *
+ * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
+ * isn't immediately observable.
+ */
return atomic_read(&lock->val);
}
+#endif
/**
* queued_spin_value_unlocked - is the spinlock structure unlocked?
@@ -98,19 +117,6 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
}
#endif
-/**
- * queued_spin_unlock_wait - wait until current lock holder releases the lock
- * @lock : Pointer to queued spinlock structure
- *
- * There is a very slight possibility of live-lock if the lockers keep coming
- * and the waiter is just unfortunate enough to not see any unlock state.
- */
-static inline void queued_spin_unlock_wait(struct qspinlock *lock)
-{
- while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
- cpu_relax();
-}
-
#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
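Note: the new #ifndef guards and the extern declaration let an architecture supply stronger versions of queued_spin_unlock_wait() and queued_spin_is_locked(). A sketch of the override pattern an arch header could use before including asm-generic/qspinlock.h (the body shown is illustrative, not from this patch):

#define queued_spin_is_locked queued_spin_is_locked
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/* arch-specific variant; any non-zero val still means "locked" */
	return atomic_read(&lock->val);
}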
diff --git a/kernel/include/asm-generic/siginfo.h b/kernel/include/asm-generic/siginfo.h
index 3d1a3af5c..a2508a8f9 100644
--- a/kernel/include/asm-generic/siginfo.h
+++ b/kernel/include/asm-generic/siginfo.h
@@ -17,21 +17,6 @@
struct siginfo;
void do_schedule_next_timer(struct siginfo *info);
-#ifndef HAVE_ARCH_COPY_SIGINFO
-
-#include <linux/string.h>
-
-static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
-{
- if (from->si_code < 0)
- memcpy(to, from, sizeof(*to));
- else
- /* _sigchld is currently the largest know union member */
- memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
-}
-
-#endif
-
extern int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
#endif
diff --git a/kernel/include/asm-generic/uaccess.h b/kernel/include/asm-generic/uaccess.h
index 1bfa60295..32901d11f 100644
--- a/kernel/include/asm-generic/uaccess.h
+++ b/kernel/include/asm-generic/uaccess.h
@@ -230,14 +230,18 @@ extern int __put_user_bad(void) __attribute__((noreturn));
might_fault(); \
access_ok(VERIFY_READ, __p, sizeof(*ptr)) ? \
__get_user((x), (__typeof__(*(ptr)) *)__p) : \
- -EFAULT; \
+ ((x) = (__typeof__(*(ptr)))0,-EFAULT); \
})
#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
- size = __copy_from_user(x, ptr, size);
- return size ? -EFAULT : size;
+ size_t n = __copy_from_user(x, ptr, size);
+ if (unlikely(n)) {
+ memset(x + (size - n), 0, n);
+ return -EFAULT;
+ }
+ return 0;
}
#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
@@ -257,11 +261,13 @@ extern int __get_user_bad(void) __attribute__((noreturn));
static inline long copy_from_user(void *to,
const void __user * from, unsigned long n)
{
+ unsigned long res = n;
might_fault();
- if (access_ok(VERIFY_READ, from, n))
- return __copy_from_user(to, from, n);
- else
- return n;
+ if (likely(access_ok(VERIFY_READ, from, n)))
+ res = __copy_from_user(to, from, n);
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
}
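Note: both helpers now zero whatever part of the kernel buffer was not copied, so a faulting read cannot leave stale kernel memory behind. A hypothetical caller showing why that matters (names and struct layout are invented):

static long hypothetical_read_request(void __user *arg)
{
	struct { u32 flags; u32 len; } req;

	/*
	 * If the copy faults part-way through, the uncopied tail of req is
	 * now zeroed instead of holding leftover kernel stack bytes, so a
	 * later copy_to_user() of req cannot leak uninitialized data.
	 */
	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	return req.len;
}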
static inline long copy_to_user(void __user *to,
diff --git a/kernel/include/asm-generic/vmlinux.lds.h b/kernel/include/asm-generic/vmlinux.lds.h
index c4bd0e2c1..ef2e8c97e 100644
--- a/kernel/include/asm-generic/vmlinux.lds.h
+++ b/kernel/include/asm-generic/vmlinux.lds.h
@@ -531,15 +531,19 @@
#define INIT_TEXT \
*(.init.text) \
+ *(.text.startup) \
MEM_DISCARD(init.text)
#define EXIT_DATA \
*(.exit.data) \
+ *(.fini_array) \
+ *(.dtors) \
MEM_DISCARD(exit.data) \
MEM_DISCARD(exit.rodata)
#define EXIT_TEXT \
*(.exit.text) \
+ *(.text.exit) \
MEM_DISCARD(exit.text)
#define EXIT_CALL \
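Note: the extra input sections fold compiler-generated startup/exit code and destructor tables into the existing init/exit groups. Purely as an illustration of where such sections can come from, a constructor/destructor pair like the following may be emitted into them by GCC:

static void __attribute__((constructor)) hypothetical_ctor(void)
{
	/* GCC may place this body in .text.startup and a pointer to it in .init_array/.ctors */
}

static void __attribute__((destructor)) hypothetical_dtor(void)
{
	/* GCC may place this body in .text.exit and a pointer to it in .fini_array/.dtors */
}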