Diffstat (limited to 'kernel/include/linux/rcupdate.h')
 kernel/include/linux/rcupdate.h | 255 ++++++++++------------------
 1 file changed, 101 insertions(+), 154 deletions(-)
diff --git a/kernel/include/linux/rcupdate.h b/kernel/include/linux/rcupdate.h
index 5d090cdaa..c2f5f9551 100644
--- a/kernel/include/linux/rcupdate.h
+++ b/kernel/include/linux/rcupdate.h
@@ -44,6 +44,8 @@
#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>
+#include <linux/ktime.h>
+
#include <asm/barrier.h>
extern int rcu_expedited; /* for sysctl */
@@ -158,7 +160,7 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
* more than one CPU).
*/
void call_rcu(struct rcu_head *head,
- void (*func)(struct rcu_head *head));
+ rcu_callback_t func);
#else /* #ifdef CONFIG_PREEMPT_RCU */
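The rcu_callback_t conversions in this and the following hunks replace the
open-coded function-pointer type; presumably the typedef lands in
linux/types.h (alongside a pointer-typedef form of call_rcu_func_t, whose
old function-type form is removed further down) as:

        typedef void (*rcu_callback_t)(struct rcu_head *head);

Call sites are unchanged; only the declared parameter type is. A sketch,
using a hypothetical struct foo:

        struct foo {
                int a;
                struct rcu_head rcu;
        };

        /* A callback matching rcu_callback_t: */
        static void foo_reclaim(struct rcu_head *rhp)
        {
                struct foo *fp = container_of(rhp, struct foo, rcu);

                kfree(fp);
        }

        /* Given a struct foo *fp being removed, queued exactly as before: */
        call_rcu(&fp->rcu, foo_reclaim);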
@@ -192,7 +194,7 @@ void call_rcu(struct rcu_head *head,
* memory ordering guarantees.
*/
void call_rcu_bh(struct rcu_head *head,
- void (*func)(struct rcu_head *head));
+ rcu_callback_t func);
#endif
/**
@@ -215,7 +217,7 @@ void call_rcu_bh(struct rcu_head *head,
* memory ordering guarantees.
*/
void call_rcu_sched(struct rcu_head *head,
- void (*func)(struct rcu_head *rcu));
+ rcu_callback_t func);
void synchronize_sched(void);
@@ -228,6 +230,36 @@ struct rcu_synchronize {
};
void wakeme_after_rcu(struct rcu_head *head);
+void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
+ struct rcu_synchronize *rs_array);
+
+#define _wait_rcu_gp(checktiny, ...) \
+do { \
+ call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \
+ struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \
+ __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \
+ __crcu_array, __rs_array); \
+} while (0)
+
+#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)
+
+/**
+ * synchronize_rcu_mult - Wait concurrently for multiple grace periods
+ * @...: List of call_rcu() functions for the flavors to wait on.
+ *
+ * This macro waits concurrently for multiple flavors of RCU grace periods.
+ * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait
+ * on concurrent RCU and RCU-bh grace periods. Waiting on a given SRCU
+ * domain requires you to write a wrapper function for that SRCU domain's
+ * call_srcu() function, supplying the corresponding srcu_struct.
+ *
+ * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU
+ * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called
+ * is automatically a grace period.
+ */
+#define synchronize_rcu_mult(...) \
+ _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
+
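
As the comment above says, waiting on an SRCU domain with
synchronize_rcu_mult() needs a wrapper that supplies the srcu_struct. A
minimal sketch, assuming a hypothetical domain my_srcu:

        DEFINE_STATIC_SRCU(my_srcu);

        /* Matches call_rcu_func_t, binding in the SRCU domain: */
        static void call_my_srcu(struct rcu_head *head, rcu_callback_t func)
        {
                call_srcu(&my_srcu, head, func);
        }

        /* Wait for RCU, RCU-bh, and my_srcu grace periods concurrently: */
        synchronize_rcu_mult(call_rcu, call_rcu_bh, call_my_srcu);
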
/**
 * call_rcu_tasks() - Queue an RCU callback for a task-based grace period
* @head: structure to be used for queueing the RCU updates.
@@ -246,7 +278,7 @@ void wakeme_after_rcu(struct rcu_head *head);
* See the description of call_rcu() for more detailed information on
* memory ordering guarantees.
*/
-void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head));
+void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void);
void rcu_barrier_tasks(void);
@@ -274,12 +306,14 @@ static inline int sched_rcu_preempt_depth(void) { return 0; }
static inline void __rcu_read_lock(void)
{
- preempt_disable();
+ if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
+ preempt_disable();
}
static inline void __rcu_read_unlock(void)
{
- preempt_enable();
+ if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
+ preempt_enable();
}
static inline void synchronize_rcu(void)
@@ -303,10 +337,6 @@ void rcu_sched_qs(void);
void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
struct notifier_block;
-void rcu_idle_enter(void);
-void rcu_idle_exit(void);
-void rcu_irq_enter(void);
-void rcu_irq_exit(void);
int rcu_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu);
@@ -322,7 +352,7 @@ static inline void rcu_sysrq_end(void)
}
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
-#ifdef CONFIG_RCU_USER_QS
+#ifdef CONFIG_NO_HZ_FULL
void rcu_user_enter(void);
void rcu_user_exit(void);
#else
@@ -330,7 +360,7 @@ static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { }
static inline void rcu_user_hooks_switch(struct task_struct *prev,
struct task_struct *next) { }
-#endif /* CONFIG_RCU_USER_QS */
+#endif /* CONFIG_NO_HZ_FULL */
#ifdef CONFIG_RCU_NOCB_CPU
void rcu_init_nohz(void);
@@ -375,8 +405,8 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
#define rcu_note_voluntary_context_switch(t) \
do { \
rcu_all_qs(); \
- if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
- ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
+ if (READ_ONCE((t)->rcu_tasks_holdout)) \
+ WRITE_ONCE((t)->rcu_tasks_holdout, false); \
} while (0)
#else /* #ifdef CONFIG_TASKS_RCU */
#define TASKS_RCU(x) do { } while (0)
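
The ACCESS_ONCE() -> READ_ONCE()/WRITE_ONCE() changes here (and in later
hunks) follow the tree-wide migration: the direction of the access becomes
explicit, and unlike ACCESS_ONCE() the new macros also cope with
non-scalar types. Side by side:

        bool x;

        x = ACCESS_ONCE(t->rcu_tasks_holdout);          /* old read  */
        ACCESS_ONCE(t->rcu_tasks_holdout) = false;      /* old write */

        x = READ_ONCE(t->rcu_tasks_holdout);            /* new read  */
        WRITE_ONCE(t->rcu_tasks_holdout, false);        /* new write */
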
@@ -405,10 +435,6 @@ bool __rcu_is_watching(void);
* TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
*/
-typedef void call_rcu_func_t(struct rcu_head *head,
- void (*func)(struct rcu_head *head));
-void wait_rcu_gp(call_rcu_func_t crf);
-
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
@@ -489,46 +515,10 @@ int rcu_read_lock_bh_held(void);
* If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section. In the absence of
* CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
- * critical section unless it can prove otherwise. Note that disabling
- * of preemption (including disabling irqs) counts as an RCU-sched
- * read-side critical section. This is useful for debug checks in functions
- * that required that they be called within an RCU-sched read-side
- * critical section.
- *
- * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
- * and while lockdep is disabled.
- *
- * Note that if the CPU is in the idle loop from an RCU point of
- * view (ie: that we are in the section between rcu_idle_enter() and
- * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
- * did an rcu_read_lock(). The reason for this is that RCU ignores CPUs
- * that are in such a section, considering these as in extended quiescent
- * state, so such a CPU is effectively never in an RCU read-side critical
- * section regardless of what RCU primitives it invokes. This state of
- * affairs is required --- we need to keep an RCU-free window in idle
- * where the CPU may possibly enter into low power mode. This way we can
- * notice an extended quiescent state to other CPUs that started a grace
- * period. Otherwise we would delay any grace period as long as we run in
- * the idle task.
- *
- * Similarly, we avoid claiming an SRCU read lock held if the current
- * CPU is offline.
+ * critical section unless it can prove otherwise.
*/
#ifdef CONFIG_PREEMPT_COUNT
-static inline int rcu_read_lock_sched_held(void)
-{
- int lockdep_opinion = 0;
-
- if (!debug_lockdep_rcu_enabled())
- return 1;
- if (!rcu_is_watching())
- return 0;
- if (!rcu_lockdep_current_cpu_online())
- return 0;
- if (debug_locks)
- lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
- return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
-}
+int rcu_read_lock_sched_held(void);
#else /* #ifdef CONFIG_PREEMPT_COUNT */
static inline int rcu_read_lock_sched_held(void)
{
@@ -568,14 +558,14 @@ static inline int rcu_read_lock_sched_held(void)
#ifdef CONFIG_PROVE_RCU
/**
- * rcu_lockdep_assert - emit lockdep splat if specified condition not met
+ * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
* @c: condition to check
* @s: informative message
*/
-#define rcu_lockdep_assert(c, s) \
+#define RCU_LOCKDEP_WARN(c, s) \
do { \
static bool __section(.data.unlikely) __warned; \
- if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
+ if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \
__warned = true; \
lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
} \
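
Note that the rename from rcu_lockdep_assert() to RCU_LOCKDEP_WARN()
inverts the sense of the condition: the old macro splatted when its
condition was false, the new one splats when it is true. Every converted
caller below therefore negates its argument:

        /* Old: assert that the condition holds. */
        rcu_lockdep_assert(rcu_is_watching(),
                           "rcu_read_lock() used illegally while idle");

        /* New: warn when the (negated) condition is met. */
        RCU_LOCKDEP_WARN(!rcu_is_watching(),
                         "rcu_read_lock() used illegally while idle");
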
@@ -584,8 +574,8 @@ static inline int rcu_read_lock_sched_held(void)
#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
static inline void rcu_preempt_sleep_check(void)
{
- rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
- "Illegal context switch in RCU read-side critical section");
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
+ "Illegal context switch in RCU read-side critical section");
}
#else /* #ifdef CONFIG_PROVE_RCU */
static inline void rcu_preempt_sleep_check(void)
@@ -596,15 +586,15 @@ static inline void rcu_preempt_sleep_check(void)
#define rcu_sleep_check() \
do { \
rcu_preempt_sleep_check(); \
- rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \
- "Illegal context switch in RCU-bh read-side critical section"); \
- rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map), \
- "Illegal context switch in RCU-sched read-side critical section"); \
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
+ "Illegal context switch in RCU-bh read-side critical section"); \
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \
+ "Illegal context switch in RCU-sched read-side critical section"); \
} while (0)
#else /* #ifdef CONFIG_PROVE_RCU */
-#define rcu_lockdep_assert(c, s) do { } while (0)
+#define RCU_LOCKDEP_WARN(c, s) do { } while (0)
#define rcu_sleep_check() do { } while (0)
#endif /* #else #ifdef CONFIG_PROVE_RCU */
@@ -627,7 +617,7 @@ static inline void rcu_preempt_sleep_check(void)
#define __rcu_access_pointer(p, space) \
({ \
- typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
+ typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
rcu_dereference_sparse(p, space); \
((typeof(*p) __force __kernel *)(_________p1)); \
})
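
__rcu_access_pointer() backs rcu_access_pointer(), which fetches the
pointer's value without ordering later dereferences. A sketch of the
intended division of labor, using a hypothetical RCU-protected global gp
(do_something() is likewise illustrative):

        struct foo __rcu *gp;           /* hypothetical */
        struct foo *p;

        /* Value-only test: nothing is dereferenced, so this is legal
         * even outside a read-side critical section. */
        if (!rcu_access_pointer(gp))
                return -ENOENT;

        rcu_read_lock();
        p = rcu_dereference(gp);        /* dependency-ordered load */
        if (p)
                do_something(p->a);
        rcu_read_unlock();
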
@@ -635,32 +625,17 @@ static inline void rcu_preempt_sleep_check(void)
({ \
/* Dependency order vs. p above. */ \
typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
- rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
+ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
rcu_dereference_sparse(p, space); \
((typeof(*p) __force __kernel *)(________p1)); \
})
#define __rcu_dereference_protected(p, c, space) \
({ \
- rcu_lockdep_assert(c, "suspicious rcu_dereference_protected() usage"); \
+ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
rcu_dereference_sparse(p, space); \
((typeof(*p) __force __kernel *)(p)); \
})
-#define __rcu_access_index(p, space) \
-({ \
- typeof(p) _________p1 = ACCESS_ONCE(p); \
- rcu_dereference_sparse(p, space); \
- (_________p1); \
-})
-#define __rcu_dereference_index_check(p, c) \
-({ \
- /* Dependency order vs. p above. */ \
- typeof(p) _________p1 = lockless_dereference(p); \
- rcu_lockdep_assert(c, \
- "suspicious rcu_dereference_index_check() usage"); \
- (_________p1); \
-})
-
/**
* RCU_INITIALIZER() - statically initialize an RCU-protected global variable
* @v: The value to statically initialize with.
@@ -668,21 +643,6 @@ static inline void rcu_preempt_sleep_check(void)
#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
/**
- * lockless_dereference() - safely load a pointer for later dereference
- * @p: The pointer to load
- *
- * Similar to rcu_dereference(), but for situations where the pointed-to
- * object's lifetime is managed by something other than RCU. That
- * "something other" might be reference counting or simple immortality.
- */
-#define lockless_dereference(p) \
-({ \
- typeof(p) _________p1 = ACCESS_ONCE(p); \
- smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
- (_________p1); \
-})
-
-/**
* rcu_assign_pointer() - assign to RCU-protected pointer
* @p: pointer to assign to
* @v: value to assign (publish)
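
(The hunk cuts the rcu_assign_pointer() documentation short.) Its role on
the update side is to publish a fully initialized structure with release
semantics; a minimal sketch with gp and struct foo as before:

        struct foo *new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);

        if (!new_fp)
                return -ENOMEM;
        new_fp->a = 42;                         /* initialize first ... */
        rcu_assign_pointer(gp, new_fp);         /* ... then publish     */
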
@@ -720,7 +680,7 @@ static inline void rcu_preempt_sleep_check(void)
* @p: The pointer to read
*
* Return the value of the specified RCU-protected pointer, but omit the
- * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
+ * smp_read_barrier_depends() and keep the READ_ONCE(). This is useful
* when the value of this pointer is accessed, but the pointer is not
* dereferenced, for example, when testing an RCU-protected pointer against
* NULL. Although rcu_access_pointer() may also be used in cases where
@@ -805,47 +765,12 @@ static inline void rcu_preempt_sleep_check(void)
#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
/**
- * rcu_access_index() - fetch RCU index with no dereferencing
- * @p: The index to read
- *
- * Return the value of the specified RCU-protected index, but omit the
- * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
- * when the value of this index is accessed, but the index is not
- * dereferenced, for example, when testing an RCU-protected index against
- * -1. Although rcu_access_index() may also be used in cases where
- * update-side locks prevent the value of the index from changing, you
- * should instead use rcu_dereference_index_protected() for this use case.
- */
-#define rcu_access_index(p) __rcu_access_index((p), __rcu)
-
-/**
- * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
- * @p: The pointer to read, prior to dereferencing
- * @c: The conditions under which the dereference will take place
- *
- * Similar to rcu_dereference_check(), but omits the sparse checking.
- * This allows rcu_dereference_index_check() to be used on integers,
- * which can then be used as array indices. Attempting to use
- * rcu_dereference_check() on an integer will give compiler warnings
- * because the sparse address-space mechanism relies on dereferencing
- * the RCU-protected pointer. Dereferencing integers is not something
- * that even gcc will put up with.
- *
- * Note that this function does not implicitly check for RCU read-side
- * critical sections. If this function gains lots of uses, it might
- * make sense to provide versions for each flavor of RCU, but it does
- * not make sense as of early 2010.
- */
-#define rcu_dereference_index_check(p, c) \
- __rcu_dereference_index_check((p), (c))
-
-/**
* rcu_dereference_protected() - fetch RCU pointer when updates prevented
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This
+ * both the smp_read_barrier_depends() and the READ_ONCE(). This
* is useful in cases where update-side locks prevent the value of the
* pointer from changing. Please note that this primitive does -not-
* prevent the compiler from repeating this reference or combining it
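
(Again truncated mid-comment by the hunk.) rcu_dereference_protected() is
for update-side code in which a lock, rather than a grace period, prevents
the pointer from changing; a sketch assuming a hypothetical gp_lock that
guards updates to gp:

        spin_lock(&gp_lock);
        p = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
        if (p)
                p->a++;         /* safe: gp_lock excludes other updaters */
        spin_unlock(&gp_lock);
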
@@ -885,6 +810,28 @@ static inline void rcu_preempt_sleep_check(void)
#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)
/**
+ * rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism
+ * @p: The pointer to hand off
+ *
+ * This is simply an identity function, but it documents where a pointer
+ * is handed off from RCU to some other synchronization mechanism, for
+ * example, reference counting or locking. In C11, it would map to
+ * kill_dependency(). It could be used as follows:
+ *
+ * rcu_read_lock();
+ * p = rcu_dereference(gp);
+ * long_lived = is_long_lived(p);
+ * if (long_lived) {
+ * if (!atomic_inc_not_zero(p->refcnt))
+ * long_lived = false;
+ * else
+ * p = rcu_pointer_handoff(p);
+ * }
+ * rcu_read_unlock();
+ */
+#define rcu_pointer_handoff(p) (p)
+
+/**
* rcu_read_lock() - mark the beginning of an RCU read-side critical section
*
* When synchronize_rcu() is invoked on one CPU while other CPUs
@@ -930,8 +877,8 @@ static inline void rcu_read_lock(void)
__rcu_read_lock();
__acquire(RCU);
rcu_lock_acquire(&rcu_lock_map);
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_lock() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_lock() used illegally while idle");
}
/*
@@ -981,8 +928,8 @@ static inline void rcu_read_lock(void)
*/
static inline void rcu_read_unlock(void)
{
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_unlock() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_unlock() used illegally while idle");
__release(RCU);
__rcu_read_unlock();
rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
@@ -1013,8 +960,8 @@ static inline void rcu_read_lock_bh(void)
#else
__acquire(RCU_BH);
rcu_lock_acquire(&rcu_bh_lock_map);
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_lock_bh() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_lock_bh() used illegally while idle");
#endif
}
@@ -1028,8 +975,8 @@ static inline void rcu_read_unlock_bh(void)
#ifdef CONFIG_PREEMPT_RT_FULL
rcu_read_unlock();
#else
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_unlock_bh() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_unlock_bh() used illegally while idle");
rcu_lock_release(&rcu_bh_lock_map);
__release(RCU_BH);
#endif
@@ -1054,8 +1001,8 @@ static inline void rcu_read_lock_sched(void)
preempt_disable();
__acquire(RCU_SCHED);
rcu_lock_acquire(&rcu_sched_lock_map);
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_lock_sched() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_lock_sched() used illegally while idle");
}
/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -1072,8 +1019,8 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
*/
static inline void rcu_read_unlock_sched(void)
{
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_unlock_sched() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_unlock_sched() used illegally while idle");
rcu_lock_release(&rcu_sched_lock_map);
__release(RCU_SCHED);
preempt_enable();
@@ -1124,7 +1071,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
#define RCU_INIT_POINTER(p, v) \
do { \
rcu_dereference_sparse(p, __rcu); \
- p = RCU_INITIALIZER(v); \
+ WRITE_ONCE(p, RCU_INITIALIZER(v)); \
} while (0)
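
RCU_INIT_POINTER() now goes through WRITE_ONCE(). Unlike
rcu_assign_pointer() it omits the release barrier, so it remains safe only
where ordering is irrelevant or otherwise guaranteed, for example when the
value is NULL or the structure is not yet visible to readers (the ->next
field below is illustrative):

        struct foo *fp = kzalloc(sizeof(*fp), GFP_KERNEL);

        RCU_INIT_POINTER(fp->next, NULL);       /* NULL: no ordering needed */
        rcu_assign_pointer(gp, fp);             /* publication still wants
                                                 * release semantics */
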
/**
@@ -1147,7 +1094,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
#define __kfree_rcu(head, offset) \
do { \
BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
- kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \
+ kfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \
} while (0)
/**
@@ -1179,13 +1126,13 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
#define kfree_rcu(ptr, rcu_head) \
__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
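
kfree_rcu() is the user-facing form of __kfree_rcu() above: the kfree()
offset is smuggled through the callback-pointer argument (hence the
rcu_callback_t cast in the previous hunk), so no callback function is
needed. With struct foo as sketched earlier:

        static void remove_foo(struct foo *fp)
        {
                /* ... unlink fp from its RCU-protected structure ... */
                kfree_rcu(fp, rcu);     /* kfree(fp) after a grace period */
        }
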
-#if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL)
-static inline int rcu_needs_cpu(unsigned long *delta_jiffies)
+#ifdef CONFIG_TINY_RCU
+static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
- *delta_jiffies = ULONG_MAX;
+ *nextevt = KTIME_MAX;
return 0;
}
-#endif /* #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) */
+#endif /* #ifdef CONFIG_TINY_RCU */
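
rcu_needs_cpu()'s contract switches from jiffies to nanoseconds to match
the NO_HZ idle code. An illustrative caller (the names below are
hypothetical, not the actual tick-sched.c logic):

        u64 basemono = ktime_get_ns();  /* current monotonic time, ns */
        u64 next_rcu;

        if (rcu_needs_cpu(basemono, &next_rcu)) {
                /* RCU needs this CPU: keep the tick running. */
        } else {
                /* The tick may stop; next_rcu (KTIME_MAX in the Tiny
                 * stub above) bounds how long RCU tolerates that. */
        }
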
#if defined(CONFIG_RCU_NOCB_CPU_ALL)
static inline bool rcu_is_nocb_cpu(int cpu) { return true; }