xref: /linux/kernel/rcu/update.c (revision 621cde16e49b3ecf7d59a8106a20aaebfb4a59a9)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/torture.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/tick.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched/isolation.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <linux/irq_work.h>
#include <linux/rcupdate_trace.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

#ifndef CONFIG_TINY_RCU
module_param(rcu_expedited, int, 0444);
module_param(rcu_normal, int, 0444);
static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT);
#if !defined(CONFIG_PREEMPT_RT) || defined(CONFIG_NO_HZ_FULL)
module_param(rcu_normal_after_boot, int, 0444);
#endif
#endif /* #ifndef CONFIG_TINY_RCU */
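
/*
 * Note: given the "rcupdate." MODULE_PARAM_PREFIX above, these parameters
 * appear on the kernel command line as rcupdate.rcu_expedited=1,
 * rcupdate.rcu_normal=1, and rcupdate.rcu_normal_after_boot=1.
 */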

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rcu_read_lock_held_common() - might we be in RCU-sched read-side critical section?
 * @ret:	Best guess answer if lockdep cannot be relied on
 *
 * Returns true if lockdep must be ignored, in which case ``*ret`` contains
 * the best guess described below.  Otherwise returns false, in which
 * case ``*ret`` tells the caller nothing and the caller should instead
 * consult lockdep.
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, set ``*ret`` to nonzero iff in an
 * RCU-sched read-side critical section.  In absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of view (i.e.,
 * in the section between ct_idle_enter() and ct_idle_exit()), then
 * rcu_read_lock_held() sets ``*ret`` to false even if the CPU did an
 * rcu_read_lock().  The reason for this is that RCU ignores CPUs that are
 * in such a section, considering these as in extended quiescent state,
 * so such a CPU is effectively never in an RCU read-side critical section
 * regardless of what RCU primitives it invokes.  This state of affairs is
 * required --- we need to keep an RCU-free window in idle where the CPU may
 * possibly enter into low power mode.  This way, other CPUs that have
 * started a grace period can notice our extended quiescent state.
 * Otherwise we would delay any grace period for as long as we run in
 * the idle task.
 *
 * Similarly, we avoid claiming an RCU read lock held if the current
 * CPU is offline.
 */
static bool rcu_read_lock_held_common(bool *ret)
{
	if (!debug_lockdep_rcu_enabled()) {
		*ret = true;
		return true;
	}
	if (!rcu_is_watching()) {
		*ret = false;
		return true;
	}
	if (!rcu_lockdep_current_cpu_online()) {
		*ret = false;
		return true;
	}
	return false;
}

int rcu_read_lock_sched_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	return lock_is_held(&rcu_sched_lock_map) || !preemptible();
}
EXPORT_SYMBOL(rcu_read_lock_sched_held);
#endif
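
/*
 * Usage sketch (illustrative, not part of this file): debug code that
 * must run within an RCU-sched reader can assert as much via lockdep.
 * The function example_sched_read() is hypothetical.
 */
#if 0
static void example_sched_read(void)
{
	rcu_read_lock_sched();	/* preempt_disable() would also qualify. */
	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "example_sched_read() needs an RCU-sched reader");
	rcu_read_unlock_sched();
}
#endif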

#ifndef CONFIG_TINY_RCU

/*
 * Should expedited grace-period primitives always fall back to their
 * non-expedited counterparts?  Intended for use within RCU.  Note
 * that if the user specifies both rcu_expedited and rcu_normal, then
 * rcu_normal wins.  (Except during boot, from when the first task is
 * spawned until the rcu_set_runtime_mode() core_initcall() is invoked,
 * during which time everything is expedited.)
 */
bool rcu_gp_is_normal(void)
{
	return READ_ONCE(rcu_normal) &&
	       rcu_scheduler_active != RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);

static atomic_t rcu_async_hurry_nesting = ATOMIC_INIT(1);
/*
 * Should call_rcu() callbacks be processed with urgency or are
 * they OK being executed with arbitrary delays?
 */
bool rcu_async_should_hurry(void)
{
	return !IS_ENABLED(CONFIG_RCU_LAZY) ||
	       atomic_read(&rcu_async_hurry_nesting);
}
EXPORT_SYMBOL_GPL(rcu_async_should_hurry);

/**
 * rcu_async_hurry - Make future async RCU callbacks not lazy.
 *
 * After a call to this function, future calls to call_rcu()
 * will be processed in a timely fashion.
 */
void rcu_async_hurry(void)
{
	if (IS_ENABLED(CONFIG_RCU_LAZY))
		atomic_inc(&rcu_async_hurry_nesting);
}
EXPORT_SYMBOL_GPL(rcu_async_hurry);

/**
 * rcu_async_relax - Make future async RCU callbacks lazy.
 *
 * After a call to this function, future calls to call_rcu()
 * will be processed in a lazy fashion.
 */
void rcu_async_relax(void)
{
	if (IS_ENABLED(CONFIG_RCU_LAZY))
		atomic_dec(&rcu_async_hurry_nesting);
}
EXPORT_SYMBOL_GPL(rcu_async_relax);
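
/*
 * Usage sketch (illustrative, not part of this file): hurry/relax calls
 * nest, so a caller wanting timely callback processing brackets the
 * region with a matched pair.  example_queue_urgent_work() and its
 * callback example_cb() are hypothetical.
 */
#if 0
static void example_queue_urgent_work(struct rcu_head *rhp)
{
	rcu_async_hurry();		/* Callbacks queued from here on are not lazy. */
	call_rcu(rhp, example_cb);	/* Not subject to CONFIG_RCU_LAZY batching delays. */
	rcu_async_relax();		/* Restore laziness for later callers. */
}
#endif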

static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);
/*
 * Should normal grace-period primitives be expedited?  Intended for
 * use within RCU.  Note that this function takes the rcu_expedited
 * sysfs/boot variable into account as well as the rcu_expedite_gp()
 * nesting, whose initial count of one covers the early-boot period.
 * So looping on rcu_unexpedite_gp() until rcu_gp_is_expedited()
 * returns false is a -really- bad idea.
 */
bool rcu_gp_is_expedited(void)
{
	return rcu_expedited || atomic_read(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);

/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited() function
 * had instead been called.
 */
void rcu_expedite_gp(void)
{
	atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);

/**
 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
 *
 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
 * and if the rcu_expedited sysfs/boot parameter is not set, then all
 * subsequent calls to synchronize_rcu() and friends will return to
 * their normal non-expedited behavior.
 */
void rcu_unexpedite_gp(void)
{
	atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
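
/*
 * Usage sketch (illustrative, not part of this file): callers bracket a
 * latency-sensitive region with a matched pair, making intervening
 * synchronize_rcu() calls behave as if expedited.
 * example_fast_reconfig() is hypothetical.
 */
#if 0
static void example_fast_reconfig(void)
{
	rcu_expedite_gp();	/* Nests; pairs with rcu_unexpedite_gp() below. */
	synchronize_rcu();	/* Behaves like synchronize_rcu_expedited(). */
	rcu_unexpedite_gp();	/* Restore normal grace-period behavior. */
}
#endif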

static bool rcu_boot_ended __read_mostly;

/*
 * Inform RCU of the end of the in-kernel boot sequence.
 */
void rcu_end_inkernel_boot(void)
{
	rcu_unexpedite_gp();
	rcu_async_relax();
	if (rcu_normal_after_boot)
		WRITE_ONCE(rcu_normal, 1);
	rcu_boot_ended = true;
}

/*
 * Let rcutorture know when it is OK to turn it up to eleven.
 */
bool rcu_inkernel_boot_has_ended(void)
{
	return rcu_boot_ended;
}
EXPORT_SYMBOL_GPL(rcu_inkernel_boot_has_ended);

#endif /* #ifndef CONFIG_TINY_RCU */

/*
 * Test each non-SRCU synchronous grace-period wait API.  This is
 * useful just after a change in mode for these primitives, and
 * during early boot.
 */
void rcu_test_sync_prims(void)
{
	if (!IS_ENABLED(CONFIG_PROVE_RCU))
		return;
	pr_info("Running RCU synchronous self tests\n");
	synchronize_rcu();
	synchronize_rcu_expedited();
}

#if !defined(CONFIG_TINY_RCU)

/*
 * Switch to run-time mode once RCU has fully initialized.
 */
static int __init rcu_set_runtime_mode(void)
{
	rcu_test_sync_prims();
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
	kfree_rcu_scheduler_running();
	rcu_test_sync_prims();
	return 0;
}
core_initcall(rcu_set_runtime_mode);

#endif /* #if !defined(CONFIG_TINY_RCU) */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map = {
	.name = "rcu_read_lock",
	.key = &rcu_lock_key,
	.wait_type_outer = LD_WAIT_FREE,
	.wait_type_inner = LD_WAIT_CONFIG, /* PREEMPT_RT implies PREEMPT_RCU */
};
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map = {
	.name = "rcu_read_lock_bh",
	.key = &rcu_bh_lock_key,
	.wait_type_outer = LD_WAIT_FREE,
	.wait_type_inner = LD_WAIT_CONFIG, /* PREEMPT_RT makes BH preemptible. */
};
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map = {
	.name = "rcu_read_lock_sched",
	.key = &rcu_sched_lock_key,
	.wait_type_outer = LD_WAIT_FREE,
	.wait_type_inner = LD_WAIT_SPIN,
};
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

// Tell lockdep when RCU callbacks are being invoked.
static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

noinstr int notrace debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && READ_ONCE(debug_locks) &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
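
/*
 * Usage sketch (illustrative, not part of this file): the classic
 * consumer of rcu_read_lock_held() is an rcu_dereference_check()
 * condition that also admits an update-side lock.  The names gp,
 * example_lock, and struct foo are hypothetical.
 */
#if 0
struct foo *example_get(void)
{
	return rcu_dereference_check(gp,
				     rcu_read_lock_held() ||
				     lockdep_is_held(&example_lock));
}
#endif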

/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and !CONFIG_PROVE_RCU cases.  Note that if someone uses
 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock_bh() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

int rcu_read_lock_any_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	if (lock_is_held(&rcu_lock_map) ||
	    lock_is_held(&rcu_bh_lock_map) ||
	    lock_is_held(&rcu_sched_lock_map))
		return 1;
	return !preemptible();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_any_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * wakeme_after_rcu() - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 *
 * Awaken the corresponding task now that a grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);
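
/*
 * Usage sketch (illustrative, not part of this file): the
 * queue-then-wait pattern that wakeme_after_rcu() supports, and that
 * __wait_rcu_gp() below applies once per distinct flavor.
 * example_wait_one_gp() is hypothetical.
 */
#if 0
static void example_wait_one_gp(void)
{
	struct rcu_synchronize rs;

	init_rcu_head_on_stack(&rs.head);
	init_completion(&rs.completion);
	call_rcu(&rs.head, wakeme_after_rcu);	/* Grace-period end -> complete(). */
	wait_for_completion(&rs.completion);	/* Block until the callback runs. */
	destroy_rcu_head_on_stack(&rs.head);
}
#endif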

void __wait_rcu_gp(bool checktiny, unsigned int state, int n, call_rcu_func_t *crcu_array,
		   struct rcu_synchronize *rs_array)
{
	int i;
	int j;

	/* Initialize and register callbacks for each crcu_array element. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu)) {
			might_sleep();
			continue;
		}
		/* Queue a callback only for the first occurrence of each flavor. */
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i) {
			init_rcu_head_on_stack(&rs_array[i].head);
			init_completion(&rs_array[i].completion);
			(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
		}
	}

	/* Wait for all callbacks to be invoked. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu))
			continue;
		/* Again, wait only on the first occurrence of each flavor. */
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i) {
			wait_for_completion_state(&rs_array[i].completion, state);
			destroy_rcu_head_on_stack(&rs_array[i].head);
		}
	}
}
EXPORT_SYMBOL_GPL(__wait_rcu_gp);
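
/*
 * Usage sketch (illustrative, not part of this file): callers normally
 * reach __wait_rcu_gp() through the synchronize_rcu_mult() macro in
 * <linux/rcupdate_wait.h>, here waiting on vanilla RCU and RCU Tasks
 * grace periods concurrently.
 */
#if 0
	synchronize_rcu_mult(call_rcu, call_rcu_tasks);
#endif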

void finish_rcuwait(struct rcuwait *w)
{
	rcu_assign_pointer(w->task, NULL);
	__set_current_state(TASK_RUNNING);
}
EXPORT_SYMBOL_GPL(finish_rcuwait);

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head);

void destroy_rcu_head(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head);

static bool rcuhead_is_static_object(void *addr)
{
	return true;
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be destroyed
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

const struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.is_static_object = rcuhead_is_static_object,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old, unsigned long c)
{
	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif

#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST) || IS_ENABLED(CONFIG_LOCK_TORTURE_TEST) || IS_MODULE(CONFIG_LOCK_TORTURE_TEST)
/* Get rcutorture access to sched_setaffinity(). */
long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
	int ret;

	ret = sched_setaffinity(pid, in_mask);
	WARN_ONCE(ret, "%s: sched_setaffinity(%d) returned %d\n", __func__, pid, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(torture_sched_setaffinity);
#endif

int rcu_cpu_stall_notifiers __read_mostly; // !0 = provide stall notifiers (rarely useful)
EXPORT_SYMBOL_GPL(rcu_cpu_stall_notifiers);

#ifdef CONFIG_RCU_STALL_COMMON
int rcu_cpu_stall_ftrace_dump __read_mostly;
module_param(rcu_cpu_stall_ftrace_dump, int, 0644);
#ifdef CONFIG_RCU_CPU_STALL_NOTIFIER
module_param(rcu_cpu_stall_notifiers, int, 0444);
#endif // #ifdef CONFIG_RCU_CPU_STALL_NOTIFIER
int rcu_cpu_stall_suppress __read_mostly; // !0 = suppress stall warnings.
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
module_param(rcu_cpu_stall_suppress, int, 0644);
int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
module_param(rcu_cpu_stall_timeout, int, 0644);
int rcu_exp_cpu_stall_timeout __read_mostly = CONFIG_RCU_EXP_CPU_STALL_TIMEOUT;
module_param(rcu_exp_cpu_stall_timeout, int, 0644);
int rcu_cpu_stall_cputime __read_mostly = IS_ENABLED(CONFIG_RCU_CPU_STALL_CPUTIME);
module_param(rcu_cpu_stall_cputime, int, 0644);
bool rcu_exp_stall_task_details __read_mostly;
module_param(rcu_exp_stall_task_details, bool, 0644);
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

// Suppress boot-time RCU CPU stall warnings and rcutorture writer stall
// warnings.  Also used by rcutorture even if stall warnings are excluded.
int rcu_cpu_stall_suppress_at_boot __read_mostly; // !0 = suppress boot stalls.
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress_at_boot);
module_param(rcu_cpu_stall_suppress_at_boot, int, 0444);

/**
 * get_completed_synchronize_rcu - Return a pre-completed polled state cookie
 *
 * Returns a value that will always be treated by functions like
 * poll_state_synchronize_rcu() as a cookie whose grace period has already
 * completed.
 */
unsigned long get_completed_synchronize_rcu(void)
{
	return RCU_GET_STATE_COMPLETED;
}
EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu);
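
/*
 * Usage sketch (illustrative, not part of this file): a pre-completed
 * cookie lets initialization code mark state as "grace period already
 * done" without waiting; poll_state_synchronize_rcu() then reports it
 * as expired.
 */
#if 0
	unsigned long cookie = get_completed_synchronize_rcu();

	WARN_ON_ONCE(!poll_state_synchronize_rcu(cookie));	/* Always true. */
#endif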

#ifdef CONFIG_PROVE_RCU

/*
 * Early boot self test parameters.
 */
static bool rcu_self_test;
module_param(rcu_self_test, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
	rcu_self_test_counter++;
	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

DEFINE_STATIC_SRCU(early_srcu);
static unsigned long early_srcu_cookie;

struct early_boot_kfree_rcu {
	struct rcu_head rh;
};

static void early_boot_test_call_rcu(void)
{
	static struct rcu_head head;
	int idx;
	static struct rcu_head shead;
	struct early_boot_kfree_rcu *rhp;

	idx = srcu_down_read(&early_srcu);
	srcu_up_read(&early_srcu, idx);
	call_rcu(&head, test_callback);
	early_srcu_cookie = start_poll_synchronize_srcu(&early_srcu);
	call_srcu(&early_srcu, &shead, test_callback);
	rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
	if (!WARN_ON_ONCE(!rhp))
		kfree_rcu(rhp, rh);
}

void rcu_early_boot_tests(void)
{
	pr_info("Running RCU self tests\n");

	if (rcu_self_test)
		early_boot_test_call_rcu();
	rcu_test_sync_prims();
}

static int rcu_verify_early_boot_tests(void)
{
	int ret = 0;
	int early_boot_test_counter = 0;

	if (rcu_self_test) {
		early_boot_test_counter++;
		rcu_barrier();
		early_boot_test_counter++;
		srcu_barrier(&early_srcu);
		WARN_ON_ONCE(!poll_state_synchronize_srcu(&early_srcu, early_srcu_cookie));
		cleanup_srcu_struct(&early_srcu);
	}
	if (rcu_self_test_counter != early_boot_test_counter) {
		WARN_ON(1);
		ret = -1;
	}

	return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */

#include "tasks.h"

#ifndef CONFIG_TINY_RCU

/*
 * Print any significant non-default boot-time settings.
 */
void __init rcupdate_announce_bootup_oddness(void)
{
	if (rcu_normal)
		pr_info("\tNo expedited grace period (rcu_normal).\n");
	else if (rcu_normal_after_boot)
		pr_info("\tNo expedited grace period (rcu_normal_after_boot).\n");
	else if (rcu_expedited)
		pr_info("\tAll grace periods are expedited (rcu_expedited).\n");
	if (rcu_cpu_stall_suppress)
		pr_info("\tRCU CPU stall warnings suppressed (rcu_cpu_stall_suppress).\n");
	if (rcu_cpu_stall_timeout != CONFIG_RCU_CPU_STALL_TIMEOUT)
		pr_info("\tRCU CPU stall warnings timeout set to %d (rcu_cpu_stall_timeout).\n", rcu_cpu_stall_timeout);
	rcu_tasks_bootup_oddness();
}

#endif /* #ifndef CONFIG_TINY_RCU */
679