xref: /linux/kernel/rcu/update.c (revision 31486372a1e9a66ec2e9e2903b8792bba7e503e1)
1 /*
2  * Read-Copy Update mechanism for mutual exclusion
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, you can access it online at
16  * http://www.gnu.org/licenses/gpl-2.0.html.
17  *
18  * Copyright IBM Corporation, 2001
19  *
20  * Authors: Dipankar Sarma <dipankar@in.ibm.com>
21  *	    Manfred Spraul <manfred@colorfullife.com>
22  *
23  * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
24  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
25  * Papers:
26  * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
27  * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
28  *
29  * For detailed explanation of Read-Copy Update mechanism see -
30  *		http://lse.sourceforge.net/locking/rcupdate.html
31  *
32  */
33 #include <linux/types.h>
34 #include <linux/kernel.h>
35 #include <linux/init.h>
36 #include <linux/spinlock.h>
37 #include <linux/smp.h>
38 #include <linux/interrupt.h>
39 #include <linux/sched/signal.h>
40 #include <linux/sched/debug.h>
41 #include <linux/atomic.h>
42 #include <linux/bitops.h>
43 #include <linux/percpu.h>
44 #include <linux/notifier.h>
45 #include <linux/cpu.h>
46 #include <linux/mutex.h>
47 #include <linux/export.h>
48 #include <linux/hardirq.h>
49 #include <linux/delay.h>
50 #include <linux/moduleparam.h>
51 #include <linux/kthread.h>
52 #include <linux/tick.h>
53 #include <linux/rcupdate_wait.h>
54 
55 #define CREATE_TRACE_POINTS
56 
57 #include "rcu.h"
58 
59 #ifdef MODULE_PARAM_PREFIX
60 #undef MODULE_PARAM_PREFIX
61 #endif
62 #define MODULE_PARAM_PREFIX "rcupdate."
63 
64 #ifndef CONFIG_TINY_RCU
65 extern int rcu_expedited; /* from sysctl */
66 module_param(rcu_expedited, int, 0);
67 extern int rcu_normal; /* from sysctl */
68 module_param(rcu_normal, int, 0);
69 static int rcu_normal_after_boot;
70 module_param(rcu_normal_after_boot, int, 0);
71 #endif /* #ifndef CONFIG_TINY_RCU */
72 
73 #ifdef CONFIG_DEBUG_LOCK_ALLOC
74 /**
75  * rcu_read_lock_sched_held() - might we be in an RCU-sched read-side critical section?
76  *
77  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
78  * RCU-sched read-side critical section.  In absence of
79  * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
80  * critical section unless it can prove otherwise.  Note that disabling
81  * of preemption (including disabling irqs) counts as an RCU-sched
82  * read-side critical section.  This is useful for debug checks in functions
83  * that require that they be called within an RCU-sched read-side
84  * critical section.
85  *
86  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
87  * and while lockdep is disabled.
88  *
89  * Note that if the CPU is in the idle loop from an RCU point of
90  * view (i.e., we are in the section between rcu_idle_enter() and
91  * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
92  * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
93  * that are in such a section, considering these as in extended quiescent
94  * state, so such a CPU is effectively never in an RCU read-side critical
95  * section regardless of what RCU primitives it invokes.  This state of
96  * affairs is required: we need to keep an RCU-free window in idle
97  * where the CPU may possibly enter a low-power mode.  This way an
98  * extended quiescent state can be reported to other CPUs that have
99  * started a grace period.  Otherwise we would delay every grace period
100  * for as long as we run in the idle task.
101  *
102  * Similarly, we avoid claiming an RCU-sched read lock held if the current
103  * CPU is offline.
104  */
105 int rcu_read_lock_sched_held(void)
106 {
107 	int lockdep_opinion = 0;
108 
109 	if (!debug_lockdep_rcu_enabled())
110 		return 1;
111 	if (!rcu_is_watching())
112 		return 0;
113 	if (!rcu_lockdep_current_cpu_online())
114 		return 0;
115 	if (debug_locks)
116 		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
117 	return lockdep_opinion || !preemptible();
118 }
119 EXPORT_SYMBOL(rcu_read_lock_sched_held);
120 #endif
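
/*
 * Illustrative sketch, not part of this file: one way a debug check might
 * use rcu_read_lock_sched_held().  The foo_stats structure and the
 * foo_account_hit() function are hypothetical; RCU_LOCKDEP_WARN() and
 * rcu_read_lock_sched_held() are the real interfaces being demonstrated.
 */
#include <linux/rcupdate.h>

struct foo_stats {
	unsigned long hits;
};

static void foo_account_hit(struct foo_stats *fsp)
{
	/* Under lockdep, complain unless we hold rcu_read_lock_sched(). */
	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "foo_account_hit() needs rcu_read_lock_sched() protection");
	fsp->hits++;
}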
121 
122 #ifndef CONFIG_TINY_RCU
123 
124 /*
125  * Should expedited grace-period primitives always fall back to their
126  * non-expedited counterparts?  Intended for use within RCU.  Note
127  * that if the user specifies both rcu_expedited and rcu_normal, then
128  * rcu_normal wins.  (Except during the window of boot between when the
129  * first task is spawned and when the rcu_set_runtime_mode()
130  * core_initcall() is invoked, during which everything is expedited.)
131  */
132 bool rcu_gp_is_normal(void)
133 {
134 	return READ_ONCE(rcu_normal) &&
135 	       rcu_scheduler_active != RCU_SCHEDULER_INIT;
136 }
137 EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
138 
139 static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);
140 
141 /*
142  * Should normal grace-period primitives be expedited?  Intended for
143  * use within RCU.  Note that this function takes the rcu_expedited
144  * sysfs/boot variable and rcu_scheduler_active into account as well
145  * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
146  * until rcu_gp_is_expedited() returns false is a -really- bad idea.
147  */
148 bool rcu_gp_is_expedited(void)
149 {
150 	return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
151 	       rcu_scheduler_active == RCU_SCHEDULER_INIT;
152 }
153 EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);
154 
155 /**
156  * rcu_expedite_gp - Expedite future RCU grace periods
157  *
158  * After a call to this function, future calls to synchronize_rcu() and
159  * friends act as if the corresponding synchronize_rcu_expedited() function
160  * had instead been called.
161  */
162 void rcu_expedite_gp(void)
163 {
164 	atomic_inc(&rcu_expedited_nesting);
165 }
166 EXPORT_SYMBOL_GPL(rcu_expedite_gp);
167 
168 /**
169  * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
170  *
171  * Undo a prior call to rcu_expedite_gp().  If all prior calls to
172  * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
173  * and if the rcu_expedited sysfs/boot parameter is not set, then all
174  * subsequent calls to synchronize_rcu() and friends will return to
175  * their normal non-expedited behavior.
176  */
177 void rcu_unexpedite_gp(void)
178 {
179 	atomic_dec(&rcu_expedited_nesting);
180 }
181 EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
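
/*
 * Illustrative sketch, not part of this file: pairing rcu_expedite_gp()
 * with rcu_unexpedite_gp() around a latency-sensitive window.  The
 * foo_fast_reconfigure() function is hypothetical; note that the nesting
 * counter means every rcu_expedite_gp() must be balanced by exactly one
 * rcu_unexpedite_gp().
 */
#include <linux/rcupdate.h>

static void foo_fast_reconfigure(void)
{
	rcu_expedite_gp();	/* Future synchronize_rcu() calls are expedited. */
	synchronize_rcu();	/* Completes as an expedited grace period. */
	rcu_unexpedite_gp();	/* Restore normal behavior, absent other nesting. */
}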
182 
183 /*
184  * Inform RCU of the end of the in-kernel boot sequence.
185  */
186 void rcu_end_inkernel_boot(void)
187 {
188 	rcu_unexpedite_gp();
189 	if (rcu_normal_after_boot)
190 		WRITE_ONCE(rcu_normal, 1);
191 }
192 
193 #endif /* #ifndef CONFIG_TINY_RCU */
194 
195 /*
196  * Test each non-SRCU synchronous grace-period wait API.  This is
197  * useful just after a change in mode for these primitives, and
198  * during early boot.
199  */
200 void rcu_test_sync_prims(void)
201 {
202 	if (!IS_ENABLED(CONFIG_PROVE_RCU))
203 		return;
204 	synchronize_rcu();
205 	synchronize_rcu_bh();
206 	synchronize_sched();
207 	synchronize_rcu_expedited();
208 	synchronize_rcu_bh_expedited();
209 	synchronize_sched_expedited();
210 }
211 
212 #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)
213 
214 /*
215  * Switch to run-time mode once RCU has fully initialized.
216  */
217 static int __init rcu_set_runtime_mode(void)
218 {
219 	rcu_test_sync_prims();
220 	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
221 	rcu_test_sync_prims();
222 	return 0;
223 }
224 core_initcall(rcu_set_runtime_mode);
225 
226 #endif /* #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU) */
227 
228 #ifdef CONFIG_PREEMPT_RCU
229 
230 /*
231  * Preemptible RCU implementation for rcu_read_lock().
232  * Just increment ->rcu_read_lock_nesting, shared state will be updated
233  * if we block.
234  */
235 void __rcu_read_lock(void)
236 {
237 	current->rcu_read_lock_nesting++;
238 	barrier();  /* critical section after entry code. */
239 }
240 EXPORT_SYMBOL_GPL(__rcu_read_lock);
241 
242 /*
243  * Preemptible RCU implementation for rcu_read_unlock().
244  * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
245  * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
246  * invoke rcu_read_unlock_special() to clean up after a context switch
247  * in an RCU read-side critical section and other special cases.
248  */
249 void __rcu_read_unlock(void)
250 {
251 	struct task_struct *t = current;
252 
253 	if (t->rcu_read_lock_nesting != 1) {
254 		--t->rcu_read_lock_nesting;
255 	} else {
256 		barrier();  /* critical section before exit code. */
257 		t->rcu_read_lock_nesting = INT_MIN;
258 		barrier();  /* assign before ->rcu_read_unlock_special load */
259 		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
260 			rcu_read_unlock_special(t);
261 		barrier();  /* ->rcu_read_unlock_special load before assign */
262 		t->rcu_read_lock_nesting = 0;
263 	}
264 #ifdef CONFIG_PROVE_LOCKING
265 	{
266 		int rrln = READ_ONCE(t->rcu_read_lock_nesting);
267 
268 		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
269 	}
270 #endif /* #ifdef CONFIG_PROVE_LOCKING */
271 }
272 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
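
/*
 * Illustrative sketch, not part of this file: rcu_read_lock() and
 * rcu_read_unlock() (which map to the functions above under
 * CONFIG_PREEMPT_RCU) nest, so only the outermost rcu_read_unlock() can
 * end up in rcu_read_unlock_special().  The foo structure, list head, and
 * lookup function are hypothetical.
 */
#include <linux/rcupdate.h>
#include <linux/rculist.h>

struct foo {
	int key;
	struct list_head list;
};

static bool foo_key_present(struct list_head *head, int key)
{
	struct foo *p;
	bool found = false;

	rcu_read_lock();		/* ->rcu_read_lock_nesting: 0 -> 1 */
	list_for_each_entry_rcu(p, head, list) {
		if (p->key == key) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();		/* outermost unlock: special work, if any */
	return found;
}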
273 
274 #endif /* #ifdef CONFIG_PREEMPT_RCU */
275 
276 #ifdef CONFIG_DEBUG_LOCK_ALLOC
277 static struct lock_class_key rcu_lock_key;
278 struct lockdep_map rcu_lock_map =
279 	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
280 EXPORT_SYMBOL_GPL(rcu_lock_map);
281 
282 static struct lock_class_key rcu_bh_lock_key;
283 struct lockdep_map rcu_bh_lock_map =
284 	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
285 EXPORT_SYMBOL_GPL(rcu_bh_lock_map);
286 
287 static struct lock_class_key rcu_sched_lock_key;
288 struct lockdep_map rcu_sched_lock_map =
289 	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
290 EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
291 
292 static struct lock_class_key rcu_callback_key;
293 struct lockdep_map rcu_callback_map =
294 	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
295 EXPORT_SYMBOL_GPL(rcu_callback_map);
296 
297 int notrace debug_lockdep_rcu_enabled(void)
298 {
299 	return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
300 	       current->lockdep_recursion == 0;
301 }
302 EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
303 
304 /**
305  * rcu_read_lock_held() - might we be in an RCU read-side critical section?
306  *
307  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
308  * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
309  * this assumes we are in an RCU read-side critical section unless it can
310  * prove otherwise.  This is useful for debug checks in functions that
311  * require that they be called within an RCU read-side critical section.
312  *
313  * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
314  * and while lockdep is disabled.
315  *
316  * Note that rcu_read_lock() and the matching rcu_read_unlock() must
317  * occur in the same context, for example, it is illegal to invoke
318  * rcu_read_unlock() in process context if the matching rcu_read_lock()
319  * was invoked from within an irq handler.
320  *
321  * Note that rcu_read_lock() is disallowed if the CPU is either idle or
322  * offline from an RCU perspective, so check for those as well.
323  */
324 int rcu_read_lock_held(void)
325 {
326 	if (!debug_lockdep_rcu_enabled())
327 		return 1;
328 	if (!rcu_is_watching())
329 		return 0;
330 	if (!rcu_lockdep_current_cpu_online())
331 		return 0;
332 	return lock_is_held(&rcu_lock_map);
333 }
334 EXPORT_SYMBOL_GPL(rcu_read_lock_held);
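
/*
 * Illustrative sketch, not part of this file: rcu_read_lock_held() as the
 * lockdep condition of rcu_dereference_check(), so that updaters holding
 * the (hypothetical) foo_lock may also dereference the pointer without a
 * lockdep splat.  Only the gbl_foo_cfg pointer, its lock, and the accessor
 * are made up here.
 */
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct foo_cfg {
	int timeout;
};

static DEFINE_SPINLOCK(foo_lock);		/* update-side lock */
static struct foo_cfg __rcu *gbl_foo_cfg;	/* RCU-protected pointer */

static int foo_get_timeout(void)
{
	struct foo_cfg *cfg;
	int t = -1;

	rcu_read_lock();
	/* Legal in an RCU reader *or* while holding foo_lock. */
	cfg = rcu_dereference_check(gbl_foo_cfg, lockdep_is_held(&foo_lock));
	if (cfg)
		t = cfg->timeout;
	rcu_read_unlock();
	return t;
}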
335 
336 /**
337  * rcu_read_lock_bh_held() - might we be in an RCU-bh read-side critical section?
338  *
339  * Check for bottom half being disabled, which covers both the
340  * CONFIG_PROVE_RCU and !CONFIG_PROVE_RCU cases.  Note that if someone
341  * uses rcu_read_lock_bh(), but then later enables BH, lockdep (if
342  * enabled) will show the situation.  This is useful for debug checks
343  * in functions that require that they be called within an RCU-bh
344  * read-side critical section.
345  *
346  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
347  *
348  * Note that rcu_read_lock_bh() is disallowed if the CPU is either idle or
349  * offline from an RCU perspective, so check for those as well.
350  */
351 int rcu_read_lock_bh_held(void)
352 {
353 	if (!debug_lockdep_rcu_enabled())
354 		return 1;
355 	if (!rcu_is_watching())
356 		return 0;
357 	if (!rcu_lockdep_current_cpu_online())
358 		return 0;
359 	return in_softirq() || irqs_disabled();
360 }
361 EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
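
/*
 * Illustrative sketch, not part of this file: a softirq-context accessor
 * whose debug check relies on rcu_read_lock_bh_held().  The foo_bh_cfg
 * pointer and accessor are hypothetical; rcu_read_lock_bh(),
 * rcu_dereference_bh(), and RCU_LOCKDEP_WARN() are real interfaces.
 */
#include <linux/rcupdate.h>

struct foo_bh_cfg {
	int budget;
};

static struct foo_bh_cfg __rcu *gbl_foo_bh_cfg;

static int foo_bh_get_budget(void)
{
	struct foo_bh_cfg *cfg;
	int budget = -1;

	rcu_read_lock_bh();
	RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(),
			 "foo_bh_get_budget() needs rcu_read_lock_bh() protection");
	cfg = rcu_dereference_bh(gbl_foo_bh_cfg);
	if (cfg)
		budget = cfg->budget;
	rcu_read_unlock_bh();
	return budget;
}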
362 
363 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
364 
365 /**
366  * wakeme_after_rcu() - Callback function to awaken a task after grace period
367  * @head: Pointer to rcu_head member within rcu_synchronize structure
368  *
369  * Awaken the corresponding task now that a grace period has elapsed.
370  */
371 void wakeme_after_rcu(struct rcu_head *head)
372 {
373 	struct rcu_synchronize *rcu;
374 
375 	rcu = container_of(head, struct rcu_synchronize, head);
376 	complete(&rcu->completion);
377 }
378 EXPORT_SYMBOL_GPL(wakeme_after_rcu);
379 
380 void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
381 		   struct rcu_synchronize *rs_array)
382 {
383 	int i;
384 	int j;
385 
386 	/* Initialize and register callbacks for each flavor specified. */
387 	for (i = 0; i < n; i++) {
388 		if (checktiny &&
389 		    (crcu_array[i] == call_rcu ||
390 		     crcu_array[i] == call_rcu_bh)) {
391 			might_sleep();
392 			continue;
393 		}
394 		init_rcu_head_on_stack(&rs_array[i].head);
395 		init_completion(&rs_array[i].completion);
396 		for (j = 0; j < i; j++)
397 			if (crcu_array[j] == crcu_array[i])
398 				break;
399 		if (j == i)
400 			(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
401 	}
402 
403 	/* Wait for all callbacks to be invoked. */
404 	for (i = 0; i < n; i++) {
405 		if (checktiny &&
406 		    (crcu_array[i] == call_rcu ||
407 		     crcu_array[i] == call_rcu_bh))
408 			continue;
409 		for (j = 0; j < i; j++)
410 			if (crcu_array[j] == crcu_array[i])
411 				break;
412 		if (j == i)
413 			wait_for_completion(&rs_array[i].completion);
414 		destroy_rcu_head_on_stack(&rs_array[i].head);
415 	}
416 }
417 EXPORT_SYMBOL_GPL(__wait_rcu_gp);
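
/*
 * Illustrative sketch, not part of this file: the wait_rcu_gp() wrapper in
 * <linux/rcupdate_wait.h> builds the crcu_array[]/rs_array[] pair and calls
 * __wait_rcu_gp() above, so a synchronize_rcu()-style wait reduces to a
 * one-liner.  The foo_synchronize() function is hypothetical.
 */
#include <linux/rcupdate_wait.h>

static void foo_synchronize(void)
{
	/* Block until a grace period for call_rcu() callbacks has elapsed. */
	wait_rcu_gp(call_rcu);
}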
418 
419 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
420 void init_rcu_head(struct rcu_head *head)
421 {
422 	debug_object_init(head, &rcuhead_debug_descr);
423 }
424 
425 void destroy_rcu_head(struct rcu_head *head)
426 {
427 	debug_object_free(head, &rcuhead_debug_descr);
428 }
429 
430 static bool rcuhead_is_static_object(void *addr)
431 {
432 	return true;
433 }
434 
435 /**
436  * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
437  * @head: pointer to rcu_head structure to be initialized
438  *
439  * This function informs debugobjects of a new rcu_head structure that
440  * has been allocated as an auto variable on the stack.  This function
441  * is not required for rcu_head structures that are statically defined or
442  * that are dynamically allocated on the heap.  This function has no
443  * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
444  */
445 void init_rcu_head_on_stack(struct rcu_head *head)
446 {
447 	debug_object_init_on_stack(head, &rcuhead_debug_descr);
448 }
449 EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);
450 
451 /**
452  * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
453  * @head: pointer to rcu_head structure to be initialized
454  *
455  * This function informs debugobjects that an on-stack rcu_head structure
456  * is about to go out of scope.  As with init_rcu_head_on_stack(), this
457  * function is not required for rcu_head structures that are statically
458  * defined or that are dynamically allocated on the heap.  Also as with
459  * init_rcu_head_on_stack(), this function has no effect for
460  * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
461  */
462 void destroy_rcu_head_on_stack(struct rcu_head *head)
463 {
464 	debug_object_free(head, &rcuhead_debug_descr);
465 }
466 EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);
467 
468 struct debug_obj_descr rcuhead_debug_descr = {
469 	.name = "rcu_head",
470 	.is_static_object = rcuhead_is_static_object,
471 };
472 EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
473 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
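
/*
 * Illustrative sketch, not part of this file: a caller placing an rcu_head
 * on its stack brackets the usage with init_rcu_head_on_stack() and
 * destroy_rcu_head_on_stack() so that debugobjects does not mistake the
 * auto variable for a mis-tracked heap object.  The foo_waiter structure
 * and functions are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/rcupdate.h>

struct foo_waiter {
	struct rcu_head rh;
	struct completion done;
};

static void foo_gp_done(struct rcu_head *rhp)
{
	complete(&container_of(rhp, struct foo_waiter, rh)->done);
}

static void foo_wait_for_gp(void)
{
	struct foo_waiter w;			/* on-stack rcu_head */

	init_rcu_head_on_stack(&w.rh);
	init_completion(&w.done);
	call_rcu(&w.rh, foo_gp_done);
	wait_for_completion(&w.done);		/* grace period has elapsed */
	destroy_rcu_head_on_stack(&w.rh);
}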
474 
475 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
476 void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
477 			       unsigned long secs,
478 			       unsigned long c_old, unsigned long c)
479 {
480 	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
481 }
482 EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
483 #else
484 #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
485 	do { } while (0)
486 #endif
487 
488 #ifdef CONFIG_RCU_STALL_COMMON
489 
490 #ifdef CONFIG_PROVE_RCU
491 #define RCU_STALL_DELAY_DELTA	       (5 * HZ)
492 #else
493 #define RCU_STALL_DELAY_DELTA	       0
494 #endif
495 
496 int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
497 EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
498 static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
499 
500 module_param(rcu_cpu_stall_suppress, int, 0644);
501 module_param(rcu_cpu_stall_timeout, int, 0644);
502 
503 int rcu_jiffies_till_stall_check(void)
504 {
505 	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);
506 
507 	/*
508 	 * Limit check must be consistent with the Kconfig limits
509 	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
510 	 */
511 	if (till_stall_check < 3) {
512 		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
513 		till_stall_check = 3;
514 	} else if (till_stall_check > 300) {
515 		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
516 		till_stall_check = 300;
517 	}
518 	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
519 }
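
/*
 * Worked example (illustrative, not from this file): with
 * rcu_cpu_stall_timeout=21, CONFIG_PROVE_RCU=y, and HZ=1000, the function
 * above returns 21 * 1000 + 5 * 1000 = 26000 jiffies (26 seconds).  An
 * out-of-range value such as rcu_cpu_stall_timeout=1000 is first clamped
 * to 300, giving 300 * 1000 + 5 * 1000 = 305000 jiffies.
 */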
520 
521 void rcu_sysrq_start(void)
522 {
523 	if (!rcu_cpu_stall_suppress)
524 		rcu_cpu_stall_suppress = 2;
525 }
526 
527 void rcu_sysrq_end(void)
528 {
529 	if (rcu_cpu_stall_suppress == 2)
530 		rcu_cpu_stall_suppress = 0;
531 }
532 
533 static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
534 {
535 	rcu_cpu_stall_suppress = 1;
536 	return NOTIFY_DONE;
537 }
538 
539 static struct notifier_block rcu_panic_block = {
540 	.notifier_call = rcu_panic,
541 };
542 
543 static int __init check_cpu_stall_init(void)
544 {
545 	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
546 	return 0;
547 }
548 early_initcall(check_cpu_stall_init);
549 
550 #endif /* #ifdef CONFIG_RCU_STALL_COMMON */
551 
552 #ifdef CONFIG_TASKS_RCU
553 
554 /*
555  * Simple variant of RCU whose quiescent states are voluntary context switch,
556  * user-space execution, and idle.  As such, grace periods can take one good
557  * long time.  There are no read-side primitives similar to rcu_read_lock()
558  * and rcu_read_unlock() because this implementation is intended to get
559  * the system into a safe state for some of the manipulations involved in
560  * tracing and the like.  Finally, this implementation does not support
561  * high call_rcu_tasks() rates from multiple CPUs.  If this is required,
562  * per-CPU callback lists will be needed.
563  */
564 
565 /* Global list of callbacks and associated lock. */
566 static struct rcu_head *rcu_tasks_cbs_head;
567 static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
568 static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
569 static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);
570 
571 /* Track exiting tasks in order to allow them to be waited for. */
572 DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
573 
574 /* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
575 #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
576 static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
577 module_param(rcu_task_stall_timeout, int, 0644);
578 
579 static struct task_struct *rcu_tasks_kthread_ptr;
580 
581 /**
582  * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
583  * @rhp: structure to be used for queueing the RCU updates.
584  * @func: actual callback function to be invoked after the grace period
585  *
586  * The callback function will be invoked some time after a full grace
587  * period elapses, in other words after all currently executing RCU
588  * read-side critical sections have completed. call_rcu_tasks() assumes
589  * that the read-side critical sections end at a voluntary context
590  * switch (not a preemption!), entry into idle, or transition to usermode
591  * execution.  As such, there are no read-side primitives analogous to
592  * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
593  * to determine that all tasks have passed through a safe state, not so
594  * much for data-structure synchronization.
595  *
596  * See the description of call_rcu() for more detailed information on
597  * memory ordering guarantees.
598  */
599 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
600 {
601 	unsigned long flags;
602 	bool needwake;
603 
604 	rhp->next = NULL;
605 	rhp->func = func;
606 	raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
607 	needwake = !rcu_tasks_cbs_head;
608 	*rcu_tasks_cbs_tail = rhp;
609 	rcu_tasks_cbs_tail = &rhp->next;
610 	raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
611 	/* We can't create the thread unless interrupts are enabled. */
612 	if (needwake && READ_ONCE(rcu_tasks_kthread_ptr))
613 		wake_up(&rcu_tasks_cbs_wq);
614 }
615 EXPORT_SYMBOL_GPL(call_rcu_tasks);
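
/*
 * Illustrative sketch, not part of this file: deferring the freeing of a
 * trampoline-like object until every task has passed through a voluntary
 * context switch, idle, or usermode execution, as described above.  The
 * foo_tramp structure and functions are hypothetical; call_rcu_tasks() and
 * kfree() are the real interfaces.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

struct foo_tramp {
	struct rcu_head rh;
	void *text;
};

static void foo_tramp_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct foo_tramp, rh));
}

static void foo_tramp_retire(struct foo_tramp *tp)
{
	/* Once the callback runs, no task can still be executing in tp. */
	call_rcu_tasks(&tp->rh, foo_tramp_free_cb);
}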
616 
617 /**
618  * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
619  *
620  * Control will return to the caller some time after a full rcu-tasks
621  * grace period has elapsed, in other words after all currently
622  * executing rcu-tasks read-side critical sections have completed.  These
623  * read-side critical sections are delimited by calls to schedule(),
624  * cond_resched_rcu_qs(), idle execution, userspace execution, calls
625  * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
626  *
627  * This is a very specialized primitive, intended only for a few uses in
628  * tracing and other situations requiring manipulation of function
629  * preambles and profiling hooks.  The synchronize_rcu_tasks() function
630  * is not (yet) intended for heavy use from multiple CPUs.
631  *
632  * Note that this guarantee implies further memory-ordering guarantees.
633  * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
634  * each CPU is guaranteed to have executed a full memory barrier since the
635  * end of its last RCU-tasks read-side critical section whose beginning
636  * preceded the call to synchronize_rcu_tasks().  In addition, each CPU
637  * having an RCU-tasks read-side critical section that extends beyond
638  * the return from synchronize_rcu_tasks() is guaranteed to have executed
639  * a full memory barrier after the beginning of synchronize_rcu_tasks()
640  * and before the beginning of that RCU-tasks read-side critical section.
641  * Note that these guarantees include CPUs that are offline, idle, or
642  * executing in user mode, as well as CPUs that are executing in the kernel.
643  *
644  * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
645  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
646  * to have executed a full memory barrier during the execution of
647  * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
648  * (but again only if the system has more than one CPU).
649  */
650 void synchronize_rcu_tasks(void)
651 {
652 	/* Complain if the scheduler has not started.  */
653 	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
654 			 "synchronize_rcu_tasks called too soon");
655 
656 	/* Wait for the grace period. */
657 	wait_rcu_gp(call_rcu_tasks);
658 }
659 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
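
/*
 * Illustrative sketch, not part of this file: a synchronous teardown built
 * on synchronize_rcu_tasks().  The foo_hook pointer and the function are
 * hypothetical; after the grace period, no task can still be executing in
 * the old handler, so its memory may be freed immediately.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>

static void (*foo_hook)(void);

static void foo_unhook_and_free(void *handler_mem)
{
	WRITE_ONCE(foo_hook, NULL);	/* Stop handing out the old handler. */
	synchronize_rcu_tasks();	/* Wait for tasks already inside it. */
	kfree(handler_mem);		/* Now safe to reclaim. */
}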
660 
661 /**
662  * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
663  *
664  * Although the current implementation is guaranteed to wait, it is not
665  * obligated to do so, for example, if there are no pending callbacks.
666  */
667 void rcu_barrier_tasks(void)
668 {
669 	/* There is only one callback queue, so this is easy.  ;-) */
670 	synchronize_rcu_tasks();
671 }
672 EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
673 
674 /* See if tasks are still holding out, complain if so. */
675 static void check_holdout_task(struct task_struct *t,
676 			       bool needreport, bool *firstreport)
677 {
678 	int cpu;
679 
680 	if (!READ_ONCE(t->rcu_tasks_holdout) ||
681 	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
682 	    !READ_ONCE(t->on_rq) ||
683 	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
684 	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
685 		WRITE_ONCE(t->rcu_tasks_holdout, false);
686 		list_del_init(&t->rcu_tasks_holdout_list);
687 		put_task_struct(t);
688 		return;
689 	}
690 	rcu_request_urgent_qs_task(t);
691 	if (!needreport)
692 		return;
693 	if (*firstreport) {
694 		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
695 		*firstreport = false;
696 	}
697 	cpu = task_cpu(t);
698 	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
699 		 t, ".I"[is_idle_task(t)],
700 		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
701 		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
702 		 t->rcu_tasks_idle_cpu, cpu);
703 	sched_show_task(t);
704 }
705 
706 /* RCU-tasks kthread that detects grace periods and invokes callbacks. */
707 static int __noreturn rcu_tasks_kthread(void *arg)
708 {
709 	unsigned long flags;
710 	struct task_struct *g, *t;
711 	unsigned long lastreport;
712 	struct rcu_head *list;
713 	struct rcu_head *next;
714 	LIST_HEAD(rcu_tasks_holdouts);
715 
716 	/* Run on housekeeping CPUs by default.  The sysadmin can move it if desired. */
717 	housekeeping_affine(current);
718 
719 	/*
720 	 * Each pass through the following loop makes one check for
721 	 * newly arrived callbacks, and, if there are some, waits for
722 	 * one RCU-tasks grace period and then invokes the callbacks.
723 	 * This loop is terminated by the system going down.  ;-)
724 	 */
725 	for (;;) {
726 
727 		/* Pick up any new callbacks. */
728 		raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
729 		list = rcu_tasks_cbs_head;
730 		rcu_tasks_cbs_head = NULL;
731 		rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
732 		raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
733 
734 		/* If there were none, wait a bit and start over. */
735 		if (!list) {
736 			wait_event_interruptible(rcu_tasks_cbs_wq,
737 						 rcu_tasks_cbs_head);
738 			if (!rcu_tasks_cbs_head) {
739 				WARN_ON(signal_pending(current));
740 				schedule_timeout_interruptible(HZ/10);
741 			}
742 			continue;
743 		}
744 
745 		/*
746 		 * Wait for all pre-existing t->on_rq and t->nvcsw
747 		 * transitions to complete.  Invoking synchronize_sched()
748 		 * suffices because all these transitions occur with
749 		 * interrupts disabled.  Without this synchronize_sched(),
750 		 * a read-side critical section that started before the
751 		 * grace period might be incorrectly seen as having started
752 		 * after the grace period.
753 		 *
754 		 * This synchronize_sched() also dispenses with the
755 		 * need for a memory barrier on the first store to
756 		 * ->rcu_tasks_holdout, as it forces the store to happen
757 		 * after the beginning of the grace period.
758 		 */
759 		synchronize_sched();
760 
761 		/*
762 		 * There were callbacks, so we need to wait for an
763 		 * RCU-tasks grace period.  Start off by scanning
764 		 * the task list for tasks that are not already
765 		 * voluntarily blocked.  Mark these tasks and make
766 		 * a list of them in rcu_tasks_holdouts.
767 		 */
768 		rcu_read_lock();
769 		for_each_process_thread(g, t) {
770 			if (t != current && READ_ONCE(t->on_rq) &&
771 			    !is_idle_task(t)) {
772 				get_task_struct(t);
773 				t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
774 				WRITE_ONCE(t->rcu_tasks_holdout, true);
775 				list_add(&t->rcu_tasks_holdout_list,
776 					 &rcu_tasks_holdouts);
777 			}
778 		}
779 		rcu_read_unlock();
780 
781 		/*
782 		 * Wait for tasks that are in the process of exiting.
783 		 * This does only part of the job, ensuring that all
784 		 * tasks that were previously exiting reach the point
785 		 * where they have disabled preemption, allowing the
786 		 * later synchronize_sched() to finish the job.
787 		 */
788 		synchronize_srcu(&tasks_rcu_exit_srcu);
789 
790 		/*
791 		 * Each pass through the following loop scans the list
792 		 * of holdout tasks, removing any that are no longer
793 		 * holdouts.  When the list is empty, we are done.
794 		 */
795 		lastreport = jiffies;
796 		while (!list_empty(&rcu_tasks_holdouts)) {
797 			bool firstreport;
798 			bool needreport;
799 			int rtst;
800 			struct task_struct *t1;
801 
802 			schedule_timeout_interruptible(HZ);
803 			rtst = READ_ONCE(rcu_task_stall_timeout);
804 			needreport = rtst > 0 &&
805 				     time_after(jiffies, lastreport + rtst);
806 			if (needreport)
807 				lastreport = jiffies;
808 			firstreport = true;
809 			WARN_ON(signal_pending(current));
810 			list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
811 						rcu_tasks_holdout_list) {
812 				check_holdout_task(t, needreport, &firstreport);
813 				cond_resched();
814 			}
815 		}
816 
817 		/*
818 		 * Because ->on_rq and ->nvcsw are not guaranteed
819  * to have full memory barriers prior to them in the
820 		 * schedule() path, memory reordering on other CPUs could
821 		 * cause their RCU-tasks read-side critical sections to
822 		 * extend past the end of the grace period.  However,
823 		 * because these ->nvcsw updates are carried out with
824 		 * interrupts disabled, we can use synchronize_sched()
825 		 * to force the needed ordering on all such CPUs.
826 		 *
827 		 * This synchronize_sched() also confines all
828 		 * ->rcu_tasks_holdout accesses to be within the grace
829 		 * period, avoiding the need for memory barriers for
830 		 * ->rcu_tasks_holdout accesses.
831 		 *
832 		 * In addition, this synchronize_sched() waits for exiting
833 		 * tasks to complete their final preempt_disable() region
834 		 * of execution, cleaning up after the synchronize_srcu()
835 		 * above.
836 		 */
837 		synchronize_sched();
838 
839 		/* Invoke the callbacks. */
840 		while (list) {
841 			next = list->next;
842 			local_bh_disable();
843 			list->func(list);
844 			local_bh_enable();
845 			list = next;
846 			cond_resched();
847 		}
848 		schedule_timeout_uninterruptible(HZ/10);
849 	}
850 }
851 
852 /* Spawn rcu_tasks_kthread() at core_initcall() time. */
853 static int __init rcu_spawn_tasks_kthread(void)
854 {
855 	struct task_struct *t;
856 
857 	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
858 	BUG_ON(IS_ERR(t));
859 	smp_mb(); /* Ensure others see full kthread. */
860 	WRITE_ONCE(rcu_tasks_kthread_ptr, t);
861 	return 0;
862 }
863 core_initcall(rcu_spawn_tasks_kthread);
864 
865 /* Do the srcu_read_lock() for the above synchronize_srcu().  */
866 void exit_tasks_rcu_start(void)
867 {
868 	preempt_disable();
869 	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
870 	preempt_enable();
871 }
872 
873 /* Do the srcu_read_unlock() for the above synchronize_srcu().  */
874 void exit_tasks_rcu_finish(void)
875 {
876 	preempt_disable();
877 	__srcu_read_unlock(&tasks_rcu_exit_srcu, current->rcu_tasks_idx);
878 	preempt_enable();
879 }
880 
881 #endif /* #ifdef CONFIG_TASKS_RCU */
882 
883 #ifndef CONFIG_TINY_RCU
884 
885 /*
886  * Print any non-default Tasks RCU settings.
887  */
888 static void __init rcu_tasks_bootup_oddness(void)
889 {
890 #ifdef CONFIG_TASKS_RCU
891 	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
892 		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
893 	else
894 		pr_info("\tTasks RCU enabled.\n");
895 #endif /* #ifdef CONFIG_TASKS_RCU */
896 }
897 
898 #endif /* #ifndef CONFIG_TINY_RCU */
899 
900 #ifdef CONFIG_PROVE_RCU
901 
902 /*
903  * Early boot self test parameters, one for each flavor
904  */
905 static bool rcu_self_test;
906 static bool rcu_self_test_bh;
907 static bool rcu_self_test_sched;
908 
909 module_param(rcu_self_test, bool, 0444);
910 module_param(rcu_self_test_bh, bool, 0444);
911 module_param(rcu_self_test_sched, bool, 0444);
912 
913 static int rcu_self_test_counter;
914 
915 static void test_callback(struct rcu_head *r)
916 {
917 	rcu_self_test_counter++;
918 	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
919 }
920 
921 static void early_boot_test_call_rcu(void)
922 {
923 	static struct rcu_head head;
924 
925 	call_rcu(&head, test_callback);
926 }
927 
928 static void early_boot_test_call_rcu_bh(void)
929 {
930 	static struct rcu_head head;
931 
932 	call_rcu_bh(&head, test_callback);
933 }
934 
935 static void early_boot_test_call_rcu_sched(void)
936 {
937 	static struct rcu_head head;
938 
939 	call_rcu_sched(&head, test_callback);
940 }
941 
942 void rcu_early_boot_tests(void)
943 {
944 	pr_info("Running RCU self tests\n");
945 
946 	if (rcu_self_test)
947 		early_boot_test_call_rcu();
948 	if (rcu_self_test_bh)
949 		early_boot_test_call_rcu_bh();
950 	if (rcu_self_test_sched)
951 		early_boot_test_call_rcu_sched();
952 	rcu_test_sync_prims();
953 }
954 
955 static int rcu_verify_early_boot_tests(void)
956 {
957 	int ret = 0;
958 	int early_boot_test_counter = 0;
959 
960 	if (rcu_self_test) {
961 		early_boot_test_counter++;
962 		rcu_barrier();
963 	}
964 	if (rcu_self_test_bh) {
965 		early_boot_test_counter++;
966 		rcu_barrier_bh();
967 	}
968 	if (rcu_self_test_sched) {
969 		early_boot_test_counter++;
970 		rcu_barrier_sched();
971 	}
972 
973 	if (rcu_self_test_counter != early_boot_test_counter) {
974 		WARN_ON(1);
975 		ret = -1;
976 	}
977 
978 	return ret;
979 }
980 late_initcall(rcu_verify_early_boot_tests);
981 #else
982 void rcu_early_boot_tests(void) {}
983 #endif /* CONFIG_PROVE_RCU */
984 
985 #ifndef CONFIG_TINY_RCU
986 
987 /*
988  * Print any significant non-default boot-time settings.
989  */
990 void __init rcupdate_announce_bootup_oddness(void)
991 {
992 	if (rcu_normal)
993 		pr_info("\tNo expedited grace period (rcu_normal).\n");
994 	else if (rcu_normal_after_boot)
995 		pr_info("\tNo expedited grace period (rcu_normal_after_boot).\n");
996 	else if (rcu_expedited)
997 		pr_info("\tAll grace periods are expedited (rcu_expedited).\n");
998 	if (rcu_cpu_stall_suppress)
999 		pr_info("\tRCU CPU stall warnings suppressed (rcu_cpu_stall_suppress).\n");
1000 	if (rcu_cpu_stall_timeout != CONFIG_RCU_CPU_STALL_TIMEOUT)
1001 		pr_info("\tRCU CPU stall warnings timeout set to %d (rcu_cpu_stall_timeout).\n", rcu_cpu_stall_timeout);
1002 	rcu_tasks_bootup_oddness();
1003 }
1004 
1005 #endif /* #ifndef CONFIG_TINY_RCU */
1006