xref: /linux/kernel/rcu/tree_plugin.h (revision e0bf6c5ca2d3281f231c5f0c9bf145e9513644de)
1 /*
2  * Read-Copy Update mechanism for mutual exclusion (tree-based version)
3  * Internal non-public definitions that provide either classic
4  * or preemptible semantics.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, you can access it online at
18  * http://www.gnu.org/licenses/gpl-2.0.html.
19  *
20  * Copyright Red Hat, 2009
21  * Copyright IBM Corporation, 2009
22  *
23  * Author: Ingo Molnar <mingo@elte.hu>
24  *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
25  */
26 
27 #include <linux/delay.h>
28 #include <linux/gfp.h>
29 #include <linux/oom.h>
30 #include <linux/smpboot.h>
31 #include "../time/tick-internal.h"
32 
33 #ifdef CONFIG_RCU_BOOST
34 
35 #include "../locking/rtmutex_common.h"
36 
37 /*
38  * Control variables for per-CPU and per-rcu_node kthreads.  These
39  * handle all flavors of RCU.
40  */
41 static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
42 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
43 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
44 DEFINE_PER_CPU(char, rcu_cpu_has_work);
45 
46 #endif /* #ifdef CONFIG_RCU_BOOST */
47 
48 #ifdef CONFIG_RCU_NOCB_CPU
49 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
50 static bool have_rcu_nocb_mask;	    /* Was rcu_nocb_mask allocated? */
51 static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
52 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
53 
54 /*
55  * Check the RCU kernel configuration parameters and print informative
56  * messages about anything out of the ordinary.  If you like #ifdef, you
57  * will love this function.
58  */
59 static void __init rcu_bootup_announce_oddness(void)
60 {
61 #ifdef CONFIG_RCU_TRACE
62 	pr_info("\tRCU debugfs-based tracing is enabled.\n");
63 #endif
64 #if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
65 	pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
66 	       CONFIG_RCU_FANOUT);
67 #endif
68 #ifdef CONFIG_RCU_FANOUT_EXACT
69 	pr_info("\tHierarchical RCU autobalancing is disabled.\n");
70 #endif
71 #ifdef CONFIG_RCU_FAST_NO_HZ
72 	pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
73 #endif
74 #ifdef CONFIG_PROVE_RCU
75 	pr_info("\tRCU lockdep checking is enabled.\n");
76 #endif
77 #ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
78 	pr_info("\tRCU torture testing starts during boot.\n");
79 #endif
80 #if defined(CONFIG_RCU_CPU_STALL_INFO)
81 	pr_info("\tAdditional per-CPU info printed with stalls.\n");
82 #endif
83 #if NUM_RCU_LVL_4 != 0
84 	pr_info("\tFour-level hierarchy is enabled.\n");
85 #endif
86 	if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
87 		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
88 	if (nr_cpu_ids != NR_CPUS)
89 		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
90 #ifdef CONFIG_RCU_BOOST
91 	pr_info("\tRCU kthread priority: %d.\n", kthread_prio);
92 #endif
93 }
94 
95 #ifdef CONFIG_PREEMPT_RCU
96 
97 RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
98 static struct rcu_state *rcu_state_p = &rcu_preempt_state;
99 
100 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
101 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
102 			       bool wake);
103 
104 /*
105  * Tell them what RCU they are running.
106  */
107 static void __init rcu_bootup_announce(void)
108 {
109 	pr_info("Preemptible hierarchical RCU implementation.\n");
110 	rcu_bootup_announce_oddness();
111 }
112 
113 /*
114  * Record a preemptible-RCU quiescent state for the specified CPU.  Note
115  * that this just means that the task currently running on the CPU has
116  * passed through a quiescent state.  There might be any number of tasks
117  * still blocked while in an RCU read-side critical section.
118  *
119  * As with the other rcu_*_qs() functions, callers to this function
120  * must disable preemption.
121  */
122 static void rcu_preempt_qs(void)
123 {
124 	if (!__this_cpu_read(rcu_preempt_data.passed_quiesce)) {
125 		trace_rcu_grace_period(TPS("rcu_preempt"),
126 				       __this_cpu_read(rcu_preempt_data.gpnum),
127 				       TPS("cpuqs"));
128 		__this_cpu_write(rcu_preempt_data.passed_quiesce, 1);
129 		barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
130 		current->rcu_read_unlock_special.b.need_qs = false;
131 	}
132 }
133 
134 /*
135  * We have entered the scheduler, and the current task might soon be
136  * context-switched away from.  If this task is in an RCU read-side
137  * critical section, we will no longer be able to rely on the CPU to
138  * record that fact, so we enqueue the task on the blkd_tasks list.
139  * The task will dequeue itself when it exits the outermost enclosing
140  * RCU read-side critical section.  Therefore, the current grace period
141  * cannot be permitted to complete until the blkd_tasks list entries
142  * predating the current grace period drain, in other words, until
143  * rnp->gp_tasks becomes NULL.
144  *
145  * Caller must disable preemption.
146  */
147 static void rcu_preempt_note_context_switch(void)
148 {
149 	struct task_struct *t = current;
150 	unsigned long flags;
151 	struct rcu_data *rdp;
152 	struct rcu_node *rnp;
153 
154 	if (t->rcu_read_lock_nesting > 0 &&
155 	    !t->rcu_read_unlock_special.b.blocked) {
156 
157 		/* Possibly blocking in an RCU read-side critical section. */
158 		rdp = this_cpu_ptr(rcu_preempt_state.rda);
159 		rnp = rdp->mynode;
160 		raw_spin_lock_irqsave(&rnp->lock, flags);
161 		smp_mb__after_unlock_lock();
162 		t->rcu_read_unlock_special.b.blocked = true;
163 		t->rcu_blocked_node = rnp;
164 
165 		/*
166 		 * If this CPU has already checked in, then this task
167 		 * will hold up the next grace period rather than the
168 		 * current grace period.  Queue the task accordingly.
169 		 * If the task is queued for the current grace period
170 		 * (i.e., this CPU has not yet passed through a quiescent
171 		 * state for the current grace period), then as long
172 		 * as that task remains queued, the current grace period
173 		 * cannot end.  Note that there is some uncertainty as
174 		 * to exactly when the current grace period started.
175 		 * We take a conservative approach, which can result
176 		 * in unnecessarily waiting on tasks that started very
177 		 * slightly after the current grace period began.  C'est
178 		 * la vie!!!
179 		 *
180 		 * But first, note that the current CPU must still be
181 		 * on line!
182 		 */
183 		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
184 		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
185 		if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
186 			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
187 			rnp->gp_tasks = &t->rcu_node_entry;
188 #ifdef CONFIG_RCU_BOOST
189 			if (rnp->boost_tasks != NULL)
190 				rnp->boost_tasks = rnp->gp_tasks;
191 #endif /* #ifdef CONFIG_RCU_BOOST */
192 		} else {
193 			list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
194 			if (rnp->qsmask & rdp->grpmask)
195 				rnp->gp_tasks = &t->rcu_node_entry;
196 		}
197 		trace_rcu_preempt_task(rdp->rsp->name,
198 				       t->pid,
199 				       (rnp->qsmask & rdp->grpmask)
200 				       ? rnp->gpnum
201 				       : rnp->gpnum + 1);
202 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
203 	} else if (t->rcu_read_lock_nesting < 0 &&
204 		   t->rcu_read_unlock_special.s) {
205 
206 		/*
207 		 * Complete exit from RCU read-side critical section on
208 		 * behalf of preempted instance of __rcu_read_unlock().
209 		 */
210 		rcu_read_unlock_special(t);
211 	}
212 
213 	/*
214 	 * Either we were not in an RCU read-side critical section to
215 	 * begin with, or we have now recorded that critical section
216 	 * globally.  Either way, we can now note a quiescent state
217 	 * for this CPU.  Again, if we were in an RCU read-side critical
218 	 * section, and if that critical section was blocking the current
219 	 * grace period, then the fact that the task has been enqueued
220 	 * means that we continue to block the current grace period.
221 	 */
222 	rcu_preempt_qs();
223 }
224 
225 /*
226  * Check for preempted RCU readers blocking the current grace period
227  * for the specified rcu_node structure.  If the caller needs a reliable
228  * answer, it must hold the rcu_node's ->lock.
229  */
230 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
231 {
232 	return rnp->gp_tasks != NULL;
233 }
234 
235 /*
236  * Record a quiescent state for all tasks that were previously queued
237  * on the specified rcu_node structure and that were blocking the current
238  * RCU grace period.  The caller must hold the specified rnp->lock with
239  * irqs disabled, and this lock is released upon return, but irqs remain
240  * disabled.
241  */
242 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
243 	__releases(rnp->lock)
244 {
245 	unsigned long mask;
246 	struct rcu_node *rnp_p;
247 
248 	if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
249 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
250 		return;  /* Still need more quiescent states! */
251 	}
252 
253 	rnp_p = rnp->parent;
254 	if (rnp_p == NULL) {
255 		/*
256 		 * Either there is only one rcu_node in the tree,
257 		 * or tasks were kicked up to root rcu_node due to
258 		 * CPUs going offline.
259 		 */
260 		rcu_report_qs_rsp(&rcu_preempt_state, flags);
261 		return;
262 	}
263 
264 	/* Report up the rest of the hierarchy. */
265 	mask = rnp->grpmask;
266 	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
267 	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
268 	smp_mb__after_unlock_lock();
269 	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
270 }
271 
272 /*
273  * Advance a ->blkd_tasks-list pointer to the next entry, returning
274  * NULL instead if the end of the list has been reached.
275  */
276 static struct list_head *rcu_next_node_entry(struct task_struct *t,
277 					     struct rcu_node *rnp)
278 {
279 	struct list_head *np;
280 
281 	np = t->rcu_node_entry.next;
282 	if (np == &rnp->blkd_tasks)
283 		np = NULL;
284 	return np;
285 }
286 
287 /*
288  * Return true if the specified rcu_node structure has tasks that were
289  * preempted within an RCU read-side critical section.
290  */
291 static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
292 {
293 	return !list_empty(&rnp->blkd_tasks);
294 }
295 
296 /*
297  * Handle special cases during rcu_read_unlock(), such as needing to
298  * notify RCU core processing or the task having blocked during the RCU
299  * read-side critical section.
300  */
301 void rcu_read_unlock_special(struct task_struct *t)
302 {
303 	bool empty;
304 	bool empty_exp;
305 	bool empty_norm;
306 	bool empty_exp_now;
307 	unsigned long flags;
308 	struct list_head *np;
309 #ifdef CONFIG_RCU_BOOST
310 	bool drop_boost_mutex = false;
311 #endif /* #ifdef CONFIG_RCU_BOOST */
312 	struct rcu_node *rnp;
313 	union rcu_special special;
314 
315 	/* NMI handlers cannot block and cannot safely manipulate state. */
316 	if (in_nmi())
317 		return;
318 
319 	local_irq_save(flags);
320 
321 	/*
322 	 * If RCU core is waiting for this CPU to exit critical section,
323 	 * let it know that we have done so.  Because irqs are disabled,
324 	 * t->rcu_read_unlock_special cannot change.
325 	 */
326 	special = t->rcu_read_unlock_special;
327 	if (special.b.need_qs) {
328 		rcu_preempt_qs();
329 		t->rcu_read_unlock_special.b.need_qs = false;
330 		if (!t->rcu_read_unlock_special.s) {
331 			local_irq_restore(flags);
332 			return;
333 		}
334 	}
335 
336 	/* Hardware IRQ handlers cannot block, complain if they get here. */
337 	if (WARN_ON_ONCE(in_irq() || in_serving_softirq())) {
338 		local_irq_restore(flags);
339 		return;
340 	}
341 
342 	/* Clean up if blocked during RCU read-side critical section. */
343 	if (special.b.blocked) {
344 		t->rcu_read_unlock_special.b.blocked = false;
345 
346 		/*
347 		 * Remove this task from the list it blocked on.  The
348 		 * task can migrate while we acquire the lock, but at
349 		 * most one time.  So at most two passes through the loop.
350 		 */
351 		for (;;) {
352 			rnp = t->rcu_blocked_node;
353 			raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
354 			smp_mb__after_unlock_lock();
355 			if (rnp == t->rcu_blocked_node)
356 				break;
357 			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
358 		}
359 		empty = !rcu_preempt_has_tasks(rnp);
360 		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
361 		empty_exp = !rcu_preempted_readers_exp(rnp);
362 		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
363 		np = rcu_next_node_entry(t, rnp);
364 		list_del_init(&t->rcu_node_entry);
365 		t->rcu_blocked_node = NULL;
366 		trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
367 						rnp->gpnum, t->pid);
368 		if (&t->rcu_node_entry == rnp->gp_tasks)
369 			rnp->gp_tasks = np;
370 		if (&t->rcu_node_entry == rnp->exp_tasks)
371 			rnp->exp_tasks = np;
372 #ifdef CONFIG_RCU_BOOST
373 		if (&t->rcu_node_entry == rnp->boost_tasks)
374 			rnp->boost_tasks = np;
375 		/* Snapshot ->boost_mtx ownership with rcu_node lock held. */
376 		drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
377 #endif /* #ifdef CONFIG_RCU_BOOST */
378 
379 		/*
380 		 * If this was the last task on the list, go see if we
381 		 * need to propagate ->qsmaskinit bit clearing up the
382 		 * rcu_node tree.
383 		 */
384 		if (!empty && !rcu_preempt_has_tasks(rnp))
385 			rcu_cleanup_dead_rnp(rnp);
386 
387 		/*
388 		 * If this was the last task on the current list, and if
389 		 * we aren't waiting on any CPUs, report the quiescent state.
390 		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
391 		 * so we must take a snapshot of the expedited state.
392 		 */
393 		empty_exp_now = !rcu_preempted_readers_exp(rnp);
394 		if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
395 			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
396 							 rnp->gpnum,
397 							 0, rnp->qsmask,
398 							 rnp->level,
399 							 rnp->grplo,
400 							 rnp->grphi,
401 							 !!rnp->gp_tasks);
402 			rcu_report_unblock_qs_rnp(rnp, flags);
403 		} else {
404 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
405 		}
406 
407 #ifdef CONFIG_RCU_BOOST
408 		/* Unboost if we were boosted. */
409 		if (drop_boost_mutex)
410 			rt_mutex_unlock(&rnp->boost_mtx);
411 #endif /* #ifdef CONFIG_RCU_BOOST */
412 
413 		/*
414 		 * If this was the last task on the expedited lists,
415 		 * then we need to report up the rcu_node hierarchy.
416 		 */
417 		if (!empty_exp && empty_exp_now)
418 			rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
419 	} else {
420 		local_irq_restore(flags);
421 	}
422 }
423 
424 /*
425  * Dump detailed information for all tasks blocking the current RCU
426  * grace period on the specified rcu_node structure.
427  */
428 static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
429 {
430 	unsigned long flags;
431 	struct task_struct *t;
432 
433 	raw_spin_lock_irqsave(&rnp->lock, flags);
434 	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
435 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
436 		return;
437 	}
438 	t = list_entry(rnp->gp_tasks,
439 		       struct task_struct, rcu_node_entry);
440 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
441 		sched_show_task(t);
442 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
443 }
444 
445 /*
446  * Dump detailed information for all tasks blocking the current RCU
447  * grace period.
448  */
449 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
450 {
451 	struct rcu_node *rnp = rcu_get_root(rsp);
452 
453 	rcu_print_detail_task_stall_rnp(rnp);
454 	rcu_for_each_leaf_node(rsp, rnp)
455 		rcu_print_detail_task_stall_rnp(rnp);
456 }
457 
458 #ifdef CONFIG_RCU_CPU_STALL_INFO
459 
460 static void rcu_print_task_stall_begin(struct rcu_node *rnp)
461 {
462 	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
463 	       rnp->level, rnp->grplo, rnp->grphi);
464 }
465 
466 static void rcu_print_task_stall_end(void)
467 {
468 	pr_cont("\n");
469 }
470 
471 #else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
472 
473 static void rcu_print_task_stall_begin(struct rcu_node *rnp)
474 {
475 }
476 
477 static void rcu_print_task_stall_end(void)
478 {
479 }
480 
481 #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
482 
483 /*
484  * Scan the current list of tasks blocked within RCU read-side critical
485  * sections, printing out the tid of each.
486  */
487 static int rcu_print_task_stall(struct rcu_node *rnp)
488 {
489 	struct task_struct *t;
490 	int ndetected = 0;
491 
492 	if (!rcu_preempt_blocked_readers_cgp(rnp))
493 		return 0;
494 	rcu_print_task_stall_begin(rnp);
495 	t = list_entry(rnp->gp_tasks,
496 		       struct task_struct, rcu_node_entry);
497 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
498 		pr_cont(" P%d", t->pid);
499 		ndetected++;
500 	}
501 	rcu_print_task_stall_end();
502 	return ndetected;
503 }
504 
505 /*
506  * Check that the list of blocked tasks for the newly completed grace
507  * period is in fact empty.  It is a serious bug to complete a grace
508  * period that still has RCU readers blocked!  This function must be
509  * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
510  * must be held by the caller.
511  *
512  * Also, if there are blocked tasks on the list, they automatically
513  * block the newly created grace period, so set up ->gp_tasks accordingly.
514  */
515 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
516 {
517 	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
518 	if (rcu_preempt_has_tasks(rnp))
519 		rnp->gp_tasks = rnp->blkd_tasks.next;
520 	WARN_ON_ONCE(rnp->qsmask);
521 }
522 
523 #ifdef CONFIG_HOTPLUG_CPU
524 
525 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
526 
527 /*
528  * Check for a quiescent state from the current CPU.  When a task blocks,
529  * the task is recorded in the corresponding CPU's rcu_node structure,
530  * which is checked elsewhere.
531  *
532  * Caller must disable hard irqs.
533  */
534 static void rcu_preempt_check_callbacks(void)
535 {
536 	struct task_struct *t = current;
537 
538 	if (t->rcu_read_lock_nesting == 0) {
539 		rcu_preempt_qs();
540 		return;
541 	}
542 	if (t->rcu_read_lock_nesting > 0 &&
543 	    __this_cpu_read(rcu_preempt_data.qs_pending) &&
544 	    !__this_cpu_read(rcu_preempt_data.passed_quiesce))
545 		t->rcu_read_unlock_special.b.need_qs = true;
546 }
547 
548 #ifdef CONFIG_RCU_BOOST
549 
550 static void rcu_preempt_do_callbacks(void)
551 {
552 	rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data));
553 }
554 
555 #endif /* #ifdef CONFIG_RCU_BOOST */
556 
557 /*
558  * Queue a preemptible-RCU callback for invocation after a grace period.
559  */
560 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
561 {
562 	__call_rcu(head, func, &rcu_preempt_state, -1, 0);
563 }
564 EXPORT_SYMBOL_GPL(call_rcu);
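/*
 * Editor's illustration (not part of the original file): a minimal sketch
 * of the usual call_rcu() update pattern, assuming a caller-defined
 * structure that embeds an rcu_head.  The names foo, foo_reclaim(), and
 * foo_replace() are hypothetical.
 */
struct foo {
	int data;
	struct rcu_head rcu;
};

static void foo_reclaim(struct rcu_head *rcu)
{
	struct foo *fp = container_of(rcu, struct foo, rcu);

	kfree(fp);	/* Runs only after a full grace period has elapsed. */
}

static void foo_replace(struct foo __rcu **gpp, struct foo *newp)
{
	struct foo *oldp = rcu_dereference_protected(*gpp, 1);

	rcu_assign_pointer(*gpp, newp);		/* Publish the new version. */
	call_rcu(&oldp->rcu, foo_reclaim);	/* Defer freeing the old one. */
}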
565 
566 /**
567  * synchronize_rcu - wait until a grace period has elapsed.
568  *
569  * Control will return to the caller some time after a full grace
570  * period has elapsed, in other words after all currently executing RCU
571  * read-side critical sections have completed.  Note, however, that
572  * upon return from synchronize_rcu(), the caller might well be executing
573  * concurrently with new RCU read-side critical sections that began while
574  * synchronize_rcu() was waiting.  RCU read-side critical sections are
575  * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
576  *
577  * See the description of synchronize_sched() for more detailed information
578  * on memory ordering guarantees.
579  */
580 void synchronize_rcu(void)
581 {
582 	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
583 			   !lock_is_held(&rcu_lock_map) &&
584 			   !lock_is_held(&rcu_sched_lock_map),
585 			   "Illegal synchronize_rcu() in RCU read-side critical section");
586 	if (!rcu_scheduler_active)
587 		return;
588 	if (rcu_expedited)
589 		synchronize_rcu_expedited();
590 	else
591 		wait_rcu_gp(call_rcu);
592 }
593 EXPORT_SYMBOL_GPL(synchronize_rcu);
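/*
 * Editor's illustration (not part of the original file): the synchronous
 * counterpart to the call_rcu() sketch above, using a hypothetical
 * RCU-protected global pointer gbl_foo.
 */
static struct foo __rcu *gbl_foo;

static int foo_read_data(void)
{
	struct foo *fp;
	int ret;

	rcu_read_lock();		/* Begin read-side critical section. */
	fp = rcu_dereference(gbl_foo);
	ret = fp ? fp->data : -1;
	rcu_read_unlock();		/* End read-side critical section. */
	return ret;
}

static void foo_update_sync(struct foo *newp)
{
	struct foo *oldp = rcu_dereference_protected(gbl_foo, 1);

	rcu_assign_pointer(gbl_foo, newp);
	synchronize_rcu();	/* Wait for all pre-existing readers to finish. */
	kfree(oldp);		/* Now no reader can still hold a reference. */
}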
594 
595 static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
596 static unsigned long sync_rcu_preempt_exp_count;
597 static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
598 
599 /*
600  * Return non-zero if there are any tasks in RCU read-side critical
601  * sections blocking the current preemptible-RCU expedited grace period.
602  * If there is no preemptible-RCU expedited grace period currently in
603  * progress, returns zero unconditionally.
604  */
605 static int rcu_preempted_readers_exp(struct rcu_node *rnp)
606 {
607 	return rnp->exp_tasks != NULL;
608 }
609 
610 /*
611  * Return non-zero if there is no RCU expedited grace period in progress
612  * for the specified rcu_node structure, in other words, if all CPUs and
613  * tasks covered by the specified rcu_node structure have done their bit
614  * for the current expedited grace period.  Works only for preemptible
615  * RCU -- other RCU implementations use other means.
616  *
617  * Caller must hold sync_rcu_preempt_exp_mutex.
618  */
619 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
620 {
621 	return !rcu_preempted_readers_exp(rnp) &&
622 	       ACCESS_ONCE(rnp->expmask) == 0;
623 }
624 
625 /*
626  * Report the exit from RCU read-side critical section for the last task
627  * that queued itself during or before the current expedited preemptible-RCU
628  * grace period.  This event is reported either to the rcu_node structure on
629  * which the task was queued or to one of that rcu_node structure's ancestors,
630  * recursively up the tree.  (Calm down, calm down, we do the recursion
631  * iteratively!)
632  *
633  * Most callers will set the "wake" flag, but the task initiating the
634  * expedited grace period need not wake itself.
635  *
636  * Caller must hold sync_rcu_preempt_exp_mutex.
637  */
638 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
639 			       bool wake)
640 {
641 	unsigned long flags;
642 	unsigned long mask;
643 
644 	raw_spin_lock_irqsave(&rnp->lock, flags);
645 	smp_mb__after_unlock_lock();
646 	for (;;) {
647 		if (!sync_rcu_preempt_exp_done(rnp)) {
648 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
649 			break;
650 		}
651 		if (rnp->parent == NULL) {
652 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
653 			if (wake) {
654 				smp_mb(); /* EGP done before wake_up(). */
655 				wake_up(&sync_rcu_preempt_exp_wq);
656 			}
657 			break;
658 		}
659 		mask = rnp->grpmask;
660 		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
661 		rnp = rnp->parent;
662 		raw_spin_lock(&rnp->lock); /* irqs already disabled */
663 		smp_mb__after_unlock_lock();
664 		rnp->expmask &= ~mask;
665 	}
666 }
667 
668 /*
669  * Snapshot the tasks blocking the newly started preemptible-RCU expedited
670  * grace period for the specified rcu_node structure.  If there are no such
671  * tasks, report it up the rcu_node hierarchy.
672  *
673  * Caller must hold sync_rcu_preempt_exp_mutex and must exclude
674  * CPU hotplug operations.
675  */
676 static void
677 sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
678 {
679 	unsigned long flags;
680 	int must_wait = 0;
681 
682 	raw_spin_lock_irqsave(&rnp->lock, flags);
683 	smp_mb__after_unlock_lock();
684 	if (!rcu_preempt_has_tasks(rnp)) {
685 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
686 	} else {
687 		rnp->exp_tasks = rnp->blkd_tasks.next;
688 		rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
689 		must_wait = 1;
690 	}
691 	if (!must_wait)
692 		rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
693 }
694 
695 /**
696  * synchronize_rcu_expedited - Brute-force RCU grace period
697  *
698  * Wait for an RCU-preempt grace period, but expedite it.  The basic
699  * idea is to invoke synchronize_sched_expedited() to push all the tasks to
700  * the ->blkd_tasks lists and wait for those lists to drain.  This consumes
701  * significant time on all CPUs and is unfriendly to real-time workloads,
702  * so is thus not recommended for any sort of common-case code.
703  * In fact, if you are using synchronize_rcu_expedited() in a loop,
704  * please restructure your code to batch your updates, and then Use a
705  * single synchronize_rcu() instead.
706  */
707 void synchronize_rcu_expedited(void)
708 {
709 	unsigned long flags;
710 	struct rcu_node *rnp;
711 	struct rcu_state *rsp = &rcu_preempt_state;
712 	unsigned long snap;
713 	int trycount = 0;
714 
715 	smp_mb(); /* Caller's modifications seen first by other CPUs. */
716 	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
717 	smp_mb(); /* Above access cannot bleed into critical section. */
718 
719 	/*
720 	 * Block CPU-hotplug operations.  This means that any CPU-hotplug
721 	 * operation that finds an rcu_node structure with tasks in the
722 	 * process of being boosted will know that all tasks blocking
723 	 * this expedited grace period will already be in the process of
724 	 * being boosted.  This simplifies the process of moving tasks
725 	 * from leaf to root rcu_node structures.
726 	 */
727 	if (!try_get_online_cpus()) {
728 		/* CPU-hotplug operation in flight, fall back to normal GP. */
729 		wait_rcu_gp(call_rcu);
730 		return;
731 	}
732 
733 	/*
734 	 * Acquire lock, falling back to synchronize_rcu() if too many
735 	 * lock-acquisition failures.  Of course, if someone does the
736 	 * expedited grace period for us, just leave.
737 	 */
738 	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
739 		if (ULONG_CMP_LT(snap,
740 		    ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
741 			put_online_cpus();
742 			goto mb_ret; /* Others did our work for us. */
743 		}
744 		if (trycount++ < 10) {
745 			udelay(trycount * num_online_cpus());
746 		} else {
747 			put_online_cpus();
748 			wait_rcu_gp(call_rcu);
749 			return;
750 		}
751 	}
752 	if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
753 		put_online_cpus();
754 		goto unlock_mb_ret; /* Others did our work for us. */
755 	}
756 
757 	/* Force all RCU readers onto ->blkd_tasks lists. */
758 	synchronize_sched_expedited();
759 
760 	/* Initialize ->expmask for all non-leaf rcu_node structures. */
761 	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
762 		raw_spin_lock_irqsave(&rnp->lock, flags);
763 		smp_mb__after_unlock_lock();
764 		rnp->expmask = rnp->qsmaskinit;
765 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
766 	}
767 
768 	/* Snapshot current state of ->blkd_tasks lists. */
769 	rcu_for_each_leaf_node(rsp, rnp)
770 		sync_rcu_preempt_exp_init(rsp, rnp);
771 	if (NUM_RCU_NODES > 1)
772 		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));
773 
774 	put_online_cpus();
775 
776 	/* Wait for snapshotted ->blkd_tasks lists to drain. */
777 	rnp = rcu_get_root(rsp);
778 	wait_event(sync_rcu_preempt_exp_wq,
779 		   sync_rcu_preempt_exp_done(rnp));
780 
781 	/* Clean up and exit. */
782 	smp_mb(); /* ensure expedited GP seen before counter increment. */
783 	ACCESS_ONCE(sync_rcu_preempt_exp_count) =
784 					sync_rcu_preempt_exp_count + 1;
785 unlock_mb_ret:
786 	mutex_unlock(&sync_rcu_preempt_exp_mutex);
787 mb_ret:
788 	smp_mb(); /* ensure subsequent action seen after grace period. */
789 }
790 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
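/*
 * Editor's illustration (not part of the original file): the batching that
 * the comment above recommends in place of per-update expedited grace
 * periods.  The struct foo_entry and its list membership are hypothetical;
 * the caller is assumed to hold whatever lock protects updates.
 */
struct foo_entry {
	struct list_head list;
	int data;
};

static void foo_remove_pair(struct foo_entry *a, struct foo_entry *b)
{
	list_del_rcu(&a->list);
	list_del_rcu(&b->list);
	synchronize_rcu();	/* One ordinary grace period covers both removals. */
	kfree(a);
	kfree(b);
}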
791 
792 /**
793  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
794  *
795  * Note that this primitive does not necessarily wait for an RCU grace period
796  * to complete.  For example, if there are no RCU callbacks queued anywhere
797  * in the system, then rcu_barrier() is within its rights to return
798  * immediately, without waiting for anything, much less an RCU grace period.
799  */
800 void rcu_barrier(void)
801 {
802 	_rcu_barrier(&rcu_preempt_state);
803 }
804 EXPORT_SYMBOL_GPL(rcu_barrier);
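/*
 * Editor's illustration (not part of the original file): the typical
 * reason to call rcu_barrier() is module unload, where callbacks queued
 * by call_rcu() must finish before their code and data go away.  The
 * function name is hypothetical.
 */
static void foo_cleanup(void)
{
	/* First ensure that no new callbacks will be posted (not shown). */
	rcu_barrier();	/* Then wait for all already-queued callbacks to run. */
	/* Only now is it safe to free the data that the callbacks touch. */
}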
805 
806 /*
807  * Initialize preemptible RCU's state structures.
808  */
809 static void __init __rcu_init_preempt(void)
810 {
811 	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
812 }
813 
814 /*
815  * Check for a task exiting while in a preemptible-RCU read-side
816  * critical section and clean up if so.  No need to issue warnings,
817  * as debug_check_no_locks_held() already does this if lockdep
818  * is enabled.
819  */
820 void exit_rcu(void)
821 {
822 	struct task_struct *t = current;
823 
824 	if (likely(list_empty(&current->rcu_node_entry)))
825 		return;
826 	t->rcu_read_lock_nesting = 1;
827 	barrier();
828 	t->rcu_read_unlock_special.b.blocked = true;
829 	__rcu_read_unlock();
830 }
831 
832 #else /* #ifdef CONFIG_PREEMPT_RCU */
833 
834 static struct rcu_state *rcu_state_p = &rcu_sched_state;
835 
836 /*
837  * Tell them what RCU they are running.
838  */
839 static void __init rcu_bootup_announce(void)
840 {
841 	pr_info("Hierarchical RCU implementation.\n");
842 	rcu_bootup_announce_oddness();
843 }
844 
845 /*
846  * Because preemptible RCU does not exist, we never have to check for
847  * CPUs being in quiescent states.
848  */
849 static void rcu_preempt_note_context_switch(void)
850 {
851 }
852 
853 /*
854  * Because preemptible RCU does not exist, there are never any preempted
855  * RCU readers.
856  */
857 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
858 {
859 	return 0;
860 }
861 
862 #ifdef CONFIG_HOTPLUG_CPU
863 
864 /*
865  * Because there is no preemptible RCU, there can be no readers blocked.
866  */
867 static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
868 {
869 	return false;
870 }
871 
872 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
873 
874 /*
875  * Because preemptible RCU does not exist, we never have to check for
876  * tasks blocked within RCU read-side critical sections.
877  */
878 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
879 {
880 }
881 
882 /*
883  * Because preemptible RCU does not exist, we never have to check for
884  * tasks blocked within RCU read-side critical sections.
885  */
886 static int rcu_print_task_stall(struct rcu_node *rnp)
887 {
888 	return 0;
889 }
890 
891 /*
892  * Because there is no preemptible RCU, there can be no readers blocked,
893  * so there is no need to check for blocked tasks.  So check only for
894  * bogus qsmask values.
895  */
896 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
897 {
898 	WARN_ON_ONCE(rnp->qsmask);
899 }
900 
901 /*
902  * Because preemptible RCU does not exist, it never has any callbacks
903  * to check.
904  */
905 static void rcu_preempt_check_callbacks(void)
906 {
907 }
908 
909 /*
910  * Wait for an rcu-preempt grace period, but make it happen quickly.
911  * Because preemptible RCU does not exist, map to rcu-sched.
912  */
913 void synchronize_rcu_expedited(void)
914 {
915 	synchronize_sched_expedited();
916 }
917 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
918 
919 /*
920  * Because preemptible RCU does not exist, rcu_barrier() is just
921  * another name for rcu_barrier_sched().
922  */
923 void rcu_barrier(void)
924 {
925 	rcu_barrier_sched();
926 }
927 EXPORT_SYMBOL_GPL(rcu_barrier);
928 
929 /*
930  * Because preemptible RCU does not exist, it need not be initialized.
931  */
932 static void __init __rcu_init_preempt(void)
933 {
934 }
935 
936 /*
937  * Because preemptible RCU does not exist, tasks cannot possibly exit
938  * while in preemptible RCU read-side critical sections.
939  */
940 void exit_rcu(void)
941 {
942 }
943 
944 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
945 
946 #ifdef CONFIG_RCU_BOOST
947 
948 #include "../locking/rtmutex_common.h"
949 
950 #ifdef CONFIG_RCU_TRACE
951 
952 static void rcu_initiate_boost_trace(struct rcu_node *rnp)
953 {
954 	if (!rcu_preempt_has_tasks(rnp))
955 		rnp->n_balk_blkd_tasks++;
956 	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
957 		rnp->n_balk_exp_gp_tasks++;
958 	else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
959 		rnp->n_balk_boost_tasks++;
960 	else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
961 		rnp->n_balk_notblocked++;
962 	else if (rnp->gp_tasks != NULL &&
963 		 ULONG_CMP_LT(jiffies, rnp->boost_time))
964 		rnp->n_balk_notyet++;
965 	else
966 		rnp->n_balk_nos++;
967 }
968 
969 #else /* #ifdef CONFIG_RCU_TRACE */
970 
971 static void rcu_initiate_boost_trace(struct rcu_node *rnp)
972 {
973 }
974 
975 #endif /* #else #ifdef CONFIG_RCU_TRACE */
976 
977 static void rcu_wake_cond(struct task_struct *t, int status)
978 {
979 	/*
980 	 * If the thread is yielding, only wake it when this
981 	 * is invoked from idle.
982 	 */
983 	if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
984 		wake_up_process(t);
985 }
986 
987 /*
988  * Carry out RCU priority boosting on the task indicated by ->exp_tasks
989  * or ->boost_tasks, advancing the pointer to the next task in the
990  * ->blkd_tasks list.
991  *
992  * Note that irqs must be enabled: boosting the task can block.
993  * Returns 1 if there are more tasks needing to be boosted.
994  */
995 static int rcu_boost(struct rcu_node *rnp)
996 {
997 	unsigned long flags;
998 	struct task_struct *t;
999 	struct list_head *tb;
1000 
1001 	if (ACCESS_ONCE(rnp->exp_tasks) == NULL &&
1002 	    ACCESS_ONCE(rnp->boost_tasks) == NULL)
1003 		return 0;  /* Nothing left to boost. */
1004 
1005 	raw_spin_lock_irqsave(&rnp->lock, flags);
1006 	smp_mb__after_unlock_lock();
1007 
1008 	/*
1009 	 * Recheck under the lock: all tasks in need of boosting
1010 	 * might exit their RCU read-side critical sections on their own.
1011 	 */
1012 	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
1013 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1014 		return 0;
1015 	}
1016 
1017 	/*
1018 	 * Preferentially boost tasks blocking expedited grace periods.
1019 	 * This cannot starve the normal grace periods because a second
1020 	 * expedited grace period must boost all blocked tasks, including
1021 	 * those blocking the pre-existing normal grace period.
1022 	 */
1023 	if (rnp->exp_tasks != NULL) {
1024 		tb = rnp->exp_tasks;
1025 		rnp->n_exp_boosts++;
1026 	} else {
1027 		tb = rnp->boost_tasks;
1028 		rnp->n_normal_boosts++;
1029 	}
1030 	rnp->n_tasks_boosted++;
1031 
1032 	/*
1033 	 * We boost task t by manufacturing an rt_mutex that appears to
1034 	 * be held by task t.  We leave a pointer to that rt_mutex where
1035 	 * task t can find it, and task t will release the mutex when it
1036 	 * exits its outermost RCU read-side critical section.  Then
1037 	 * simply acquiring this artificial rt_mutex will boost task
1038 	 * t's priority.  (Thanks to tglx for suggesting this approach!)
1039 	 *
1040 	 * Note that task t must acquire rnp->lock to remove itself from
1041 	 * the ->blkd_tasks list, which it will do from exit() if from
1042 	 * nowhere else.  We therefore are guaranteed that task t will
1043 	 * stay around at least until we drop rnp->lock.  Note that
1044 	 * rnp->lock also resolves races between our priority boosting
1045 	 * and task t's exiting its outermost RCU read-side critical
1046 	 * section.
1047 	 */
1048 	t = container_of(tb, struct task_struct, rcu_node_entry);
1049 	rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
1050 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
1051 	/* Lock only for side effect: boosts task t's priority. */
1052 	rt_mutex_lock(&rnp->boost_mtx);
1053 	rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
1054 
1055 	return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
1056 	       ACCESS_ONCE(rnp->boost_tasks) != NULL;
1057 }
1058 
1059 /*
1060  * Priority-boosting kthread.  One per leaf rcu_node and one for the
1061  * root rcu_node.
1062  */
1063 static int rcu_boost_kthread(void *arg)
1064 {
1065 	struct rcu_node *rnp = (struct rcu_node *)arg;
1066 	int spincnt = 0;
1067 	int more2boost;
1068 
1069 	trace_rcu_utilization(TPS("Start boost kthread@init"));
1070 	for (;;) {
1071 		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
1072 		trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
1073 		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
1074 		trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
1075 		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
1076 		more2boost = rcu_boost(rnp);
1077 		if (more2boost)
1078 			spincnt++;
1079 		else
1080 			spincnt = 0;
1081 		if (spincnt > 10) {
1082 			rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
1083 			trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
1084 			schedule_timeout_interruptible(2);
1085 			trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
1086 			spincnt = 0;
1087 		}
1088 	}
1089 	/* NOTREACHED */
1090 	trace_rcu_utilization(TPS("End boost kthread@notreached"));
1091 	return 0;
1092 }
1093 
1094 /*
1095  * Check to see if it is time to start boosting RCU readers that are
1096  * blocking the current grace period, and, if so, tell the per-rcu_node
1097  * kthread to start boosting them.  If there is an expedited grace
1098  * period in progress, it is always time to boost.
1099  *
1100  * The caller must hold rnp->lock, which this function releases.
1101  * The ->boost_kthread_task is immortal, so we don't need to worry
1102  * about it going away.
1103  */
1104 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1105 	__releases(rnp->lock)
1106 {
1107 	struct task_struct *t;
1108 
1109 	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
1110 		rnp->n_balk_exp_gp_tasks++;
1111 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1112 		return;
1113 	}
1114 	if (rnp->exp_tasks != NULL ||
1115 	    (rnp->gp_tasks != NULL &&
1116 	     rnp->boost_tasks == NULL &&
1117 	     rnp->qsmask == 0 &&
1118 	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
1119 		if (rnp->exp_tasks == NULL)
1120 			rnp->boost_tasks = rnp->gp_tasks;
1121 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1122 		t = rnp->boost_kthread_task;
1123 		if (t)
1124 			rcu_wake_cond(t, rnp->boost_kthread_status);
1125 	} else {
1126 		rcu_initiate_boost_trace(rnp);
1127 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
1128 	}
1129 }
1130 
1131 /*
1132  * Wake up the per-CPU kthread to invoke RCU callbacks.
1133  */
1134 static void invoke_rcu_callbacks_kthread(void)
1135 {
1136 	unsigned long flags;
1137 
1138 	local_irq_save(flags);
1139 	__this_cpu_write(rcu_cpu_has_work, 1);
1140 	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
1141 	    current != __this_cpu_read(rcu_cpu_kthread_task)) {
1142 		rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
1143 			      __this_cpu_read(rcu_cpu_kthread_status));
1144 	}
1145 	local_irq_restore(flags);
1146 }
1147 
1148 /*
1149  * Is the current CPU running the RCU-callbacks kthread?
1150  * Caller must have preemption disabled.
1151  */
1152 static bool rcu_is_callbacks_kthread(void)
1153 {
1154 	return __this_cpu_read(rcu_cpu_kthread_task) == current;
1155 }
1156 
1157 #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
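/*
 * Editor's note: CONFIG_RCU_BOOST_DELAY is expressed in milliseconds, so
 * the DIV_ROUND_UP() above converts it to jiffies, rounding up.  For
 * example, with HZ=250 and CONFIG_RCU_BOOST_DELAY=500, this yields
 * DIV_ROUND_UP(500 * 250, 1000) = 125 jiffies, i.e. boosting begins
 * roughly 500ms after the grace period starts.
 */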
1158 
1159 /*
1160  * Do priority-boost accounting for the start of a new grace period.
1161  */
1162 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1163 {
1164 	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
1165 }
1166 
1167 /*
1168  * Create an RCU-boost kthread for the specified node if one does not
1169  * already exist.  We only create this kthread for preemptible RCU.
1170  * Returns zero if all is well, a negated errno otherwise.
1171  */
1172 static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1173 						 struct rcu_node *rnp)
1174 {
1175 	int rnp_index = rnp - &rsp->node[0];
1176 	unsigned long flags;
1177 	struct sched_param sp;
1178 	struct task_struct *t;
1179 
1180 	if (&rcu_preempt_state != rsp)
1181 		return 0;
1182 
1183 	if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0)
1184 		return 0;
1185 
1186 	rsp->boost = 1;
1187 	if (rnp->boost_kthread_task != NULL)
1188 		return 0;
1189 	t = kthread_create(rcu_boost_kthread, (void *)rnp,
1190 			   "rcub/%d", rnp_index);
1191 	if (IS_ERR(t))
1192 		return PTR_ERR(t);
1193 	raw_spin_lock_irqsave(&rnp->lock, flags);
1194 	smp_mb__after_unlock_lock();
1195 	rnp->boost_kthread_task = t;
1196 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
1197 	sp.sched_priority = kthread_prio;
1198 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1199 	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1200 	return 0;
1201 }
1202 
1203 static void rcu_kthread_do_work(void)
1204 {
1205 	rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
1206 	rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
1207 	rcu_preempt_do_callbacks();
1208 }
1209 
1210 static void rcu_cpu_kthread_setup(unsigned int cpu)
1211 {
1212 	struct sched_param sp;
1213 
1214 	sp.sched_priority = kthread_prio;
1215 	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1216 }
1217 
1218 static void rcu_cpu_kthread_park(unsigned int cpu)
1219 {
1220 	per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1221 }
1222 
1223 static int rcu_cpu_kthread_should_run(unsigned int cpu)
1224 {
1225 	return __this_cpu_read(rcu_cpu_has_work);
1226 }
1227 
1228 /*
1229  * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
1230  * RCU softirq used in flavors and configurations of RCU that do not
1231  * support RCU priority boosting.
1232  */
1233 static void rcu_cpu_kthread(unsigned int cpu)
1234 {
1235 	unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
1236 	char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
1237 	int spincnt;
1238 
1239 	for (spincnt = 0; spincnt < 10; spincnt++) {
1240 		trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
1241 		local_bh_disable();
1242 		*statusp = RCU_KTHREAD_RUNNING;
1243 		this_cpu_inc(rcu_cpu_kthread_loops);
1244 		local_irq_disable();
1245 		work = *workp;
1246 		*workp = 0;
1247 		local_irq_enable();
1248 		if (work)
1249 			rcu_kthread_do_work();
1250 		local_bh_enable();
1251 		if (*workp == 0) {
1252 			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
1253 			*statusp = RCU_KTHREAD_WAITING;
1254 			return;
1255 		}
1256 	}
1257 	*statusp = RCU_KTHREAD_YIELDING;
1258 	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
1259 	schedule_timeout_interruptible(2);
1260 	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
1261 	*statusp = RCU_KTHREAD_WAITING;
1262 }
1263 
1264 /*
1265  * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1266  * served by the rcu_node in question.  The CPU hotplug lock is still
1267  * held, so the value of rnp->qsmaskinit will be stable.
1268  *
1269  * We don't include outgoingcpu in the affinity set; callers pass -1 when
1270  * there is no outgoing CPU.  If there are no CPUs left in the affinity
1271  * set, this function allows the kthread to execute on any CPU.
1272  */
1273 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1274 {
1275 	struct task_struct *t = rnp->boost_kthread_task;
1276 	unsigned long mask = rnp->qsmaskinit;
1277 	cpumask_var_t cm;
1278 	int cpu;
1279 
1280 	if (!t)
1281 		return;
1282 	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
1283 		return;
1284 	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
1285 		if ((mask & 0x1) && cpu != outgoingcpu)
1286 			cpumask_set_cpu(cpu, cm);
1287 	if (cpumask_weight(cm) == 0)
1288 		cpumask_setall(cm);
1289 	set_cpus_allowed_ptr(t, cm);
1290 	free_cpumask_var(cm);
1291 }
1292 
1293 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
1294 	.store			= &rcu_cpu_kthread_task,
1295 	.thread_should_run	= rcu_cpu_kthread_should_run,
1296 	.thread_fn		= rcu_cpu_kthread,
1297 	.thread_comm		= "rcuc/%u",
1298 	.setup			= rcu_cpu_kthread_setup,
1299 	.park			= rcu_cpu_kthread_park,
1300 };
1301 
1302 /*
1303  * Spawn boost kthreads -- called as soon as the scheduler is running.
1304  */
1305 static void __init rcu_spawn_boost_kthreads(void)
1306 {
1307 	struct rcu_node *rnp;
1308 	int cpu;
1309 
1310 	for_each_possible_cpu(cpu)
1311 		per_cpu(rcu_cpu_has_work, cpu) = 0;
1312 	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
1313 	rcu_for_each_leaf_node(rcu_state_p, rnp)
1314 		(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
1315 }
1316 
1317 static void rcu_prepare_kthreads(int cpu)
1318 {
1319 	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
1320 	struct rcu_node *rnp = rdp->mynode;
1321 
1322 	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
1323 	if (rcu_scheduler_fully_active)
1324 		(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
1325 }
1326 
1327 #else /* #ifdef CONFIG_RCU_BOOST */
1328 
1329 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1330 	__releases(rnp->lock)
1331 {
1332 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
1333 }
1334 
1335 static void invoke_rcu_callbacks_kthread(void)
1336 {
1337 	WARN_ON_ONCE(1);
1338 }
1339 
1340 static bool rcu_is_callbacks_kthread(void)
1341 {
1342 	return false;
1343 }
1344 
1345 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1346 {
1347 }
1348 
1349 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1350 {
1351 }
1352 
1353 static void __init rcu_spawn_boost_kthreads(void)
1354 {
1355 }
1356 
1357 static void rcu_prepare_kthreads(int cpu)
1358 {
1359 }
1360 
1361 #endif /* #else #ifdef CONFIG_RCU_BOOST */
1362 
1363 #if !defined(CONFIG_RCU_FAST_NO_HZ)
1364 
1365 /*
1366  * Check to see if any future RCU-related work will need to be done
1367  * by the current CPU, even if none need be done immediately, returning
1368  * 1 if so.  This function is part of the RCU implementation; it is -not-
1369  * an exported member of the RCU API.
1370  *
1371  * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
1372  * any flavor of RCU.
1373  */
1374 #ifndef CONFIG_RCU_NOCB_CPU_ALL
1375 int rcu_needs_cpu(unsigned long *delta_jiffies)
1376 {
1377 	*delta_jiffies = ULONG_MAX;
1378 	return rcu_cpu_has_callbacks(NULL);
1379 }
1380 #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
1381 
1382 /*
1383  * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
1384  * after it.
1385  */
1386 static void rcu_cleanup_after_idle(void)
1387 {
1388 }
1389 
1390 /*
1391  * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
1392  * is nothing.
1393  */
1394 static void rcu_prepare_for_idle(void)
1395 {
1396 }
1397 
1398 /*
1399  * Don't bother keeping a running count of the number of RCU callbacks
1400  * posted because CONFIG_RCU_FAST_NO_HZ=n.
1401  */
1402 static void rcu_idle_count_callbacks_posted(void)
1403 {
1404 }
1405 
1406 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1407 
1408 /*
1409  * This code is invoked when a CPU goes idle, at which point we want
1410  * to have the CPU do everything required for RCU so that it can enter
1411  * the energy-efficient dyntick-idle mode.  This is handled by a
1412  * state machine implemented by rcu_prepare_for_idle() below.
1413  *
1414  * The following two preprocessor symbols control this state machine:
1415  *
1416  * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
1417  *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
1418  *	is sized to be roughly one RCU grace period.  Those energy-efficiency
1419  *	benchmarkers who might otherwise be tempted to set this to a large
1420  *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
1421  *	system.  And if you are -that- concerned about energy efficiency,
1422  *	just power the system down and be done with it!
1423  * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
1424  *	permitted to sleep in dyntick-idle mode with only lazy RCU
1425  *	callbacks pending.  Setting this too high can OOM your system.
1426  *
1427  * The values below work well in practice.  If future workloads require
1428  * adjustment, they can be converted into kernel config parameters, though
1429  * making the state machine smarter might be a better option.
1430  */
1431 #define RCU_IDLE_GP_DELAY 4		/* Roughly one grace period. */
1432 #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */
1433 
1434 static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY;
1435 module_param(rcu_idle_gp_delay, int, 0644);
1436 static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
1437 module_param(rcu_idle_lazy_gp_delay, int, 0644);
1438 
1439 extern int tick_nohz_active;
1440 
1441 /*
1442  * Try to advance callbacks for all flavors of RCU on the current CPU, but
1443  * only if it has been a while since the last time we did so.  Afterwards,
1444  * if there are any callbacks ready for immediate invocation, return true.
1445  */
1446 static bool __maybe_unused rcu_try_advance_all_cbs(void)
1447 {
1448 	bool cbs_ready = false;
1449 	struct rcu_data *rdp;
1450 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
1451 	struct rcu_node *rnp;
1452 	struct rcu_state *rsp;
1453 
1454 	/* Exit early if we advanced recently. */
1455 	if (jiffies == rdtp->last_advance_all)
1456 		return false;
1457 	rdtp->last_advance_all = jiffies;
1458 
1459 	for_each_rcu_flavor(rsp) {
1460 		rdp = this_cpu_ptr(rsp->rda);
1461 		rnp = rdp->mynode;
1462 
1463 		/*
1464 		 * Don't bother checking unless a grace period has
1465 		 * completed since we last checked and there are
1466 		 * callbacks not yet ready to invoke.
1467 		 */
1468 		if ((rdp->completed != rnp->completed ||
1469 		     unlikely(ACCESS_ONCE(rdp->gpwrap))) &&
1470 		    rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
1471 			note_gp_changes(rsp, rdp);
1472 
1473 		if (cpu_has_callbacks_ready_to_invoke(rdp))
1474 			cbs_ready = true;
1475 	}
1476 	return cbs_ready;
1477 }
1478 
1479 /*
1480  * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
1481  * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
1482  * caller to set the timeout based on whether or not there are non-lazy
1483  * callbacks.
1484  *
1485  * The caller must have disabled interrupts.
1486  */
1487 #ifndef CONFIG_RCU_NOCB_CPU_ALL
1488 int rcu_needs_cpu(unsigned long *dj)
1489 {
1490 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
1491 
1492 	/* Snapshot to detect later posting of non-lazy callback. */
1493 	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
1494 
1495 	/* If no callbacks, RCU doesn't need the CPU. */
1496 	if (!rcu_cpu_has_callbacks(&rdtp->all_lazy)) {
1497 		*dj = ULONG_MAX;
1498 		return 0;
1499 	}
1500 
1501 	/* Attempt to advance callbacks. */
1502 	if (rcu_try_advance_all_cbs()) {
1503 		/* Some ready to invoke, so initiate later invocation. */
1504 		invoke_rcu_core();
1505 		return 1;
1506 	}
1507 	rdtp->last_accelerate = jiffies;
1508 
1509 	/* Request timer delay depending on laziness, and round. */
1510 	if (!rdtp->all_lazy) {
1511 		*dj = round_up(rcu_idle_gp_delay + jiffies,
1512 			       rcu_idle_gp_delay) - jiffies;
1513 	} else {
1514 		*dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
1515 	}
1516 	return 0;
1517 }
1518 #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
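/*
 * Editor's note: the rounding above aligns the requested wakeup time to a
 * multiple of the delay so that idle CPUs tend to wake together and can
 * share a grace period.  For example, with rcu_idle_gp_delay = 4 and
 * jiffies = 1003, the non-lazy case computes
 * round_up(1003 + 4, 4) - 1003 = 1008 - 1003 = 5 jiffies.
 */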
1519 
1520 /*
1521  * Prepare a CPU for idle from an RCU perspective.  The first major task
1522  * is to sense whether nohz mode has been enabled or disabled via sysfs.
1523  * The second major task is to check to see if a non-lazy callback has
1524  * arrived at a CPU that previously had only lazy callbacks.  The third
1525  * major task is to accelerate (that is, assign grace-period numbers to)
1526  * any recently arrived callbacks.
1527  *
1528  * The caller must have disabled interrupts.
1529  */
1530 static void rcu_prepare_for_idle(void)
1531 {
1532 #ifndef CONFIG_RCU_NOCB_CPU_ALL
1533 	bool needwake;
1534 	struct rcu_data *rdp;
1535 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
1536 	struct rcu_node *rnp;
1537 	struct rcu_state *rsp;
1538 	int tne;
1539 
1540 	/* Handle nohz enablement switches conservatively. */
1541 	tne = ACCESS_ONCE(tick_nohz_active);
1542 	if (tne != rdtp->tick_nohz_enabled_snap) {
1543 		if (rcu_cpu_has_callbacks(NULL))
1544 			invoke_rcu_core(); /* force nohz to see update. */
1545 		rdtp->tick_nohz_enabled_snap = tne;
1546 		return;
1547 	}
1548 	if (!tne)
1549 		return;
1550 
1551 	/* If this is a no-CBs CPU, no callbacks, just return. */
1552 	if (rcu_is_nocb_cpu(smp_processor_id()))
1553 		return;
1554 
1555 	/*
1556 	 * If a non-lazy callback arrived at a CPU having only lazy
1557 	 * callbacks, invoke RCU core for the side-effect of recalculating
1558 	 * idle duration on re-entry to idle.
1559 	 */
1560 	if (rdtp->all_lazy &&
1561 	    rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
1562 		rdtp->all_lazy = false;
1563 		rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
1564 		invoke_rcu_core();
1565 		return;
1566 	}
1567 
1568 	/*
1569 	 * If we have not yet accelerated this jiffy, accelerate all
1570 	 * callbacks on this CPU.
1571 	 */
1572 	if (rdtp->last_accelerate == jiffies)
1573 		return;
1574 	rdtp->last_accelerate = jiffies;
1575 	for_each_rcu_flavor(rsp) {
1576 		rdp = this_cpu_ptr(rsp->rda);
1577 		if (!*rdp->nxttail[RCU_DONE_TAIL])
1578 			continue;
1579 		rnp = rdp->mynode;
1580 		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
1581 		smp_mb__after_unlock_lock();
1582 		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
1583 		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
1584 		if (needwake)
1585 			rcu_gp_kthread_wake(rsp);
1586 	}
1587 #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
1588 }
1589 
1590 /*
1591  * Clean up for exit from idle.  Attempt to advance callbacks based on
1592  * any grace periods that elapsed while the CPU was idle, and if any
1593  * callbacks are now ready to invoke, initiate invocation.
1594  */
1595 static void rcu_cleanup_after_idle(void)
1596 {
1597 #ifndef CONFIG_RCU_NOCB_CPU_ALL
1598 	if (rcu_is_nocb_cpu(smp_processor_id()))
1599 		return;
1600 	if (rcu_try_advance_all_cbs())
1601 		invoke_rcu_core();
1602 #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
1603 }
1604 
1605 /*
1606  * Keep a running count of the number of non-lazy callbacks posted
1607  * on this CPU.  This running counter (which is never decremented) allows
1608  * rcu_prepare_for_idle() to detect when something out of the idle loop
1609  * posts a callback, even if an equal number of callbacks are invoked.
1610  * Of course, callbacks should only be posted from within a trace event
1611  * designed to be called from idle or from within RCU_NONIDLE().
1612  */
1613 static void rcu_idle_count_callbacks_posted(void)
1614 {
1615 	__this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
1616 }
1617 
1618 /*
1619  * Data for flushing lazy RCU callbacks at OOM time.
1620  */
1621 static atomic_t oom_callback_count;
1622 static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq);
1623 
1624 /*
1625  * RCU OOM callback -- decrement the outstanding count and deliver the
1626  * wake-up if we are the last one.
1627  */
1628 static void rcu_oom_callback(struct rcu_head *rhp)
1629 {
1630 	if (atomic_dec_and_test(&oom_callback_count))
1631 		wake_up(&oom_callback_wq);
1632 }
1633 
1634 /*
1635  * Post an rcu_oom_notify callback on the current CPU if it has at
1636  * least one lazy callback.  This will unnecessarily post callbacks
1637  * to CPUs that already have a non-lazy callback at the end of their
1638  * callback list, but this is an infrequent operation, so accept some
1639  * extra overhead to keep things simple.
1640  */
1641 static void rcu_oom_notify_cpu(void *unused)
1642 {
1643 	struct rcu_state *rsp;
1644 	struct rcu_data *rdp;
1645 
1646 	for_each_rcu_flavor(rsp) {
1647 		rdp = raw_cpu_ptr(rsp->rda);
1648 		if (rdp->qlen_lazy != 0) {
1649 			atomic_inc(&oom_callback_count);
1650 			rsp->call(&rdp->oom_head, rcu_oom_callback);
1651 		}
1652 	}
1653 }
1654 
1655 /*
1656  * If low on memory, ensure that each CPU has a non-lazy callback.
1657  * This will wake up CPUs that have only lazy callbacks, in turn
1658  * ensuring that they free up the corresponding memory in a timely manner.
1659  * Because an uncertain amount of memory will be freed in some uncertain
1660  * timeframe, we do not claim to have freed anything.
1661  */
1662 static int rcu_oom_notify(struct notifier_block *self,
1663 			  unsigned long notused, void *nfreed)
1664 {
1665 	int cpu;
1666 
1667 	/* Wait for callbacks from earlier instance to complete. */
1668 	wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
1669 	smp_mb(); /* Ensure callback reuse happens after callback invocation. */
1670 
1671 	/*
1672 	 * Prevent premature wakeup: ensure that all increments happen
1673 	 * before there is a chance of the counter reaching zero.
1674 	 */
1675 	atomic_set(&oom_callback_count, 1);
1676 
1677 	get_online_cpus();
1678 	for_each_online_cpu(cpu) {
1679 		smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
1680 		cond_resched_rcu_qs();
1681 	}
1682 	put_online_cpus();
1683 
1684 	/* Unconditionally decrement: no need to wake ourselves up. */
1685 	atomic_dec(&oom_callback_count);
1686 
1687 	return NOTIFY_OK;
1688 }
1689 
1690 static struct notifier_block rcu_oom_nb = {
1691 	.notifier_call = rcu_oom_notify
1692 };
1693 
1694 static int __init rcu_register_oom_notifier(void)
1695 {
1696 	register_oom_notifier(&rcu_oom_nb);
1697 	return 0;
1698 }
1699 early_initcall(rcu_register_oom_notifier);
1700 
1701 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1702 
1703 #ifdef CONFIG_RCU_CPU_STALL_INFO
1704 
1705 #ifdef CONFIG_RCU_FAST_NO_HZ
1706 
1707 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
1708 {
1709 	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1710 	unsigned long nlpd = rdtp->nonlazy_posted - rdtp->nonlazy_posted_snap;
1711 
1712 	sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c",
1713 		rdtp->last_accelerate & 0xffff, jiffies & 0xffff,
1714 		ulong2long(nlpd),
1715 		rdtp->all_lazy ? 'L' : '.',
1716 		rdtp->tick_nohz_enabled_snap ? '.' : 'D');
1717 }
1718 
1719 #else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
1720 
1721 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
1722 {
1723 	*cp = '\0';
1724 }
1725 
1726 #endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
1727 
1728 /* Initiate the stall-info list. */
1729 static void print_cpu_stall_info_begin(void)
1730 {
1731 	pr_cont("\n");
1732 }
1733 
1734 /*
1735  * Print out diagnostic information for the specified stalled CPU.
1736  *
1737  * If the specified CPU is aware of the current RCU grace period
1738  * (flavor specified by rsp), then print the number of scheduling
1739  * clock interrupts the CPU has taken during the time that it has
1740  * been aware.  Otherwise, print the number of RCU grace periods
1741  * that this CPU is ignorant of, for example, "1" if the CPU was
1742  * aware of the previous grace period.
1743  *
1744  * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
1745  */
1746 static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
1747 {
1748 	char fast_no_hz[72];
1749 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1750 	struct rcu_dynticks *rdtp = rdp->dynticks;
1751 	char *ticks_title;
1752 	unsigned long ticks_value;
1753 
1754 	if (rsp->gpnum == rdp->gpnum) {
1755 		ticks_title = "ticks this GP";
1756 		ticks_value = rdp->ticks_this_gp;
1757 	} else {
1758 		ticks_title = "GPs behind";
1759 		ticks_value = rsp->gpnum - rdp->gpnum;
1760 	}
1761 	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
1762 	pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
1763 	       cpu, ticks_value, ticks_title,
1764 	       atomic_read(&rdtp->dynticks) & 0xfff,
1765 	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
1766 	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
1767 	       ACCESS_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
1768 	       fast_no_hz);
1769 }
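
/*
 * A hypothetical example of the line emitted above (wrapped here for
 * readability; emitted as a single line), to illustrate how to read it:
 * CPU number, grace-period awareness, dynticks state, softirq counts,
 * force-quiescent-state count, and the CONFIG_RCU_FAST_NO_HZ summary
 * from print_cpu_stall_fast_no_hz():
 *
 *	3: (20 ticks this GP) idle=4a5/140000000000000/0 softirq=1204/1207
 *	fqs=6 last_accelerate: 5f1c/5f8a, nonlazy_posted: 2, .D
 */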
1770 
1771 /* Terminate the stall-info list. */
1772 static void print_cpu_stall_info_end(void)
1773 {
1774 	pr_err("\t");
1775 }
1776 
1777 /* Zero ->ticks_this_gp for all flavors of RCU. */
1778 static void zero_cpu_stall_ticks(struct rcu_data *rdp)
1779 {
1780 	rdp->ticks_this_gp = 0;
1781 	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
1782 }
1783 
1784 /* Increment ->ticks_this_gp for all flavors of RCU. */
1785 static void increment_cpu_stall_ticks(void)
1786 {
1787 	struct rcu_state *rsp;
1788 
1789 	for_each_rcu_flavor(rsp)
1790 		raw_cpu_inc(rsp->rda->ticks_this_gp);
1791 }
1792 
1793 #else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
1794 
1795 static void print_cpu_stall_info_begin(void)
1796 {
1797 	pr_cont(" {");
1798 }
1799 
1800 static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
1801 {
1802 	pr_cont(" %d", cpu);
1803 }
1804 
1805 static void print_cpu_stall_info_end(void)
1806 {
1807 	pr_cont("} ");
1808 }
1809 
1810 static void zero_cpu_stall_ticks(struct rcu_data *rdp)
1811 {
1812 }
1813 
1814 static void increment_cpu_stall_ticks(void)
1815 {
1816 }
1817 
1818 #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
1819 
1820 #ifdef CONFIG_RCU_NOCB_CPU
1821 
1822 /*
1823  * Offload callback processing from the boot-time-specified set of CPUs
1824  * specified by rcu_nocb_mask.  For each CPU in the set, there is a
1825  * kthread created that pulls the callbacks from the corresponding CPU,
1826  * waits for a grace period to elapse, and invokes the callbacks.
1827  * The no-CBs CPUs do a wake_up() on their kthread when they insert
1828  * a callback into any empty list, unless the rcu_nocb_poll boot parameter
1829  * has been specified, in which case each kthread actively polls its
1830  * CPU.  (Which isn't so great for energy efficiency, but which does
1831  * reduce RCU's overhead on that CPU.)
1832  *
1833  * This is intended to be used in conjunction with Frederic Weisbecker's
1834  * adaptive-idle work, which would seriously reduce OS jitter on CPUs
1835  * running CPU-bound user-mode computations.
1836  *
1837  * Offloading of callback processing could also in theory be used as
1838  * an energy-efficiency measure because CPUs with no RCU callbacks
1839  * queued are more aggressive about entering dyntick-idle mode.
1840  */
1841 
1842 
1843 /* Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. */
1844 static int __init rcu_nocb_setup(char *str)
1845 {
1846 	alloc_bootmem_cpumask_var(&rcu_nocb_mask);
1847 	have_rcu_nocb_mask = true;
1848 	cpulist_parse(str, rcu_nocb_mask);
1849 	return 1;
1850 }
1851 __setup("rcu_nocbs=", rcu_nocb_setup);
1852 
1853 static int __init parse_rcu_nocb_poll(char *arg)
1854 {
1855 	rcu_nocb_poll = 1;
1856 	return 0;
1857 }
1858 early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
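
/*
 * Example (hypothetical) kernel command line using the two boot parameters
 * parsed above: offload RCU callbacks from CPUs 1-7 and make the rcuo
 * kthreads poll rather than wait for wakeups:
 *
 *	rcu_nocbs=1-7 rcu_nocb_poll
 */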
1859 
1860 /*
1861  * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
1862  * grace period.
1863  */
1864 static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
1865 {
1866 	wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
1867 }
1868 
1869 /*
1870  * Set the root rcu_node structure's ->need_future_gp field
1871  * based on the sum of those of all rcu_node structures.  This does
1872  * double-count the root rcu_node structure's requests, but this
1873  * is necessary to handle the possibility of a rcu_nocb_kthread()
1874  * having awakened during the time that the rcu_node structures
1875  * were being updated for the end of the previous grace period.
1876  */
1877 static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
1878 {
1879 	rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
1880 }
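
/*
 * Illustrative example: if rnp->completed is 4, requests for the next
 * grace period (number 5) accumulate in rnp->need_future_gp[(4 + 1) & 0x1],
 * that is, in element 1, so the element used alternates from one grace
 * period to the next.
 */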
1881 
1882 static void rcu_init_one_nocb(struct rcu_node *rnp)
1883 {
1884 	init_waitqueue_head(&rnp->nocb_gp_wq[0]);
1885 	init_waitqueue_head(&rnp->nocb_gp_wq[1]);
1886 }
1887 
1888 #ifndef CONFIG_RCU_NOCB_CPU_ALL
1889 /* Is the specified CPU a no-CBs CPU? */
1890 bool rcu_is_nocb_cpu(int cpu)
1891 {
1892 	if (have_rcu_nocb_mask)
1893 		return cpumask_test_cpu(cpu, rcu_nocb_mask);
1894 	return false;
1895 }
1896 #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
1897 
1898 /*
1899  * Kick the leader kthread for this NOCB group.
1900  */
1901 static void wake_nocb_leader(struct rcu_data *rdp, bool force)
1902 {
1903 	struct rcu_data *rdp_leader = rdp->nocb_leader;
1904 
1905 	if (!ACCESS_ONCE(rdp_leader->nocb_kthread))
1906 		return;
1907 	if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
1908 		/* Prior smp_mb__after_atomic() orders against prior enqueue. */
1909 		ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
1910 		wake_up(&rdp_leader->nocb_wq);
1911 	}
1912 }
1913 
1914 /*
1915  * Does the specified CPU need an RCU callback for the specified flavor
1916  * of rcu_barrier()?
1917  */
1918 static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
1919 {
1920 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1921 	unsigned long ret;
1922 #ifdef CONFIG_PROVE_RCU
1923 	struct rcu_head *rhp;
1924 #endif /* #ifdef CONFIG_PROVE_RCU */
1925 
1926 	/*
1927 	 * Check count of all no-CBs callbacks awaiting invocation.
1928 	 * There needs to be a barrier before this function is called,
1929 	 * but associated with a prior determination that no more
1930 	 * callbacks would be posted.  In the worst case, the first
1931 	 * barrier in _rcu_barrier() suffices (but the caller cannot
1932 	 * necessarily rely on this, which is not a substitute for the caller
1933 	 * getting the concurrency design right!).  There must also be
1934 	 * a barrier between the following load and posting of a callback
1935 	 * (if a callback is in fact needed).  This is associated with an
1936 	 * atomic_inc() in the caller.
1937 	 */
1938 	ret = atomic_long_read(&rdp->nocb_q_count);
1939 
1940 #ifdef CONFIG_PROVE_RCU
1941 	rhp = ACCESS_ONCE(rdp->nocb_head);
1942 	if (!rhp)
1943 		rhp = ACCESS_ONCE(rdp->nocb_gp_head);
1944 	if (!rhp)
1945 		rhp = ACCESS_ONCE(rdp->nocb_follower_head);
1946 
1947 	/* Having no rcuo kthread but CBs after scheduler starts is bad! */
1948 	if (!ACCESS_ONCE(rdp->nocb_kthread) && rhp) {
1949 		/* RCU callback enqueued before CPU first came online??? */
1950 		pr_err("RCU: Never-onlined no-CBs CPU %d has CB %p\n",
1951 		       cpu, rhp->func);
1952 		WARN_ON_ONCE(1);
1953 	}
1954 #endif /* #ifdef CONFIG_PROVE_RCU */
1955 
1956 	return !!ret;
1957 }
1958 
1959 /*
1960  * Enqueue the specified string of rcu_head structures onto the specified
1961  * CPU's no-CBs lists.  The CPU is specified by rdp, the head of the
1962  * string by rhp, and the tail of the string by rhtp.  The non-lazy/lazy
1963  * counts are supplied by rhcount and rhcount_lazy.
1964  *
1965  * If warranted, also wake up the kthread servicing this CPU's queues.
1966  */
1967 static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
1968 				    struct rcu_head *rhp,
1969 				    struct rcu_head **rhtp,
1970 				    int rhcount, int rhcount_lazy,
1971 				    unsigned long flags)
1972 {
1973 	int len;
1974 	struct rcu_head **old_rhpp;
1975 	struct task_struct *t;
1976 
1977 	/* Enqueue the callback on the nocb list and update counts. */
1978 	atomic_long_add(rhcount, &rdp->nocb_q_count);
1979 	/* rcu_barrier() relies on ->nocb_q_count add before xchg. */
1980 	old_rhpp = xchg(&rdp->nocb_tail, rhtp);
1981 	ACCESS_ONCE(*old_rhpp) = rhp;
1982 	atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
1983 	smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
1984 
1985 	/* If we are not being polled and there is a kthread, awaken it ... */
1986 	t = ACCESS_ONCE(rdp->nocb_kthread);
1987 	if (rcu_nocb_poll || !t) {
1988 		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
1989 				    TPS("WakeNotPoll"));
1990 		return;
1991 	}
1992 	len = atomic_long_read(&rdp->nocb_q_count);
1993 	if (old_rhpp == &rdp->nocb_head) {
1994 		if (!irqs_disabled_flags(flags)) {
1995 			/* ... if queue was empty ... */
1996 			wake_nocb_leader(rdp, false);
1997 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
1998 					    TPS("WakeEmpty"));
1999 		} else {
2000 			rdp->nocb_defer_wakeup = RCU_NOGP_WAKE;
2001 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2002 					    TPS("WakeEmptyIsDeferred"));
2003 		}
2004 		rdp->qlen_last_fqs_check = 0;
2005 	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
2006 		/* ... or if many callbacks queued. */
2007 		if (!irqs_disabled_flags(flags)) {
2008 			wake_nocb_leader(rdp, true);
2009 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2010 					    TPS("WakeOvf"));
2011 		} else {
2012 			rdp->nocb_defer_wakeup = RCU_NOGP_WAKE_FORCE;
2013 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2014 					    TPS("WakeOvfIsDeferred"));
2015 		}
2016 		rdp->qlen_last_fqs_check = LONG_MAX / 2;
2017 	} else {
2018 		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeNot"));
2019 	}
2020 	return;
2021 }
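
/*
 * Informal summary of the wakeup decisions made above, derived from the
 * code for ease of reference:
 *
 *	polling, or no kthread yet:	no wakeup needed ("WakeNotPoll")
 *	queue was empty, irqs enabled:	immediate wakeup ("WakeEmpty")
 *	queue was empty, irqs disabled:	deferred RCU_NOGP_WAKE
 *					("WakeEmptyIsDeferred")
 *	length exceeds qlen_last_fqs_check + qhimark, irqs enabled:
 *					immediate forced wakeup ("WakeOvf")
 *	same, but irqs disabled:	deferred RCU_NOGP_WAKE_FORCE
 *					("WakeOvfIsDeferred")
 *	otherwise:			no wakeup ("WakeNot")
 */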
2022 
2023 /*
2024  * This is a helper for __call_rcu(), which invokes this when the normal
2025  * callback queue is inoperable.  If this is not a no-CBs CPU, this
2026  * function returns failure back to __call_rcu(), which can complain
2027  * appropriately.
2028  *
2029  * Otherwise, this function queues the callback where the corresponding
2030  * "rcuo" kthread can find it.
2031  */
2032 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
2033 			    bool lazy, unsigned long flags)
2034 {
2035 
2036 	if (!rcu_is_nocb_cpu(rdp->cpu))
2037 		return false;
2038 	__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags);
2039 	if (__is_kfree_rcu_offset((unsigned long)rhp->func))
2040 		trace_rcu_kfree_callback(rdp->rsp->name, rhp,
2041 					 (unsigned long)rhp->func,
2042 					 -atomic_long_read(&rdp->nocb_q_count_lazy),
2043 					 -atomic_long_read(&rdp->nocb_q_count));
2044 	else
2045 		trace_rcu_callback(rdp->rsp->name, rhp,
2046 				   -atomic_long_read(&rdp->nocb_q_count_lazy),
2047 				   -atomic_long_read(&rdp->nocb_q_count));
2048 
2049 	/*
2050 	 * If called from an extended quiescent state with interrupts
2051 	 * disabled, invoke the RCU core in order to allow the idle-entry
2052 	 * deferred-wakeup check to function.
2053 	 */
2054 	if (irqs_disabled_flags(flags) &&
2055 	    !rcu_is_watching() &&
2056 	    cpu_online(smp_processor_id()))
2057 		invoke_rcu_core();
2058 
2059 	return true;
2060 }
2061 
2062 /*
2063  * Adopt orphaned callbacks on a no-CBs CPU, or return false if this is
2064  * not a no-CBs CPU.
2065  */
2066 static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
2067 						     struct rcu_data *rdp,
2068 						     unsigned long flags)
2069 {
2070 	long ql = rsp->qlen;
2071 	long qll = rsp->qlen_lazy;
2072 
2073 	/* If this is not a no-CBs CPU, tell the caller to do it the old way. */
2074 	if (!rcu_is_nocb_cpu(smp_processor_id()))
2075 		return false;
2076 	rsp->qlen = 0;
2077 	rsp->qlen_lazy = 0;
2078 
2079 	/* First, enqueue the donelist, if any.  This preserves CB ordering. */
2080 	if (rsp->orphan_donelist != NULL) {
2081 		__call_rcu_nocb_enqueue(rdp, rsp->orphan_donelist,
2082 					rsp->orphan_donetail, ql, qll, flags);
2083 		ql = qll = 0;
2084 		rsp->orphan_donelist = NULL;
2085 		rsp->orphan_donetail = &rsp->orphan_donelist;
2086 	}
2087 	if (rsp->orphan_nxtlist != NULL) {
2088 		__call_rcu_nocb_enqueue(rdp, rsp->orphan_nxtlist,
2089 					rsp->orphan_nxttail, ql, qll, flags);
2090 		ql = qll = 0;
2091 		rsp->orphan_nxtlist = NULL;
2092 		rsp->orphan_nxttail = &rsp->orphan_nxtlist;
2093 	}
2094 	return true;
2095 }
2096 
2097 /*
2098  * If necessary, kick off a new grace period, and either way wait
2099  * for a subsequent grace period to complete.
2100  */
2101 static void rcu_nocb_wait_gp(struct rcu_data *rdp)
2102 {
2103 	unsigned long c;
2104 	bool d;
2105 	unsigned long flags;
2106 	bool needwake;
2107 	struct rcu_node *rnp = rdp->mynode;
2108 
2109 	raw_spin_lock_irqsave(&rnp->lock, flags);
2110 	smp_mb__after_unlock_lock();
2111 	needwake = rcu_start_future_gp(rnp, rdp, &c);
2112 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
2113 	if (needwake)
2114 		rcu_gp_kthread_wake(rdp->rsp);
2115 
2116 	/*
2117 	 * Wait for the grace period.  Do so interruptibly to avoid messing
2118 	 * up the load average.
2119 	 */
2120 	trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
2121 	for (;;) {
2122 		wait_event_interruptible(
2123 			rnp->nocb_gp_wq[c & 0x1],
2124 			(d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c)));
2125 		if (likely(d))
2126 			break;
2127 		WARN_ON(signal_pending(current));
2128 		trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait"));
2129 	}
2130 	trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait"));
2131 	smp_mb(); /* Ensure that CB invocation happens after GP end. */
2132 }
2133 
2134 /*
2135  * Leaders come here to wait for additional callbacks to show up.
2136  * This function does not return until callbacks appear.
2137  */
2138 static void nocb_leader_wait(struct rcu_data *my_rdp)
2139 {
2140 	bool firsttime = true;
2141 	bool gotcbs;
2142 	struct rcu_data *rdp;
2143 	struct rcu_head **tail;
2144 
2145 wait_again:
2146 
2147 	/* Wait for callbacks to appear. */
2148 	if (!rcu_nocb_poll) {
2149 		trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep");
2150 		wait_event_interruptible(my_rdp->nocb_wq,
2151 				!ACCESS_ONCE(my_rdp->nocb_leader_sleep));
2152 		/* Memory barrier handled by smp_mb() calls below and repoll. */
2153 	} else if (firsttime) {
2154 		firsttime = false; /* Don't drown trace log with "Poll"! */
2155 		trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Poll");
2156 	}
2157 
2158 	/*
2159 	 * Each pass through the following loop checks a follower for CBs.
2160 	 * We are our own first follower.  Any CBs found are moved to
2161 	 * nocb_gp_head, where they await a grace period.
2162 	 */
2163 	gotcbs = false;
2164 	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
2165 		rdp->nocb_gp_head = ACCESS_ONCE(rdp->nocb_head);
2166 		if (!rdp->nocb_gp_head)
2167 			continue;  /* No CBs here, try next follower. */
2168 
2169 		/* Move callbacks to wait-for-GP list, which is empty. */
2170 		ACCESS_ONCE(rdp->nocb_head) = NULL;
2171 		rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
2172 		gotcbs = true;
2173 	}
2174 
2175 	/*
2176 	 * If there were no callbacks, sleep a bit, rescan after a
2177 	 * memory barrier, and go retry.
2178 	 */
2179 	if (unlikely(!gotcbs)) {
2180 		if (!rcu_nocb_poll)
2181 			trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu,
2182 					    "WokeEmpty");
2183 		WARN_ON(signal_pending(current));
2184 		schedule_timeout_interruptible(1);
2185 
2186 		/* Rescan in case we were a victim of memory ordering. */
2187 		my_rdp->nocb_leader_sleep = true;
2188 		smp_mb();  /* Ensure _sleep true before scan. */
2189 		for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower)
2190 			if (ACCESS_ONCE(rdp->nocb_head)) {
2191 				/* Found CB, so short-circuit next wait. */
2192 				my_rdp->nocb_leader_sleep = false;
2193 				break;
2194 			}
2195 		goto wait_again;
2196 	}
2197 
2198 	/* Wait for one grace period. */
2199 	rcu_nocb_wait_gp(my_rdp);
2200 
2201 	/*
2202 	 * We left ->nocb_leader_sleep unset to reduce cache thrashing.
2203 	 * We set it now, but recheck for new callbacks while
2204 	 * traversing our follower list.
2205 	 */
2206 	my_rdp->nocb_leader_sleep = true;
2207 	smp_mb(); /* Ensure _sleep true before scan of ->nocb_head. */
2208 
2209 	/* Each pass through the following loop wakes a follower, if needed. */
2210 	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
2211 		if (ACCESS_ONCE(rdp->nocb_head))
2212 			my_rdp->nocb_leader_sleep = false;/* No need to sleep.*/
2213 		if (!rdp->nocb_gp_head)
2214 			continue; /* No CBs, so no need to wake follower. */
2215 
2216 		/* Append callbacks to follower's "done" list. */
2217 		tail = xchg(&rdp->nocb_follower_tail, rdp->nocb_gp_tail);
2218 		*tail = rdp->nocb_gp_head;
2219 		smp_mb__after_atomic(); /* Store *tail before wakeup. */
2220 		if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
2221 			/*
2222 			 * List was empty, wake up the follower.
2223 			 * Memory barriers supplied by atomic_long_add().
2224 			 */
2225 			wake_up(&rdp->nocb_wq);
2226 		}
2227 	}
2228 
2229 	/* If we (the leader) don't have CBs, go wait some more. */
2230 	if (!my_rdp->nocb_follower_head)
2231 		goto wait_again;
2232 }
2233 
2234 /*
2235  * Followers come here to wait for additional callbacks to show up.
2236  * This function does not return until callbacks appear.
2237  */
2238 static void nocb_follower_wait(struct rcu_data *rdp)
2239 {
2240 	bool firsttime = true;
2241 
2242 	for (;;) {
2243 		if (!rcu_nocb_poll) {
2244 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2245 					    "FollowerSleep");
2246 			wait_event_interruptible(rdp->nocb_wq,
2247 						 ACCESS_ONCE(rdp->nocb_follower_head));
2248 		} else if (firsttime) {
2249 			/* Don't drown trace log with "Poll"! */
2250 			firsttime = false;
2251 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "Poll");
2252 		}
2253 		if (smp_load_acquire(&rdp->nocb_follower_head)) {
2254 			/* ^^^ Ensure CB invocation follows _head test. */
2255 			return;
2256 		}
2257 		if (!rcu_nocb_poll)
2258 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2259 					    "WokeEmpty");
2260 		WARN_ON(signal_pending(current));
2261 		schedule_timeout_interruptible(1);
2262 	}
2263 }
2264 
2265 /*
2266  * Per-rcu_data kthread, but only for no-CBs CPUs.  Each kthread invokes
2267  * callbacks queued by the corresponding no-CBs CPU, however, there is
2268  * callbacks queued by the corresponding no-CBs CPU; however, there is
2269  * kthreads don't have to do quite so many wakeups.
2270  */
2271 static int rcu_nocb_kthread(void *arg)
2272 {
2273 	int c, cl;
2274 	struct rcu_head *list;
2275 	struct rcu_head *next;
2276 	struct rcu_head **tail;
2277 	struct rcu_data *rdp = arg;
2278 
2279 	/* Each pass through this loop invokes one batch of callbacks */
2280 	for (;;) {
2281 		/* Wait for callbacks. */
2282 		if (rdp->nocb_leader == rdp)
2283 			nocb_leader_wait(rdp);
2284 		else
2285 			nocb_follower_wait(rdp);
2286 
2287 		/* Pull the ready-to-invoke callbacks onto local list. */
2288 		list = ACCESS_ONCE(rdp->nocb_follower_head);
2289 		BUG_ON(!list);
2290 		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
2291 		ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
2292 		tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
2293 
2294 		/* Each pass through the following loop invokes a callback. */
2295 		trace_rcu_batch_start(rdp->rsp->name,
2296 				      atomic_long_read(&rdp->nocb_q_count_lazy),
2297 				      atomic_long_read(&rdp->nocb_q_count), -1);
2298 		c = cl = 0;
2299 		while (list) {
2300 			next = list->next;
2301 			/* Wait for enqueuing to complete, if needed. */
2302 			while (next == NULL && &list->next != tail) {
2303 				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2304 						    TPS("WaitQueue"));
2305 				schedule_timeout_interruptible(1);
2306 				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2307 						    TPS("WokeQueue"));
2308 				next = list->next;
2309 			}
2310 			debug_rcu_head_unqueue(list);
2311 			local_bh_disable();
2312 			if (__rcu_reclaim(rdp->rsp->name, list))
2313 				cl++;
2314 			c++;
2315 			local_bh_enable();
2316 			list = next;
2317 		}
2318 		trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
2319 		smp_mb__before_atomic();  /* _add after CB invocation. */
2320 		atomic_long_add(-c, &rdp->nocb_q_count);
2321 		atomic_long_add(-cl, &rdp->nocb_q_count_lazy);
2322 		rdp->n_nocbs_invoked += c;
2323 	}
2324 	return 0;
2325 }
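
/*
 * Informal summary of a callback's path through the no-CBs lists, as
 * implemented by the functions above:
 *
 *	__call_rcu_nocb_enqueue():  ->nocb_head / ->nocb_tail
 *		|  leader moves CBs while preparing to wait for a GP
 *	nocb_leader_wait():         ->nocb_gp_head / ->nocb_gp_tail
 *		|  after rcu_nocb_wait_gp(), appended to each follower
 *	nocb_leader_wait():         ->nocb_follower_head / ->nocb_follower_tail
 *		|  pulled onto a local list and invoked
 *	rcu_nocb_kthread():         callbacks invoked via __rcu_reclaim()
 */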
2326 
2327 /* Is a deferred wakeup of rcu_nocb_kthread() required? */
2328 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
2329 {
2330 	return ACCESS_ONCE(rdp->nocb_defer_wakeup);
2331 }
2332 
2333 /* Do a deferred wakeup of rcu_nocb_kthread(). */
2334 static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
2335 {
2336 	int ndw;
2337 
2338 	if (!rcu_nocb_need_deferred_wakeup(rdp))
2339 		return;
2340 	ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup);
2341 	ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
2342 	wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
2343 	trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
2344 }
2345 
2346 void __init rcu_init_nohz(void)
2347 {
2348 	int cpu;
2349 	bool need_rcu_nocb_mask = true;
2350 	struct rcu_state *rsp;
2351 
2352 #ifdef CONFIG_RCU_NOCB_CPU_NONE
2353 	need_rcu_nocb_mask = false;
2354 #endif /* #ifdef CONFIG_RCU_NOCB_CPU_NONE */
2355 
2356 #if defined(CONFIG_NO_HZ_FULL)
2357 	if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
2358 		need_rcu_nocb_mask = true;
2359 #endif /* #if defined(CONFIG_NO_HZ_FULL) */
2360 
2361 	if (!have_rcu_nocb_mask && need_rcu_nocb_mask) {
2362 		if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
2363 			pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
2364 			return;
2365 		}
2366 		have_rcu_nocb_mask = true;
2367 	}
2368 	if (!have_rcu_nocb_mask)
2369 		return;
2370 
2371 #ifdef CONFIG_RCU_NOCB_CPU_ZERO
2372 	pr_info("\tOffload RCU callbacks from CPU 0\n");
2373 	cpumask_set_cpu(0, rcu_nocb_mask);
2374 #endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
2375 #ifdef CONFIG_RCU_NOCB_CPU_ALL
2376 	pr_info("\tOffload RCU callbacks from all CPUs\n");
2377 	cpumask_copy(rcu_nocb_mask, cpu_possible_mask);
2378 #endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
2379 #if defined(CONFIG_NO_HZ_FULL)
2380 	if (tick_nohz_full_running)
2381 		cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
2382 #endif /* #if defined(CONFIG_NO_HZ_FULL) */
2383 
2384 	if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
2385 		pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n");
2386 		cpumask_and(rcu_nocb_mask, cpu_possible_mask,
2387 			    rcu_nocb_mask);
2388 	}
2389 	pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
2390 		cpumask_pr_args(rcu_nocb_mask));
2391 	if (rcu_nocb_poll)
2392 		pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
2393 
2394 	for_each_rcu_flavor(rsp) {
2395 		for_each_cpu(cpu, rcu_nocb_mask) {
2396 			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2397 
2398 			/*
2399 			 * If there are early callbacks, they will need
2400 			 * to be moved to the nocb lists.
2401 			 */
2402 			WARN_ON_ONCE(rdp->nxttail[RCU_NEXT_TAIL] !=
2403 				     &rdp->nxtlist &&
2404 				     rdp->nxttail[RCU_NEXT_TAIL] != NULL);
2405 			init_nocb_callback_list(rdp);
2406 		}
2407 		rcu_organize_nocb_kthreads(rsp);
2408 	}
2409 }
2410 
2411 /* Initialize per-rcu_data variables for no-CBs CPUs. */
2412 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2413 {
2414 	rdp->nocb_tail = &rdp->nocb_head;
2415 	init_waitqueue_head(&rdp->nocb_wq);
2416 	rdp->nocb_follower_tail = &rdp->nocb_follower_head;
2417 }
2418 
2419 /*
2420  * If the specified CPU is a no-CBs CPU that does not already have its
2421  * rcuo kthread for the specified RCU flavor, spawn it.  If the CPUs are
2422  * brought online out of order, this can require re-organizing the
2423  * leader-follower relationships.
2424  */
2425 static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
2426 {
2427 	struct rcu_data *rdp;
2428 	struct rcu_data *rdp_last;
2429 	struct rcu_data *rdp_old_leader;
2430 	struct rcu_data *rdp_spawn = per_cpu_ptr(rsp->rda, cpu);
2431 	struct task_struct *t;
2432 
2433 	/*
2434 	 * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
2435 	 * then nothing to do.
2436 	 */
2437 	if (!rcu_is_nocb_cpu(cpu) || rdp_spawn->nocb_kthread)
2438 		return;
2439 
2440 	/* If we didn't spawn the leader first, reorganize! */
2441 	rdp_old_leader = rdp_spawn->nocb_leader;
2442 	if (rdp_old_leader != rdp_spawn && !rdp_old_leader->nocb_kthread) {
2443 		rdp_last = NULL;
2444 		rdp = rdp_old_leader;
2445 		do {
2446 			rdp->nocb_leader = rdp_spawn;
2447 			if (rdp_last && rdp != rdp_spawn)
2448 				rdp_last->nocb_next_follower = rdp;
2449 			if (rdp == rdp_spawn) {
2450 				rdp = rdp->nocb_next_follower;
2451 			} else {
2452 				rdp_last = rdp;
2453 				rdp = rdp->nocb_next_follower;
2454 				rdp_last->nocb_next_follower = NULL;
2455 			}
2456 		} while (rdp);
2457 		rdp_spawn->nocb_next_follower = rdp_old_leader;
2458 	}
2459 
2460 	/* Spawn the kthread for this CPU and RCU flavor. */
2461 	t = kthread_run(rcu_nocb_kthread, rdp_spawn,
2462 			"rcuo%c/%d", rsp->abbr, cpu);
2463 	BUG_ON(IS_ERR(t));
2464 	ACCESS_ONCE(rdp_spawn->nocb_kthread) = t;
2465 }
2466 
2467 /*
2468  * If the specified CPU is a no-CBs CPU that does not already have its
2469  * rcuo kthreads, spawn them.
2470  */
2471 static void rcu_spawn_all_nocb_kthreads(int cpu)
2472 {
2473 	struct rcu_state *rsp;
2474 
2475 	if (rcu_scheduler_fully_active)
2476 		for_each_rcu_flavor(rsp)
2477 			rcu_spawn_one_nocb_kthread(rsp, cpu);
2478 }
2479 
2480 /*
2481  * Once the scheduler is running, spawn rcuo kthreads for all online
2482  * no-CBs CPUs.  This assumes that the early_initcall()s happen before
2483  * non-boot CPUs come online -- if this changes, we will need to add
2484  * some mutual exclusion.
2485  */
2486 static void __init rcu_spawn_nocb_kthreads(void)
2487 {
2488 	int cpu;
2489 
2490 	for_each_online_cpu(cpu)
2491 		rcu_spawn_all_nocb_kthreads(cpu);
2492 }
2493 
2494 /* How many follower CPU IDs per leader?  Default of -1 for sqrt(nr_cpu_ids). */
2495 static int rcu_nocb_leader_stride = -1;
2496 module_param(rcu_nocb_leader_stride, int, 0444);
2497 
2498 /*
2499  * Initialize leader-follower relationships for all no-CBs CPUs.
2500  */
2501 static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp)
2502 {
2503 	int cpu;
2504 	int ls = rcu_nocb_leader_stride;
2505 	int nl = 0;  /* Next leader. */
2506 	struct rcu_data *rdp;
2507 	struct rcu_data *rdp_leader = NULL;  /* Suppress misguided gcc warn. */
2508 	struct rcu_data *rdp_prev = NULL;
2509 
2510 	if (!have_rcu_nocb_mask)
2511 		return;
2512 	if (ls == -1) {
2513 		ls = int_sqrt(nr_cpu_ids);
2514 		rcu_nocb_leader_stride = ls;
2515 	}
2516 
2517 	/*
2518 	 * Each pass through this loop sets up one rcu_data structure and
2519 	 * spawns one rcu_nocb_kthread().
2520 	 */
2521 	for_each_cpu(cpu, rcu_nocb_mask) {
2522 		rdp = per_cpu_ptr(rsp->rda, cpu);
2523 		if (rdp->cpu >= nl) {
2524 			/* New leader, set up for followers & next leader. */
2525 			nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
2526 			rdp->nocb_leader = rdp;
2527 			rdp_leader = rdp;
2528 		} else {
2529 			/* Another follower, link to previous leader. */
2530 			rdp->nocb_leader = rdp_leader;
2531 			rdp_prev->nocb_next_follower = rdp;
2532 		}
2533 		rdp_prev = rdp;
2534 	}
2535 }
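
/*
 * Worked example (hypothetical configuration): with nr_cpu_ids = 16, all
 * CPUs designated no-CBs, and the default stride ls = int_sqrt(16) = 4,
 * the loop above makes CPUs 0, 4, 8, and 12 leaders, each followed by the
 * next three CPUs.
 */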
2536 
2537 /* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs */
2538 static bool init_nocb_callback_list(struct rcu_data *rdp)
2539 {
2540 	if (!rcu_is_nocb_cpu(rdp->cpu))
2541 		return false;
2542 
2543 	rdp->nxttail[RCU_NEXT_TAIL] = NULL;
2544 	return true;
2545 }
2546 
2547 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
2548 
2549 static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
2550 {
2551 	WARN_ON_ONCE(1); /* Should be dead code. */
2552 	return false;
2553 }
2554 
2555 static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
2556 {
2557 }
2558 
2559 static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
2560 {
2561 }
2562 
2563 static void rcu_init_one_nocb(struct rcu_node *rnp)
2564 {
2565 }
2566 
2567 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
2568 			    bool lazy, unsigned long flags)
2569 {
2570 	return false;
2571 }
2572 
2573 static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
2574 						     struct rcu_data *rdp,
2575 						     unsigned long flags)
2576 {
2577 	return false;
2578 }
2579 
2580 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2581 {
2582 }
2583 
2584 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
2585 {
2586 	return false;
2587 }
2588 
2589 static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
2590 {
2591 }
2592 
2593 static void rcu_spawn_all_nocb_kthreads(int cpu)
2594 {
2595 }
2596 
2597 static void __init rcu_spawn_nocb_kthreads(void)
2598 {
2599 }
2600 
2601 static bool init_nocb_callback_list(struct rcu_data *rdp)
2602 {
2603 	return false;
2604 }
2605 
2606 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
2607 
2608 /*
2609  * An adaptive-ticks CPU can potentially execute in kernel mode for an
2610  * arbitrarily long period of time with the scheduling-clock tick turned
2611  * off.  RCU will be paying attention to this CPU because it is in the
2612  * kernel, but the CPU cannot be guaranteed to be executing the RCU state
2613  * machine because the scheduling-clock tick has been disabled.  Therefore,
2614  * if an adaptive-ticks CPU is failing to respond to the current grace
2615  * period and has not be idle from an RCU perspective, kick it.
2616  * period and has not been idle from an RCU perspective, kick it.
2617 static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
2618 {
2619 #ifdef CONFIG_NO_HZ_FULL
2620 	if (tick_nohz_full_cpu(cpu))
2621 		smp_send_reschedule(cpu);
2622 #endif /* #ifdef CONFIG_NO_HZ_FULL */
2623 }
2624 
2625 
2626 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
2627 
2628 static int full_sysidle_state;		/* Current system-idle state. */
2629 #define RCU_SYSIDLE_NOT		0	/* Some CPU is not idle. */
2630 #define RCU_SYSIDLE_SHORT	1	/* All CPUs idle for brief period. */
2631 #define RCU_SYSIDLE_LONG	2	/* All CPUs idle for long enough. */
2632 #define RCU_SYSIDLE_FULL	3	/* All CPUs idle, ready for sysidle. */
2633 #define RCU_SYSIDLE_FULL_NOTED	4	/* Actually entered sysidle state. */
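
/*
 * Informal summary of the transitions driven by the code below
 * (rcu_sysidle(), rcu_sysidle_cancel(), and rcu_sys_is_idle()):
 *
 *	NOT   --(all non-timekeeping CPUs observed idle)----------> SHORT
 *	SHORT --(still idle after rcu_sysidle_delay() jiffies)----> LONG
 *	LONG  --(still idle after another such delay)-------------> FULL
 *	FULL  --(timekeeping CPU notes it in rcu_sys_is_idle())---> FULL_NOTED
 *	LONG, FULL, or FULL_NOTED --(some CPU goes non-idle)------> NOT
 */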
2634 
2635 /*
2636  * Invoked to note exit from irq or task transition to idle.  Note that
2637  * usermode execution does -not- count as idle here!  After all, we want
2638  * to detect full-system idle states, not RCU quiescent states and grace
2639  * periods.  The caller must have disabled interrupts.
2640  */
2641 static void rcu_sysidle_enter(int irq)
2642 {
2643 	unsigned long j;
2644 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
2645 
2646 	/* If there are no nohz_full= CPUs, no need to track this. */
2647 	if (!tick_nohz_full_enabled())
2648 		return;
2649 
2650 	/* Adjust nesting, check for fully idle. */
2651 	if (irq) {
2652 		rdtp->dynticks_idle_nesting--;
2653 		WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0);
2654 		if (rdtp->dynticks_idle_nesting != 0)
2655 			return;  /* Still not fully idle. */
2656 	} else {
2657 		if ((rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) ==
2658 		    DYNTICK_TASK_NEST_VALUE) {
2659 			rdtp->dynticks_idle_nesting = 0;
2660 		} else {
2661 			rdtp->dynticks_idle_nesting -= DYNTICK_TASK_NEST_VALUE;
2662 			WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0);
2663 			return;  /* Still not fully idle. */
2664 		}
2665 	}
2666 
2667 	/* Record start of fully idle period. */
2668 	j = jiffies;
2669 	ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
2670 	smp_mb__before_atomic();
2671 	atomic_inc(&rdtp->dynticks_idle);
2672 	smp_mb__after_atomic();
2673 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
2674 }
2675 
2676 /*
2677  * Unconditionally force exit from full system-idle state.  This is
2678  * invoked when a normal CPU exits idle, but must be called separately
2679  * for the timekeeping CPU (tick_do_timer_cpu).  The reason for this
2680  * is that the timekeeping CPU is permitted to take scheduling-clock
2681  * interrupts while the system is in system-idle state, and of course
2682  * rcu_sysidle_exit() has no way of distinguishing a scheduling-clock
2683  * interrupt from any other type of interrupt.
2684  */
2685 void rcu_sysidle_force_exit(void)
2686 {
2687 	int oldstate = ACCESS_ONCE(full_sysidle_state);
2688 	int newoldstate;
2689 
2690 	/*
2691 	 * Each pass through the following loop attempts to exit full
2692 	 * system-idle state.  If contention proves to be a problem,
2693 	 * a trylock-based contention tree could be used here.
2694 	 */
2695 	while (oldstate > RCU_SYSIDLE_SHORT) {
2696 		newoldstate = cmpxchg(&full_sysidle_state,
2697 				      oldstate, RCU_SYSIDLE_NOT);
2698 		if (oldstate == newoldstate &&
2699 		    oldstate == RCU_SYSIDLE_FULL_NOTED) {
2700 			rcu_kick_nohz_cpu(tick_do_timer_cpu);
2701 			return; /* We cleared it, done! */
2702 		}
2703 		oldstate = newoldstate;
2704 	}
2705 	smp_mb(); /* Order initial oldstate fetch vs. later non-idle work. */
2706 }
2707 
2708 /*
2709  * Invoked to note entry to irq or task transition from idle.  Note that
2710  * usermode execution does -not- count as idle here!  The caller must
2711  * have disabled interrupts.
2712  */
2713 static void rcu_sysidle_exit(int irq)
2714 {
2715 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
2716 
2717 	/* If there are no nohz_full= CPUs, no need to track this. */
2718 	if (!tick_nohz_full_enabled())
2719 		return;
2720 
2721 	/* Adjust nesting, check for already non-idle. */
2722 	if (irq) {
2723 		rdtp->dynticks_idle_nesting++;
2724 		WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
2725 		if (rdtp->dynticks_idle_nesting != 1)
2726 			return; /* Already non-idle. */
2727 	} else {
2728 		/*
2729 		 * Allow for irq misnesting.  Yes, it really is possible
2730 		 * to enter an irq handler then never leave it, and maybe
2731 		 * also vice versa.  Handle both possibilities.
2732 		 */
2733 		if (rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) {
2734 			rdtp->dynticks_idle_nesting += DYNTICK_TASK_NEST_VALUE;
2735 			WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
2736 			return; /* Already non-idle. */
2737 		} else {
2738 			rdtp->dynticks_idle_nesting = DYNTICK_TASK_EXIT_IDLE;
2739 		}
2740 	}
2741 
2742 	/* Record end of idle period. */
2743 	smp_mb__before_atomic();
2744 	atomic_inc(&rdtp->dynticks_idle);
2745 	smp_mb__after_atomic();
2746 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
2747 
2748 	/*
2749 	 * If we are the timekeeping CPU, we are permitted to be non-idle
2750 	 * during a system-idle state.  This must be the case, because
2751 	 * the timekeeping CPU has to take scheduling-clock interrupts
2752 	 * during the time that the system is transitioning to full
2753 	 * system-idle state.  This means that the timekeeping CPU must
2754 	 * invoke rcu_sysidle_force_exit() directly if it does anything
2755 	 * more than take a scheduling-clock interrupt.
2756 	 */
2757 	if (smp_processor_id() == tick_do_timer_cpu)
2758 		return;
2759 
2760 	/* Update system-idle state: We are clearly no longer fully idle! */
2761 	rcu_sysidle_force_exit();
2762 }
2763 
2764 /*
2765  * Check to see if the current CPU is idle.  Note that usermode execution
2766  * does not count as idle.  The caller must have disabled interrupts.
2767  */
2768 static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
2769 				  unsigned long *maxj)
2770 {
2771 	int cur;
2772 	unsigned long j;
2773 	struct rcu_dynticks *rdtp = rdp->dynticks;
2774 
2775 	/* If there are no nohz_full= CPUs, don't check system-wide idleness. */
2776 	if (!tick_nohz_full_enabled())
2777 		return;
2778 
2779 	/*
2780 	 * If some other CPU has already reported non-idle, if this is
2781 	 * not the flavor of RCU that tracks sysidle state, or if this
2782 	 * is an offline or the timekeeping CPU, nothing to do.
2783 	 * is an offline CPU or the timekeeping CPU, there is nothing to do.
2784 	if (!*isidle || rdp->rsp != rcu_state_p ||
2785 	    cpu_is_offline(rdp->cpu) || rdp->cpu == tick_do_timer_cpu)
2786 		return;
2787 	if (rcu_gp_in_progress(rdp->rsp))
2788 		WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
2789 
2790 	/* Pick up current idle and NMI-nesting counter and check. */
2791 	cur = atomic_read(&rdtp->dynticks_idle);
2792 	if (cur & 0x1) {
2793 		*isidle = false; /* We are not idle! */
2794 		return;
2795 	}
2796 	smp_mb(); /* Read counters before timestamps. */
2797 
2798 	/* Pick up timestamps. */
2799 	j = ACCESS_ONCE(rdtp->dynticks_idle_jiffies);
2800 	/* If this CPU entered idle more recently, update maxj timestamp. */
2801 	if (ULONG_CMP_LT(*maxj, j))
2802 		*maxj = j;
2803 }
2804 
2805 /*
2806  * Is this the flavor of RCU that is handling full-system idle?
2807  */
2808 static bool is_sysidle_rcu_state(struct rcu_state *rsp)
2809 {
2810 	return rsp == rcu_state_p;
2811 }
2812 
2813 /*
2814  * Return a delay in jiffies based on the number of CPUs, rcu_node
2815  * leaf fanout, and jiffies tick rate.  The idea is to allow larger
2816  * systems more time to transition to full-idle state in order to
2817  * avoid the cache thrashing that would otherwise occur on the state variable.
2818  * Really small systems (less than a couple of tens of CPUs) should
2819  * instead use a single global atomically incremented counter, and later
2820  * versions of this will automatically reconfigure themselves accordingly.
2821  */
2822 static unsigned long rcu_sysidle_delay(void)
2823 {
2824 	if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
2825 		return 0;
2826 	return DIV_ROUND_UP(nr_cpu_ids * HZ, rcu_fanout_leaf * 1000);
2827 }
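
/*
 * Worked example (hypothetical configuration): with nr_cpu_ids = 256,
 * HZ = 1000, and rcu_fanout_leaf = 16, the delay above is
 * DIV_ROUND_UP(256 * 1000, 16 * 1000) = 16 jiffies.  Systems with
 * nr_cpu_ids at or below CONFIG_NO_HZ_FULL_SYSIDLE_SMALL skip the delay
 * entirely.
 */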
2828 
2829 /*
2830  * Advance the full-system-idle state.  This is invoked when all of
2831  * the non-timekeeping CPUs are idle.
2832  */
2833 static void rcu_sysidle(unsigned long j)
2834 {
2835 	/* Check the current state. */
2836 	switch (ACCESS_ONCE(full_sysidle_state)) {
2837 	case RCU_SYSIDLE_NOT:
2838 
2839 		/* First time all are idle, so note a short idle period. */
2840 		ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
2841 		break;
2842 
2843 	case RCU_SYSIDLE_SHORT:
2844 
2845 		/*
2846 		 * Idle for a bit, time to advance to next state?
2847 		 * cmpxchg failure means race with non-idle, let them win.
2848 		 */
2849 		if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay()))
2850 			(void)cmpxchg(&full_sysidle_state,
2851 				      RCU_SYSIDLE_SHORT, RCU_SYSIDLE_LONG);
2852 		break;
2853 
2854 	case RCU_SYSIDLE_LONG:
2855 
2856 		/*
2857 		 * Do an additional check pass before advancing to full.
2858 		 * cmpxchg failure means race with non-idle, let them win.
2859 		 */
2860 		if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay()))
2861 			(void)cmpxchg(&full_sysidle_state,
2862 				      RCU_SYSIDLE_LONG, RCU_SYSIDLE_FULL);
2863 		break;
2864 
2865 	default:
2866 		break;
2867 	}
2868 }
2869 
2870 /*
2871  * Found a non-idle non-timekeeping CPU, so kick the system-idle state
2872  * back to the beginning.
2873  */
2874 static void rcu_sysidle_cancel(void)
2875 {
2876 	smp_mb();
2877 	if (full_sysidle_state > RCU_SYSIDLE_SHORT)
2878 		ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
2879 }
2880 
2881 /*
2882  * Update the sysidle state based on the results of a force-quiescent-state
2883  * scan of the CPUs' dyntick-idle state.
2884  */
2885 static void rcu_sysidle_report(struct rcu_state *rsp, int isidle,
2886 			       unsigned long maxj, bool gpkt)
2887 {
2888 	if (rsp != rcu_state_p)
2889 		return;  /* Wrong flavor, ignore. */
2890 	if (gpkt && nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
2891 		return;  /* Running state machine from timekeeping CPU. */
2892 	if (isidle)
2893 		rcu_sysidle(maxj);    /* More idle! */
2894 	else
2895 		rcu_sysidle_cancel(); /* Idle is over. */
2896 }
2897 
2898 /*
2899  * Wrapper for rcu_sysidle_report() when called from the grace-period
2900  * kthread's context.
2901  */
2902 static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
2903 				  unsigned long maxj)
2904 {
2905 	/* If there are no nohz_full= CPUs, no need to track this. */
2906 	if (!tick_nohz_full_enabled())
2907 		return;
2908 
2909 	rcu_sysidle_report(rsp, isidle, maxj, true);
2910 }
2911 
2912 /* Callback and function for forcing an RCU grace period. */
2913 struct rcu_sysidle_head {
2914 	struct rcu_head rh;
2915 	int inuse;
2916 };
2917 
2918 static void rcu_sysidle_cb(struct rcu_head *rhp)
2919 {
2920 	struct rcu_sysidle_head *rshp;
2921 
2922 	/*
2923 	 * The following memory barrier is needed to replace the
2924 	 * memory barriers that would normally be in the memory
2925 	 * allocator.
2926 	 */
2927 	smp_mb();  /* grace period precedes setting inuse. */
2928 
2929 	rshp = container_of(rhp, struct rcu_sysidle_head, rh);
2930 	ACCESS_ONCE(rshp->inuse) = 0;
2931 }
2932 
2933 /*
2934  * Check to see if the system is fully idle, other than the timekeeping CPU.
2935  * The caller must have disabled interrupts.  This is not intended to be
2936  * called unless tick_nohz_full_enabled().
2937  */
2938 bool rcu_sys_is_idle(void)
2939 {
2940 	static struct rcu_sysidle_head rsh;
2941 	int rss = ACCESS_ONCE(full_sysidle_state);
2942 
2943 	if (WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu))
2944 		return false;
2945 
2946 	/* Handle small-system case by doing a full scan of CPUs. */
2947 	if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) {
2948 		int oldrss = rss - 1;
2949 
2950 		/*
2951 		 * One pass to advance to each state up to _FULL.
2952 		 * Give up if any pass fails to advance the state.
2953 		 */
2954 		while (rss < RCU_SYSIDLE_FULL && oldrss < rss) {
2955 			int cpu;
2956 			bool isidle = true;
2957 			unsigned long maxj = jiffies - ULONG_MAX / 4;
2958 			struct rcu_data *rdp;
2959 
2960 			/* Scan all the CPUs looking for nonidle CPUs. */
2961 			for_each_possible_cpu(cpu) {
2962 				rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
2963 				rcu_sysidle_check_cpu(rdp, &isidle, &maxj);
2964 				if (!isidle)
2965 					break;
2966 			}
2967 			rcu_sysidle_report(rcu_state_p, isidle, maxj, false);
2968 			oldrss = rss;
2969 			rss = ACCESS_ONCE(full_sysidle_state);
2970 		}
2971 	}
2972 
2973 	/* If this is the first observation of an idle period, record it. */
2974 	if (rss == RCU_SYSIDLE_FULL) {
2975 		rss = cmpxchg(&full_sysidle_state,
2976 			      RCU_SYSIDLE_FULL, RCU_SYSIDLE_FULL_NOTED);
2977 		return rss == RCU_SYSIDLE_FULL;
2978 	}
2979 
2980 	smp_mb(); /* ensure rss load happens before later caller actions. */
2981 
2982 	/* If already fully idle, tell the caller (in case of races). */
2983 	if (rss == RCU_SYSIDLE_FULL_NOTED)
2984 		return true;
2985 
2986 	/*
2987 	 * If we aren't there yet, and a grace period is not in flight,
2988 	 * initiate a grace period.  Either way, tell the caller that
2989 	 * we are not there yet.  We use an xchg() rather than an assignment
2990 	 * to make up for the memory barriers that would otherwise be
2991 	 * provided by the memory allocator.
2992 	 */
2993 	if (nr_cpu_ids > CONFIG_NO_HZ_FULL_SYSIDLE_SMALL &&
2994 	    !rcu_gp_in_progress(rcu_state_p) &&
2995 	    !rsh.inuse && xchg(&rsh.inuse, 1) == 0)
2996 		call_rcu(&rsh.rh, rcu_sysidle_cb);
2997 	return false;
2998 }
2999 
3000 /*
3001  * Initialize dynticks sysidle state for CPUs coming online.
3002  */
3003 static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
3004 {
3005 	rdtp->dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE;
3006 }
3007 
3008 #else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
3009 
3010 static void rcu_sysidle_enter(int irq)
3011 {
3012 }
3013 
3014 static void rcu_sysidle_exit(int irq)
3015 {
3016 }
3017 
3018 static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
3019 				  unsigned long *maxj)
3020 {
3021 }
3022 
3023 static bool is_sysidle_rcu_state(struct rcu_state *rsp)
3024 {
3025 	return false;
3026 }
3027 
3028 static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
3029 				  unsigned long maxj)
3030 {
3031 }
3032 
3033 static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
3034 {
3035 }
3036 
3037 #endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
3038 
3039 /*
3040  * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
3041  * grace-period kthread will do force_quiescent_state() processing?
3042  * The idea is to avoid waking up RCU core processing on such a
3043  * CPU unless the grace period has extended for too long.
3044  *
3045  * This code relies on the fact that all NO_HZ_FULL CPUs are also
3046  * CONFIG_RCU_NOCB_CPU CPUs.
3047  */
3048 static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
3049 {
3050 #ifdef CONFIG_NO_HZ_FULL
3051 	if (tick_nohz_full_cpu(smp_processor_id()) &&
3052 	    (!rcu_gp_in_progress(rsp) ||
3053 	     ULONG_CMP_LT(jiffies, ACCESS_ONCE(rsp->gp_start) + HZ)))
3054 		return 1;
3055 #endif /* #ifdef CONFIG_NO_HZ_FULL */
3056 	return 0;
3057 }
3058 
3059 /*
3060  * Bind the grace-period kthread for the sysidle flavor of RCU to the
3061  * timekeeping CPU.
3062  */
3063 static void rcu_bind_gp_kthread(void)
3064 {
3065 	int __maybe_unused cpu;
3066 
3067 	if (!tick_nohz_full_enabled())
3068 		return;
3069 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
3070 	cpu = tick_do_timer_cpu;
3071 	if (cpu >= 0 && cpu < nr_cpu_ids && raw_smp_processor_id() != cpu)
3072 		set_cpus_allowed_ptr(current, cpumask_of(cpu));
3073 #else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
3074 	if (!is_housekeeping_cpu(raw_smp_processor_id()))
3075 		housekeeping_affine(current);
3076 #endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
3077 }
3078 
3079 /* Record the current task on dyntick-idle entry. */
3080 static void rcu_dynticks_task_enter(void)
3081 {
3082 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
3083 	ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id();
3084 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
3085 }
3086 
3087 /* Record no current task on dyntick-idle exit. */
3088 static void rcu_dynticks_task_exit(void)
3089 {
3090 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
3091 	ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1;
3092 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
3093 }
3094