/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 * Copyright SUSE, 2021
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.ibm.com>
 *	   Frederic Weisbecker <frederic@kernel.org>
 */

#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */

static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
{
	/* Race on early boot between thread creation and assignment */
	if (!rdp->nocb_cb_kthread || !rdp->nocb_gp_kthread)
		return true;

	if (current == rdp->nocb_cb_kthread || current == rdp->nocb_gp_kthread)
		if (in_task())
			return true;
	return false;
}

/*
 * Offload callback processing from the set of CPUs specified at boot
 * time by rcu_nocb_mask.  For the CPUs in this set, kthreads are
 * created that pull the callbacks from the corresponding CPU, wait for
 * a grace period to elapse, and invoke the callbacks.  These kthreads
 * are organized into GP kthreads, which manage incoming callbacks, wait
 * for grace periods, and awaken the CB kthreads, and into CB kthreads,
 * which only invoke callbacks.  Each GP kthread invokes its own CBs.
 * The no-CBs CPUs do a wake_up() on their GP kthread when they insert
 * a callback into any empty list, unless the rcu_nocb_poll boot
 * parameter has been specified, in which case each kthread actively
 * polls its CPU.  (Which isn't so great for energy efficiency, but
 * which does reduce RCU's overhead on that CPU.)
 *
 * This is intended to be used in conjunction with Frederic Weisbecker's
 * adaptive-idle work, which would seriously reduce OS jitter on CPUs
 * running CPU-bound user-mode computations.
 *
 * Offloading of callbacks can also be used as an energy-efficiency
 * measure because CPUs with no RCU callbacks queued are more aggressive
 * about entering dyntick-idle mode.
 */
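
/*
 * Illustrative usage sketch (not kernel code; hypothetical CPU numbers):
 * on an eight-CPU system, booting with
 *
 *	rcu_nocbs=1-7 rcu_nocb_poll
 *
 * offloads callback processing from CPUs 1-7 and makes the rcuog
 * kthreads poll instead of waiting for wakeups.  A hypothetical runtime
 * check of the resulting mask might look like this:
 *
 *	static bool cpu5_is_nocb(void)
 *	{
 *		return cpumask_available(rcu_nocb_mask) &&
 *		       cpumask_test_cpu(5, rcu_nocb_mask);
 *	}
 */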


/*
 * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
 * If the list is invalid, a warning is emitted and all CPUs are offloaded.
 */
static int __init rcu_nocb_setup(char *str)
{
	alloc_bootmem_cpumask_var(&rcu_nocb_mask);
	if (*str == '=') {
		if (cpulist_parse(++str, rcu_nocb_mask)) {
			pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
			cpumask_setall(rcu_nocb_mask);
		}
	}
	rcu_state.nocb_is_setup = true;
	return 1;
}
__setup("rcu_nocbs", rcu_nocb_setup);

static int __init parse_rcu_nocb_poll(char *arg)
{
	rcu_nocb_poll = true;
	return 1;
}
__setup("rcu_nocb_poll", parse_rcu_nocb_poll);

/*
 * Don't bother bypassing ->cblist if the call_rcu() rate is low.
 * After all, the main point of bypassing is to avoid lock contention
 * on ->nocb_lock, which can happen only at high call_rcu() rates.
 */
static int nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ;
module_param(nocb_nobypass_lim_per_jiffy, int, 0);
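
/*
 * Worked example (illustrative): the default limit targets 16,000
 * non-bypass enqueues per second independently of HZ.  With HZ=1000
 * this is 16 * 1000 / 1000 = 16 per jiffy, while with HZ=250 it is
 * 16 * 1000 / 250 = 64 per jiffy, either way 16,000 per second.
 */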

/*
 * Acquire the specified rcu_data structure's ->nocb_bypass_lock.  If the
 * lock isn't immediately available, perform a minimal sanity check.
 */
static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
	__acquires(&rdp->nocb_bypass_lock)
{
	lockdep_assert_irqs_disabled();
	if (raw_spin_trylock(&rdp->nocb_bypass_lock))
		return;
	/*
	 * Contention is expected only when a local enqueue collides with
	 * a remote flush from a kthread.
	 */
	WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
	raw_spin_lock(&rdp->nocb_bypass_lock);
}

/*
 * Conditionally acquire the specified rcu_data structure's
 * ->nocb_bypass_lock.
 */
static bool rcu_nocb_bypass_trylock(struct rcu_data *rdp)
{
	lockdep_assert_irqs_disabled();
	return raw_spin_trylock(&rdp->nocb_bypass_lock);
}

/*
 * Release the specified rcu_data structure's ->nocb_bypass_lock.
 */
static void rcu_nocb_bypass_unlock(struct rcu_data *rdp)
	__releases(&rdp->nocb_bypass_lock)
{
	lockdep_assert_irqs_disabled();
	raw_spin_unlock(&rdp->nocb_bypass_lock);
}

/*
 * Acquire the specified rcu_data structure's ->nocb_lock, but only
 * if it corresponds to a no-CBs CPU.
 */
static void rcu_nocb_lock(struct rcu_data *rdp)
{
	lockdep_assert_irqs_disabled();
	if (!rcu_rdp_is_offloaded(rdp))
		return;
	raw_spin_lock(&rdp->nocb_lock);
}

/*
 * Release the specified rcu_data structure's ->nocb_lock, but only
 * if it corresponds to a no-CBs CPU.
 */
static void rcu_nocb_unlock(struct rcu_data *rdp)
{
	if (rcu_rdp_is_offloaded(rdp)) {
		lockdep_assert_irqs_disabled();
		raw_spin_unlock(&rdp->nocb_lock);
	}
}

/*
 * Release the specified rcu_data structure's ->nocb_lock and restore
 * interrupts, but only if it corresponds to a no-CBs CPU.
 */
static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
				       unsigned long flags)
{
	if (rcu_rdp_is_offloaded(rdp)) {
		lockdep_assert_irqs_disabled();
		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
	} else {
		local_irq_restore(flags);
	}
}

/* Lockdep check that ->cblist may be safely accessed. */
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
{
	lockdep_assert_irqs_disabled();
	if (rcu_rdp_is_offloaded(rdp))
		lockdep_assert_held(&rdp->nocb_lock);
}

/*
 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
 * grace period.
 */
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
{
	swake_up_all(sq);
}

static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
{
	return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
}
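
/*
 * Illustrative note (derived from the rcu_seq encoding): the low
 * RCU_SEQ_CTR_SHIFT bits of ->gp_seq hold state, so rcu_seq_ctr()
 * shifts them off and the "& 0x1" above selects one of the two wait
 * queues.  Consecutive grace periods thus alternate between
 * ->nocb_gp_wq[0] and ->nocb_gp_wq[1], letting the just-ended grace
 * period's waiters be awakened while the next grace period's waiters
 * queue up on the other entry.
 */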

static void rcu_init_one_nocb(struct rcu_node *rnp)
{
	init_swait_queue_head(&rnp->nocb_gp_wq[0]);
	init_swait_queue_head(&rnp->nocb_gp_wq[1]);
}

static bool __wake_nocb_gp(struct rcu_data *rdp_gp,
			   struct rcu_data *rdp,
			   bool force, unsigned long flags)
	__releases(rdp_gp->nocb_gp_lock)
{
	bool needwake = false;

	if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) {
		raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
				    TPS("AlreadyAwake"));
		return false;
	}

	if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
		WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
		del_timer(&rdp_gp->nocb_timer);
	}

	if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
		WRITE_ONCE(rdp_gp->nocb_gp_sleep, false);
		needwake = true;
	}
	raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
	if (needwake) {
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
		swake_up_one_online(&rdp_gp->nocb_gp_wq);
	}

	return needwake;
}

/*
 * Kick the GP kthread for this NOCB group.
 */
static bool wake_nocb_gp(struct rcu_data *rdp, bool force)
{
	unsigned long flags;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
	return __wake_nocb_gp(rdp_gp, rdp, force, flags);
}

#ifdef CONFIG_RCU_LAZY
/*
 * LAZY_FLUSH_JIFFIES decides the maximum amount of time that
 * can elapse before lazy callbacks are flushed.  Lazy callbacks
 * can be flushed much earlier for a number of other reasons;
 * however, LAZY_FLUSH_JIFFIES ensures that no lazy callback is
 * left unsubmitted to RCU after that many jiffies (i.e., ten
 * seconds, since HZ jiffies equal one second).
 */
#define LAZY_FLUSH_JIFFIES (10 * HZ)
static unsigned long jiffies_lazy_flush = LAZY_FLUSH_JIFFIES;

// To be called only from test code.
void rcu_set_jiffies_lazy_flush(unsigned long jif)
{
	jiffies_lazy_flush = jif;
}
EXPORT_SYMBOL(rcu_set_jiffies_lazy_flush);

unsigned long rcu_get_jiffies_lazy_flush(void)
{
	return jiffies_lazy_flush;
}
EXPORT_SYMBOL(rcu_get_jiffies_lazy_flush);
#endif

/*
 * Arrange to wake the GP kthread for this NOCB group at some future
 * time when it is safe to do so.
 */
static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
			       const char *reason)
{
	unsigned long flags;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);

	/*
	 * A bypass wakeup overrides any previous deferment.  In case of
	 * callback storms, there is no need to wake up too early.
	 */
	if (waketype == RCU_NOCB_WAKE_LAZY &&
	    rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT) {
		mod_timer(&rdp_gp->nocb_timer, jiffies + rcu_get_jiffies_lazy_flush());
		WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
	} else if (waketype == RCU_NOCB_WAKE_BYPASS) {
		mod_timer(&rdp_gp->nocb_timer, jiffies + 2);
		WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
	} else {
		if (rdp_gp->nocb_defer_wakeup < RCU_NOCB_WAKE)
			mod_timer(&rdp_gp->nocb_timer, jiffies + 1);
		if (rdp_gp->nocb_defer_wakeup < waketype)
			WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
	}

	raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);

	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
}
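
/*
 * Deferral policy summary for wake_nocb_gp_defer() (derived from the
 * code above, for illustration):
 *
 *  - RCU_NOCB_WAKE_LAZY:   timer at jiffies + jiffies_lazy_flush, but
 *    only if no wakeup is currently deferred.
 *  - RCU_NOCB_WAKE_BYPASS: timer at jiffies + 2, overriding any
 *    earlier (and therefore longer) deferral.
 *  - Anything else:        timer at jiffies + 1 unless a non-lazy
 *    wakeup is already pending, and ->nocb_defer_wakeup is only ever
 *    escalated, never demoted.
 */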

/*
 * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
 * However, if there is a callback to be enqueued and if ->nocb_bypass
 * proves to be initially empty, just return false because the no-CB GP
 * kthread may need to be awakened in this case.
 *
 * Return true if there was something to be flushed and it succeeded, otherwise
 * false.
 *
 * Note that this function always returns true if rhp is NULL.
 */
static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp_in,
				     unsigned long j, bool lazy)
{
	struct rcu_cblist rcl;
	struct rcu_head *rhp = rhp_in;

	WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
	rcu_lockdep_assert_cblist_protected(rdp);
	lockdep_assert_held(&rdp->nocb_bypass_lock);
	if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) {
		raw_spin_unlock(&rdp->nocb_bypass_lock);
		return false;
	}
	/* Note: ->cblist.len already accounts for ->nocb_bypass contents. */
	if (rhp)
		rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */

	/*
	 * If the new CB requested was a lazy one, queue it onto the main
	 * ->cblist so that we can take advantage of the grace-period that will
	 * happen regardless.  But queue it onto the bypass list first so that
	 * the lazy CB is ordered with the existing CBs in the bypass list.
	 */
	if (lazy && rhp) {
		rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
		rhp = NULL;
	}
	rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
	WRITE_ONCE(rdp->lazy_len, 0);

	rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
	WRITE_ONCE(rdp->nocb_bypass_first, j);
	rcu_nocb_bypass_unlock(rdp);
	return true;
}

/*
 * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
 * However, if there is a callback to be enqueued and if ->nocb_bypass
 * proves to be initially empty, just return false because the no-CB GP
 * kthread may need to be awakened in this case.
 *
 * Note that this function always returns true if rhp is NULL.
 */
static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				  unsigned long j, bool lazy)
{
	if (!rcu_rdp_is_offloaded(rdp))
		return true;
	rcu_lockdep_assert_cblist_protected(rdp);
	rcu_nocb_bypass_lock(rdp);
	return rcu_nocb_do_flush_bypass(rdp, rhp, j, lazy);
}

/*
 * If the ->nocb_bypass_lock is immediately available, flush the
 * ->nocb_bypass queue into ->cblist.
 */
static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j)
{
	rcu_lockdep_assert_cblist_protected(rdp);
	if (!rcu_rdp_is_offloaded(rdp) ||
	    !rcu_nocb_bypass_trylock(rdp))
		return;
	WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j, false));
}

/*
 * See whether it is appropriate to use the ->nocb_bypass list in order
 * to control contention on ->nocb_lock.  A limited number of direct
 * enqueues are permitted into ->cblist per jiffy.  If ->nocb_bypass
 * is non-empty, further callbacks must be placed into ->nocb_bypass,
 * otherwise rcu_barrier() breaks.  Use rcu_nocb_flush_bypass() to switch
 * back to direct use of ->cblist.  However, ->nocb_bypass should not be
 * used if ->cblist is empty, because otherwise callbacks can be stranded
 * on ->nocb_bypass because we cannot count on the current CPU ever again
 * invoking call_rcu().  The general rule is that if ->nocb_bypass is
 * non-empty, the corresponding no-CBs grace-period kthread must not be
 * in an indefinite sleep state.
 *
 * Finally, it is not permitted to use the bypass during early boot,
 * as doing so would confuse the auto-initialization code.  Besides
 * which, there is no point in worrying about lock contention while
 * there is only one CPU in operation.
 */
static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				bool *was_alldone, unsigned long flags,
				bool lazy)
{
	unsigned long c;
	unsigned long cur_gp_seq;
	unsigned long j = jiffies;
	long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
	bool bypass_is_lazy = (ncbs == READ_ONCE(rdp->lazy_len));

	lockdep_assert_irqs_disabled();

	// Pure softirq/rcuc based processing: no bypassing, no
	// locking.
	if (!rcu_rdp_is_offloaded(rdp)) {
		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
		return false;
	}

	// Don't use ->nocb_bypass during early boot.
	if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
		rcu_nocb_lock(rdp);
		WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
		return false;
	}

	// If we have advanced to a new jiffy, reset counts to allow
	// moving back from ->nocb_bypass to ->cblist.
	if (j == rdp->nocb_nobypass_last) {
		c = rdp->nocb_nobypass_count + 1;
	} else {
		WRITE_ONCE(rdp->nocb_nobypass_last, j);
		c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy;
		if (ULONG_CMP_LT(rdp->nocb_nobypass_count,
				 nocb_nobypass_lim_per_jiffy))
			c = 0;
		else if (c > nocb_nobypass_lim_per_jiffy)
			c = nocb_nobypass_lim_per_jiffy;
	}
	WRITE_ONCE(rdp->nocb_nobypass_count, c);
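
	// Worked example (illustrative, assuming the default limit of 16
	// per jiffy): a count of 40 at the end of one jiffy decays to 16
	// (clamped) on the first enqueue of the next jiffy, then to 0 one
	// jiffy later, after which direct ->cblist enqueues are permitted
	// again.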

	// If there hasn't yet been all that many ->cblist enqueues
	// this jiffy, tell the caller to enqueue onto ->cblist.  But flush
	// ->nocb_bypass first.
	// Lazy CBs throttle this back and do immediate bypass queuing.
	if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy && !lazy) {
		rcu_nocb_lock(rdp);
		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
		if (*was_alldone)
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("FirstQ"));

		WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, j, false));
		WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
		return false; // Caller must enqueue the callback.
	}

	// If ->nocb_bypass has been used too long or is too full,
	// flush ->nocb_bypass to ->cblist.
	if ((ncbs && !bypass_is_lazy && j != READ_ONCE(rdp->nocb_bypass_first)) ||
	    (ncbs &&  bypass_is_lazy &&
	     (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + rcu_get_jiffies_lazy_flush()))) ||
	    ncbs >= qhimark) {
		rcu_nocb_lock(rdp);
		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);

		if (!rcu_nocb_flush_bypass(rdp, rhp, j, lazy)) {
			if (*was_alldone)
				trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
						    TPS("FirstQ"));
			WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
			return false; // Caller must enqueue the callback.
		}
		if (j != rdp->nocb_gp_adv_time &&
		    rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
		    rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
			rcu_advance_cbs_nowake(rdp->mynode, rdp);
			rdp->nocb_gp_adv_time = j;
		}

		// The flush succeeded and we moved CBs into the regular list.
		// Don't wait for the wake up timer as it may be too far ahead.
		// Wake up the GP thread now instead, if the cblist was empty.
		__call_rcu_nocb_wake(rdp, *was_alldone, flags);

		return true; // Callback already enqueued.
	}

	// We need to use the bypass.
	rcu_nocb_bypass_lock(rdp);
	ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
	rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
	rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);

	if (lazy)
		WRITE_ONCE(rdp->lazy_len, rdp->lazy_len + 1);

	if (!ncbs) {
		WRITE_ONCE(rdp->nocb_bypass_first, j);
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));
	}
	rcu_nocb_bypass_unlock(rdp);

	// A wake up of the grace period kthread or timer adjustment
	// needs to be done only if:
	// 1. Bypass list was fully empty before (this is the first
	//    bypass list entry), or:
	// 2. Both of these conditions are met:
	//    a. The bypass list previously had only lazy CBs, and:
	//    b. The new CB is non-lazy.
	if (!ncbs || (bypass_is_lazy && !lazy)) {
		// No-CBs GP kthread might be indefinitely asleep, if so, wake.
		rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
		if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("FirstBQwake"));
			__call_rcu_nocb_wake(rdp, true, flags);
		} else {
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("FirstBQnoWake"));
			rcu_nocb_unlock(rdp);
		}
	}
	return true; // Callback already enqueued.
}
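
/*
 * Decision summary for rcu_nocb_try_bypass() (derived from the code,
 * for illustration):
 *
 *  - Not offloaded or early boot:  return false, caller enqueues
 *    directly onto ->cblist.
 *  - Under the per-jiffy limit and non-lazy:  flush any bypass CBs,
 *    then return false so the caller enqueues onto ->cblist.
 *  - Bypass too old (new jiffy for non-lazy CBs, lazy timeout for an
 *    all-lazy list) or too full (>= qhimark):  flush the bypass plus
 *    the new CB into ->cblist and return true.
 *  - Otherwise:  append the new CB to ->nocb_bypass and return true,
 *    waking or re-arming the GP kthread only for the first entry or
 *    for the first non-lazy entry on an all-lazy list.
 */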

/*
 * Awaken the no-CBs grace-period kthread if needed, either due to it
 * legitimately being asleep or due to overload conditions.
 *
 * If warranted, also wake up the kthread servicing this CPU's queues.
 */
static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
				 unsigned long flags)
				 __releases(rdp->nocb_lock)
{
	long bypass_len;
	unsigned long cur_gp_seq;
	unsigned long j;
	long lazy_len;
	long len;
	struct task_struct *t;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	// If we are being polled or there is no kthread, just leave.
	t = READ_ONCE(rdp->nocb_gp_kthread);
	if (rcu_nocb_poll || !t) {
		rcu_nocb_unlock(rdp);
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
				    TPS("WakeNotPoll"));
		return;
	}
	// Need to actually do a wakeup.
	len = rcu_segcblist_n_cbs(&rdp->cblist);
	bypass_len = rcu_cblist_n_cbs(&rdp->nocb_bypass);
	lazy_len = READ_ONCE(rdp->lazy_len);
	if (was_alldone) {
		rdp->qlen_last_fqs_check = len;
		// Only lazy CBs in bypass list
		if (lazy_len && bypass_len == lazy_len) {
			rcu_nocb_unlock(rdp);
			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_LAZY,
					   TPS("WakeLazy"));
		} else if (!irqs_disabled_flags(flags) && cpu_online(rdp->cpu)) {
			/* ... if queue was empty ... */
			rcu_nocb_unlock(rdp);
			wake_nocb_gp(rdp, false);
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("WakeEmpty"));
		} else {
			/*
			 * Don't do the wake-up upfront on fragile paths.
			 * Also offline CPUs can't call swake_up_one_online()
			 * from (soft-)IRQs.  Rely on the final deferred
			 * wake-up from rcutree_report_cpu_dead().
			 */
			rcu_nocb_unlock(rdp);
			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
					   TPS("WakeEmptyIsDeferred"));
		}
	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
		/* ... or if many callbacks queued. */
		rdp->qlen_last_fqs_check = len;
		j = jiffies;
		if (j != rdp->nocb_gp_adv_time &&
		    rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
		    rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
			rcu_advance_cbs_nowake(rdp->mynode, rdp);
			rdp->nocb_gp_adv_time = j;
		}
		smp_mb(); /* Enqueue before timer_pending(). */
		if ((rdp->nocb_cb_sleep ||
		     !rcu_segcblist_ready_cbs(&rdp->cblist)) &&
		    !timer_pending(&rdp_gp->nocb_timer)) {
			rcu_nocb_unlock(rdp);
			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
					   TPS("WakeOvfIsDeferred"));
		} else {
			rcu_nocb_unlock(rdp);
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
		}
	} else {
		rcu_nocb_unlock(rdp);
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
	}
}
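
/*
 * Wake-policy summary for __call_rcu_nocb_wake() (derived from the
 * code, for illustration): an enqueue onto a previously empty list
 * produces an immediate wakeup (WakeEmpty), a deferred one
 * (WakeEmptyIsDeferred) on fragile paths, or a lazy deferred one
 * (WakeLazy) when the bypass list holds only lazy CBs.  An enqueue
 * that grows the list by more than qhimark since the last check
 * instead forces a deferred wakeup (WakeOvfIsDeferred), unless ready
 * callbacks are already being processed or a deferred-wakeup timer is
 * already pending.
 */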

static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head,
			  rcu_callback_t func, unsigned long flags, bool lazy)
{
	bool was_alldone;

	if (!rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy)) {
		/* Not enqueued on bypass but locked, do regular enqueue */
		rcutree_enqueue(rdp, head, func);
		__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
	}
}

static void nocb_gp_toggle_rdp(struct rcu_data *rdp_gp, struct rcu_data *rdp)
{
	struct rcu_segcblist *cblist = &rdp->cblist;
	unsigned long flags;

	/*
	 * Locking orders future de-offloaded callback enqueues against
	 * previous handling of this rdp, i.e., it makes sure that rcuog
	 * is done with this rdp before de-offloaded callbacks can be
	 * enqueued.
	 */
	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
	if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
		/*
		 * Offloading.  Set our flag and notify the offload worker.
		 * We will handle this rdp until it gets de-offloaded.
		 */
		list_add_tail(&rdp->nocb_entry_rdp, &rdp_gp->nocb_head_rdp);
		rcu_segcblist_set_flags(cblist, SEGCBLIST_OFFLOADED);
	} else {
		/*
		 * De-offloading.  Clear our flag and notify the de-offload
		 * worker.  We will ignore this rdp until it gets re-offloaded.
		 */
		list_del(&rdp->nocb_entry_rdp);
		rcu_segcblist_clear_flags(cblist, SEGCBLIST_OFFLOADED);
	}
	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
}

static void nocb_gp_sleep(struct rcu_data *my_rdp, int cpu)
{
	trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Sleep"));
	swait_event_interruptible_exclusive(my_rdp->nocb_gp_wq,
					!READ_ONCE(my_rdp->nocb_gp_sleep));
	trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("EndSleep"));
}

/*
 * No-CBs GP kthreads come here to wait for additional callbacks to show up
 * or for grace periods to end.
 */
static void nocb_gp_wait(struct rcu_data *my_rdp)
{
	bool bypass = false;
	int __maybe_unused cpu = my_rdp->cpu;
	unsigned long cur_gp_seq;
	unsigned long flags;
	bool gotcbs = false;
	unsigned long j = jiffies;
	bool lazy = false;
	bool needwait_gp = false; // This prevents actual uninitialized use.
	bool needwake;
	bool needwake_gp;
	struct rcu_data *rdp, *rdp_toggling = NULL;
	struct rcu_node *rnp;
	unsigned long wait_gp_seq = 0; // Suppress "use uninitialized" warning.
	bool wasempty = false;

	/*
	 * Each pass through the following loop checks for CBs and for the
	 * nearest grace period (if any) to wait for next.  The CB kthreads
	 * and the global grace-period kthread are awakened if needed.
	 */
	WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp);
	/*
	 * An rcu_data structure is removed from the list after its
	 * CPU is de-offloaded and added to the list before that CPU is
	 * (re-)offloaded.  If the following loop happens to be referencing
	 * that rcu_data structure during the time that the corresponding
	 * CPU is de-offloaded and then immediately re-offloaded, this
	 * loop's rdp pointer will be carried to the end of the list by
	 * the resulting pair of list operations.  This can cause the loop
	 * to skip over some of the rcu_data structures that were supposed
	 * to have been scanned.  Fortunately a new iteration through the
	 * entire loop is forced after a given CPU's rcu_data structure
	 * is added to the list, so the skipped-over rcu_data structures
	 * won't be ignored for long.
	 */
	list_for_each_entry(rdp, &my_rdp->nocb_head_rdp, nocb_entry_rdp) {
		long bypass_ncbs;
		bool flush_bypass = false;
		long lazy_ncbs;

		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
		rcu_nocb_lock_irqsave(rdp, flags);
		lockdep_assert_held(&rdp->nocb_lock);
		bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
		lazy_ncbs = READ_ONCE(rdp->lazy_len);

		if (bypass_ncbs && (lazy_ncbs == bypass_ncbs) &&
		    (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + rcu_get_jiffies_lazy_flush()) ||
		     bypass_ncbs > 2 * qhimark)) {
			flush_bypass = true;
		} else if (bypass_ncbs && (lazy_ncbs != bypass_ncbs) &&
		    (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) ||
		     bypass_ncbs > 2 * qhimark)) {
			flush_bypass = true;
		} else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) {
			rcu_nocb_unlock_irqrestore(rdp, flags);
			continue; /* No callbacks here, try next. */
		}

		if (flush_bypass) {
			// Bypass full or old, so flush it.
			(void)rcu_nocb_try_flush_bypass(rdp, j);
			bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
			lazy_ncbs = READ_ONCE(rdp->lazy_len);
		}

		if (bypass_ncbs) {
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    bypass_ncbs == lazy_ncbs ? TPS("Lazy") : TPS("Bypass"));
			if (bypass_ncbs == lazy_ncbs)
				lazy = true;
			else
				bypass = true;
		}
		rnp = rdp->mynode;

		// Advance callbacks if helpful and low contention.
		needwake_gp = false;
		if (!rcu_segcblist_restempty(&rdp->cblist,
					     RCU_NEXT_READY_TAIL) ||
		    (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
		     rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) {
			raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
			needwake_gp = rcu_advance_cbs(rnp, rdp);
			wasempty = rcu_segcblist_restempty(&rdp->cblist,
							   RCU_NEXT_READY_TAIL);
			raw_spin_unlock_rcu_node(rnp); /* irqs disabled. */
		}
		// Need to wait on some grace period?
		WARN_ON_ONCE(wasempty &&
			     !rcu_segcblist_restempty(&rdp->cblist,
						      RCU_NEXT_READY_TAIL));
		if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq)) {
			if (!needwait_gp ||
			    ULONG_CMP_LT(cur_gp_seq, wait_gp_seq))
				wait_gp_seq = cur_gp_seq;
			needwait_gp = true;
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("NeedWaitGP"));
		}
		if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
			needwake = rdp->nocb_cb_sleep;
			WRITE_ONCE(rdp->nocb_cb_sleep, false);
		} else {
			needwake = false;
		}
		rcu_nocb_unlock_irqrestore(rdp, flags);
		if (needwake) {
			swake_up_one(&rdp->nocb_cb_wq);
			gotcbs = true;
		}
		if (needwake_gp)
			rcu_gp_kthread_wake();
	}

	my_rdp->nocb_gp_bypass = bypass;
	my_rdp->nocb_gp_gp = needwait_gp;
	my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0;

	// At least one child with non-empty ->nocb_bypass, so set
	// timer in order to avoid stranding its callbacks.
	if (!rcu_nocb_poll) {
		// If the bypass list has only lazy CBs, add a deferred lazy wakeup.
		if (lazy && !bypass) {
			wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_LAZY,
					TPS("WakeLazyIsDeferred"));
		// Otherwise add a deferred bypass wakeup.
		} else if (bypass) {
			wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_BYPASS,
					TPS("WakeBypassIsDeferred"));
		}
	}

	if (rcu_nocb_poll) {
		/* Polling, so trace if first poll in the series. */
		if (gotcbs)
			trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Poll"));
		if (list_empty(&my_rdp->nocb_head_rdp)) {
			raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
			if (!my_rdp->nocb_toggling_rdp)
				WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
			raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
			/* Wait for any offloading rdp */
			nocb_gp_sleep(my_rdp, cpu);
		} else {
			schedule_timeout_idle(1);
		}
	} else if (!needwait_gp) {
		/* Wait for callbacks to appear. */
		nocb_gp_sleep(my_rdp, cpu);
	} else {
		rnp = my_rdp->mynode;
		trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("StartWait"));
		swait_event_interruptible_exclusive(
			rnp->nocb_gp_wq[rcu_seq_ctr(wait_gp_seq) & 0x1],
			rcu_seq_done(&rnp->gp_seq, wait_gp_seq) ||
			!READ_ONCE(my_rdp->nocb_gp_sleep));
		trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("EndWait"));
	}

	if (!rcu_nocb_poll) {
		raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
		// (De-)queue an rdp to/from the group if its nocb state is changing
		rdp_toggling = my_rdp->nocb_toggling_rdp;
		if (rdp_toggling)
			my_rdp->nocb_toggling_rdp = NULL;

		if (my_rdp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
			WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
			del_timer(&my_rdp->nocb_timer);
		}
		WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
		raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
	} else {
		rdp_toggling = READ_ONCE(my_rdp->nocb_toggling_rdp);
		if (rdp_toggling) {
			/*
			 * Paranoid locking to make sure nocb_toggling_rdp is
			 * fully reset *before* we (re)set SEGCBLIST_KTHREAD_GP,
			 * lest we race with another round of nocb toggling for
			 * this rdp.  Nocb locking should already prevent that,
			 * but we stick to paranoia, especially on this rare
			 * path.
			 */
			raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
			my_rdp->nocb_toggling_rdp = NULL;
			raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
		}
	}

	if (rdp_toggling) {
		nocb_gp_toggle_rdp(my_rdp, rdp_toggling);
		swake_up_one(&rdp_toggling->nocb_state_wq);
	}

	my_rdp->nocb_gp_seq = -1;
	WARN_ON(signal_pending(current));
}

/*
 * No-CBs grace-period-wait kthread.  There is one of these per group
 * of CPUs, but only once at least one CPU in that group has come online
 * at least once since boot.  This kthread checks for newly posted
 * callbacks from any of the CPUs it is responsible for, waits for a
 * grace period, then awakens all of the rcu_nocb_cb_kthread() instances
 * that then have callback-invocation work to do.
 */
static int rcu_nocb_gp_kthread(void *arg)
{
	struct rcu_data *rdp = arg;

	for (;;) {
		WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1);
		nocb_gp_wait(rdp);
		cond_resched_tasks_rcu_qs();
	}
	return 0;
}

static inline bool nocb_cb_wait_cond(struct rcu_data *rdp)
{
	return !READ_ONCE(rdp->nocb_cb_sleep) || kthread_should_park();
}

/*
 * Invoke any ready callbacks from the corresponding no-CBs CPU,
 * then, if there are no more, wait for more to appear.
 */
static void nocb_cb_wait(struct rcu_data *rdp)
{
	struct rcu_segcblist *cblist = &rdp->cblist;
	unsigned long cur_gp_seq;
	unsigned long flags;
	bool needwake_gp = false;
	struct rcu_node *rnp = rdp->mynode;

	swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
					    nocb_cb_wait_cond(rdp));
	if (kthread_should_park()) {
		kthread_parkme();
	} else if (READ_ONCE(rdp->nocb_cb_sleep)) {
		WARN_ON(signal_pending(current));
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
	}

	WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));

	local_irq_save(flags);
	rcu_momentary_eqs();
	local_irq_restore(flags);
	/*
	 * Disable BH to provide the expected environment.  Also, when
	 * transitioning to/from NOCB mode, a self-requeuing callback might
	 * be invoked from softirq.  A short grace period could cause both
	 * instances of this callback to execute concurrently.
	 */
	local_bh_disable();
	rcu_do_batch(rdp);
	local_bh_enable();
	lockdep_assert_irqs_enabled();
	rcu_nocb_lock_irqsave(rdp, flags);
	if (rcu_segcblist_nextgp(cblist, &cur_gp_seq) &&
	    rcu_seq_done(&rnp->gp_seq, cur_gp_seq) &&
	    raw_spin_trylock_rcu_node(rnp)) { /* irqs already disabled. */
		needwake_gp = rcu_advance_cbs(rdp->mynode, rdp);
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
	}

	if (!rcu_segcblist_ready_cbs(cblist)) {
		WRITE_ONCE(rdp->nocb_cb_sleep, true);
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
	} else {
		WRITE_ONCE(rdp->nocb_cb_sleep, false);
	}

	rcu_nocb_unlock_irqrestore(rdp, flags);
	if (needwake_gp)
		rcu_gp_kthread_wake();
}

/*
 * Per-rcu_data kthread, but only for no-CBs CPUs.  Repeatedly invoke
 * nocb_cb_wait() to do the dirty work.
 */
static int rcu_nocb_cb_kthread(void *arg)
{
	struct rcu_data *rdp = arg;

	// Each pass through this loop does one callback batch, and,
	// if there are no more ready callbacks, waits for them.
	for (;;) {
		nocb_cb_wait(rdp);
		cond_resched_tasks_rcu_qs();
	}
	return 0;
}

/* Is a deferred wakeup of rcu_nocb_kthread() required? */
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
{
	return READ_ONCE(rdp->nocb_defer_wakeup) >= level;
}

/* Do a deferred wakeup of rcu_nocb_kthread(). */
static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp_gp,
					   struct rcu_data *rdp, int level,
					   unsigned long flags)
	__releases(rdp_gp->nocb_gp_lock)
{
	int ndw;
	int ret;

	if (!rcu_nocb_need_deferred_wakeup(rdp_gp, level)) {
		raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
		return false;
	}

	ndw = rdp_gp->nocb_defer_wakeup;
	ret = __wake_nocb_gp(rdp_gp, rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));

	return ret;
}

/* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
{
	unsigned long flags;
	struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);

	WARN_ON_ONCE(rdp->nocb_gp_rdp != rdp);
	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer"));

	raw_spin_lock_irqsave(&rdp->nocb_gp_lock, flags);
	smp_mb__after_spinlock(); /* Timer expire before wakeup. */
	do_nocb_deferred_wakeup_common(rdp, rdp, RCU_NOCB_WAKE_BYPASS, flags);
}

/*
 * Do a deferred wakeup of rcu_nocb_kthread() from the fastpath.
 * This means we do an inexact common-case check.  Note that if
 * we miss, ->nocb_timer will eventually clean things up.
 */
static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
	unsigned long flags;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	if (!rdp_gp || !rcu_nocb_need_deferred_wakeup(rdp_gp, RCU_NOCB_WAKE))
		return false;

	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
	return do_nocb_deferred_wakeup_common(rdp_gp, rdp, RCU_NOCB_WAKE, flags);
}

void rcu_nocb_flush_deferred_wakeup(void)
{
	do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data));
}
EXPORT_SYMBOL_GPL(rcu_nocb_flush_deferred_wakeup);

static int rcu_nocb_queue_toggle_rdp(struct rcu_data *rdp)
{
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
	bool wake_gp = false;
	unsigned long flags;

	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
	// Queue this rdp for add/del to/from the list to iterate on rcuog
	WRITE_ONCE(rdp_gp->nocb_toggling_rdp, rdp);
	if (rdp_gp->nocb_gp_sleep) {
		rdp_gp->nocb_gp_sleep = false;
		wake_gp = true;
	}
	raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);

	return wake_gp;
}

static bool rcu_nocb_rdp_deoffload_wait_cond(struct rcu_data *rdp)
{
	unsigned long flags;
	bool ret;

	/*
	 * Locking makes sure that rcuog is done handling this rdp before
	 * de-offloaded enqueues can happen.  It also keeps the
	 * SEGCBLIST_OFFLOADED flag stable while ->nocb_lock is held.
	 */
	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
	ret = !rcu_segcblist_test_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);

	return ret;
}

static int rcu_nocb_rdp_deoffload(struct rcu_data *rdp)
{
	unsigned long flags;
	int wake_gp;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	/* CPU must be offline, unless it's early boot */
	WARN_ON_ONCE(cpu_online(rdp->cpu) && rdp->cpu != raw_smp_processor_id());

	pr_info("De-offloading %d\n", rdp->cpu);

	/* Flush all callbacks from segcblist and bypass */
	rcu_barrier();

	/*
	 * Make sure the rcuoc kthread isn't in the middle of a nocb locked
	 * sequence while offloading is deactivated, along with nocb locking.
	 */
	if (rdp->nocb_cb_kthread)
		kthread_park(rdp->nocb_cb_kthread);

	rcu_nocb_lock_irqsave(rdp, flags);
	WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
	WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp->cblist));
	rcu_nocb_unlock_irqrestore(rdp, flags);

	wake_gp = rcu_nocb_queue_toggle_rdp(rdp);

	mutex_lock(&rdp_gp->nocb_gp_kthread_mutex);

	if (rdp_gp->nocb_gp_kthread) {
		if (wake_gp)
			wake_up_process(rdp_gp->nocb_gp_kthread);

		swait_event_exclusive(rdp->nocb_state_wq,
				      rcu_nocb_rdp_deoffload_wait_cond(rdp));
	} else {
		/*
		 * No kthread to clear the flags for us or to remove the rdp
		 * from the nocb list to iterate on.  Do it here instead.
		 * Locking doesn't look strictly necessary, but we stick to
		 * paranoia on this rare path.
		 */
		raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
		rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);

		list_del(&rdp->nocb_entry_rdp);
	}

	mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);

	return 0;
}

int rcu_nocb_cpu_deoffload(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	int ret = 0;

	cpus_read_lock();
	mutex_lock(&rcu_state.nocb_mutex);
	if (rcu_rdp_is_offloaded(rdp)) {
		if (!cpu_online(cpu)) {
			ret = rcu_nocb_rdp_deoffload(rdp);
			if (!ret)
				cpumask_clear_cpu(cpu, rcu_nocb_mask);
		} else {
			pr_info("NOCB: Cannot CB-deoffload online CPU %d\n", rdp->cpu);
			ret = -EINVAL;
		}
	}
	mutex_unlock(&rcu_state.nocb_mutex);
	cpus_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rcu_nocb_cpu_deoffload);

static bool rcu_nocb_rdp_offload_wait_cond(struct rcu_data *rdp)
{
	unsigned long flags;
	bool ret;

	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
	ret = rcu_segcblist_test_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);

	return ret;
}

static int rcu_nocb_rdp_offload(struct rcu_data *rdp)
{
	int wake_gp;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	WARN_ON_ONCE(cpu_online(rdp->cpu));
	/*
	 * For now we only support re-offload, i.e., the rdp must have been
	 * offloaded at boot first.
	 */
	if (!rdp->nocb_gp_rdp)
		return -EINVAL;

	if (WARN_ON_ONCE(!rdp_gp->nocb_gp_kthread))
		return -EINVAL;

	pr_info("Offloading %d\n", rdp->cpu);

	WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
	WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp->cblist));

	wake_gp = rcu_nocb_queue_toggle_rdp(rdp);
	if (wake_gp)
		wake_up_process(rdp_gp->nocb_gp_kthread);

	swait_event_exclusive(rdp->nocb_state_wq,
			      rcu_nocb_rdp_offload_wait_cond(rdp));

	kthread_unpark(rdp->nocb_cb_kthread);

	return 0;
}

int rcu_nocb_cpu_offload(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	int ret = 0;

	cpus_read_lock();
	mutex_lock(&rcu_state.nocb_mutex);
	if (!rcu_rdp_is_offloaded(rdp)) {
		if (!cpu_online(cpu)) {
			ret = rcu_nocb_rdp_offload(rdp);
			if (!ret)
				cpumask_set_cpu(cpu, rcu_nocb_mask);
		} else {
			pr_info("NOCB: Cannot CB-offload online CPU %d\n", rdp->cpu);
			ret = -EINVAL;
		}
	}
	mutex_unlock(&rcu_state.nocb_mutex);
	cpus_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rcu_nocb_cpu_offload);
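
/*
 * Illustrative usage sketch (hypothetical caller, not kernel code):
 * toggling offload for CPU 3 from some control path.  The CPU must be
 * offline across the toggle, so such a caller might do:
 *
 *	remove_cpu(3);				// Take CPU 3 offline.
 *	if (rcu_nocb_cpu_offload(3))		// Returns 0 on success.
 *		pr_warn("offload of CPU 3 failed\n");
 *	add_cpu(3);				// Bring CPU 3 back online.
 *
 * rcu_nocb_cpu_deoffload() follows the same pattern in reverse.
 */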

#ifdef CONFIG_RCU_LAZY
static unsigned long
lazy_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	int cpu;
	unsigned long count = 0;

	if (WARN_ON_ONCE(!cpumask_available(rcu_nocb_mask)))
		return 0;

	/* Protect rcu_nocb_mask against concurrent (de-)offloading. */
	if (!mutex_trylock(&rcu_state.nocb_mutex))
		return 0;

	/* Snapshot count of all CPUs */
	for_each_cpu(cpu, rcu_nocb_mask) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

		count += READ_ONCE(rdp->lazy_len);
	}

	mutex_unlock(&rcu_state.nocb_mutex);

	return count ? count : SHRINK_EMPTY;
}

static unsigned long
lazy_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	int cpu;
	unsigned long flags;
	unsigned long count = 0;

	if (WARN_ON_ONCE(!cpumask_available(rcu_nocb_mask)))
		return 0;
	/*
	 * Protect against concurrent (de-)offloading.  Otherwise nocb locking
	 * may be ignored or imbalanced.
	 */
	if (!mutex_trylock(&rcu_state.nocb_mutex)) {
		/*
		 * But really don't insist if nocb_mutex is contended since we
		 * can't guarantee that it will never engage in a dependency
		 * chain involving memory allocation.  The lock is seldom
		 * contended anyway.
		 */
		return 0;
	}

	/* Snapshot count of all CPUs */
	for_each_cpu(cpu, rcu_nocb_mask) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		int _count;

		if (WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp)))
			continue;

		if (!READ_ONCE(rdp->lazy_len))
			continue;

		rcu_nocb_lock_irqsave(rdp, flags);
		/*
		 * Recheck under the nocb lock.  Since we are not holding the
		 * bypass lock, we may still race with increments from the
		 * enqueuer, but we still know for sure whether there is at
		 * least one lazy callback.
		 */
		_count = READ_ONCE(rdp->lazy_len);
		if (!_count) {
			rcu_nocb_unlock_irqrestore(rdp, flags);
			continue;
		}
		rcu_nocb_try_flush_bypass(rdp, jiffies);
		rcu_nocb_unlock_irqrestore(rdp, flags);
		wake_nocb_gp(rdp, false);
		sc->nr_to_scan -= _count;
		count += _count;
		if (sc->nr_to_scan <= 0)
			break;
	}

	mutex_unlock(&rcu_state.nocb_mutex);

	return count ? count : SHRINK_STOP;
}
#endif // #ifdef CONFIG_RCU_LAZY

void __init rcu_init_nohz(void)
{
	int cpu;
	struct rcu_data *rdp;
	const struct cpumask *cpumask = NULL;
	struct shrinker * __maybe_unused lazy_rcu_shrinker;

#if defined(CONFIG_NO_HZ_FULL)
	if (tick_nohz_full_running && !cpumask_empty(tick_nohz_full_mask))
		cpumask = tick_nohz_full_mask;
#endif

	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_DEFAULT_ALL) &&
	    !rcu_state.nocb_is_setup && !cpumask)
		cpumask = cpu_possible_mask;

	if (cpumask) {
		if (!cpumask_available(rcu_nocb_mask)) {
			if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
				pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
				return;
			}
		}

		cpumask_or(rcu_nocb_mask, rcu_nocb_mask, cpumask);
		rcu_state.nocb_is_setup = true;
	}

	if (!rcu_state.nocb_is_setup)
		return;

#ifdef CONFIG_RCU_LAZY
	lazy_rcu_shrinker = shrinker_alloc(0, "rcu-lazy");
	if (!lazy_rcu_shrinker) {
		pr_err("Failed to allocate lazy_rcu shrinker!\n");
	} else {
		lazy_rcu_shrinker->count_objects = lazy_rcu_shrink_count;
		lazy_rcu_shrinker->scan_objects = lazy_rcu_shrink_scan;

		shrinker_register(lazy_rcu_shrinker);
	}
#endif // #ifdef CONFIG_RCU_LAZY

	if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
		pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n");
		cpumask_and(rcu_nocb_mask, cpu_possible_mask,
			    rcu_nocb_mask);
	}
	if (cpumask_empty(rcu_nocb_mask))
		pr_info("\tOffload RCU callbacks from CPUs: (none).\n");
	else
		pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
			cpumask_pr_args(rcu_nocb_mask));
	if (rcu_nocb_poll)
		pr_info("\tPoll for callbacks from no-CBs CPUs.\n");

	for_each_cpu(cpu, rcu_nocb_mask) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (rcu_segcblist_empty(&rdp->cblist))
			rcu_segcblist_init(&rdp->cblist);
		rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
	}
	rcu_organize_nocb_kthreads();
}

/* Initialize per-rcu_data variables for no-CBs CPUs. */
static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
	init_swait_queue_head(&rdp->nocb_cb_wq);
	init_swait_queue_head(&rdp->nocb_gp_wq);
	init_swait_queue_head(&rdp->nocb_state_wq);
	raw_spin_lock_init(&rdp->nocb_lock);
	raw_spin_lock_init(&rdp->nocb_bypass_lock);
	raw_spin_lock_init(&rdp->nocb_gp_lock);
	timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
	rcu_cblist_init(&rdp->nocb_bypass);
	WRITE_ONCE(rdp->lazy_len, 0);
	mutex_init(&rdp->nocb_gp_kthread_mutex);
}

/*
 * If the specified CPU is a no-CBs CPU that does not already have its
 * rcuo CB kthread, spawn it.  Additionally, if the rcuo GP kthread
 * for this CPU's group has not yet been created, spawn it as well.
 */
static void rcu_spawn_cpu_nocb_kthread(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	struct rcu_data *rdp_gp;
	struct task_struct *t;
	struct sched_param sp;

	if (!rcu_scheduler_fully_active || !rcu_state.nocb_is_setup)
		return;

	/* If there already is an rcuo kthread, then nothing to do. */
	if (rdp->nocb_cb_kthread)
		return;

	/* If we didn't spawn the GP kthread first, reorganize! */
	sp.sched_priority = kthread_prio;
	rdp_gp = rdp->nocb_gp_rdp;
	mutex_lock(&rdp_gp->nocb_gp_kthread_mutex);
	if (!rdp_gp->nocb_gp_kthread) {
		t = kthread_run(rcu_nocb_gp_kthread, rdp_gp,
				"rcuog/%d", rdp_gp->cpu);
		if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__)) {
			mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
			goto err;
		}
		WRITE_ONCE(rdp_gp->nocb_gp_kthread, t);
		if (kthread_prio)
			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	}
	mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);

	/* Spawn the kthread for this CPU. */
	t = kthread_create(rcu_nocb_cb_kthread, rdp,
			   "rcuo%c/%d", rcu_state.abbr, cpu);
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
		goto err;

	if (rcu_rdp_is_offloaded(rdp))
		wake_up_process(t);
	else
		kthread_park(t);

	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_CB_BOOST) && kthread_prio)
		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);

	WRITE_ONCE(rdp->nocb_cb_kthread, t);
	WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
	return;

err:
	/*
	 * No need to protect against concurrent rcu_barrier()
	 * because the number of callbacks should be 0 for a non-boot CPU,
	 * therefore rcu_barrier() shouldn't even try to grab the nocb_lock.
	 * But hold nocb_mutex to avoid nocb_lock imbalance from shrinker.
	 */
	WARN_ON_ONCE(system_state > SYSTEM_BOOTING && rcu_segcblist_n_cbs(&rdp->cblist));
	mutex_lock(&rcu_state.nocb_mutex);
	if (rcu_rdp_is_offloaded(rdp)) {
		rcu_nocb_rdp_deoffload(rdp);
		cpumask_clear_cpu(cpu, rcu_nocb_mask);
	}
	mutex_unlock(&rcu_state.nocb_mutex);
}

/* How many CB CPU IDs per GP kthread?  Default of -1 for sqrt(nr_cpu_ids). */
static int rcu_nocb_gp_stride = -1;
module_param(rcu_nocb_gp_stride, int, 0444);

/*
 * Initialize GP-CB relationships for all no-CBs CPUs.
 */
static void __init rcu_organize_nocb_kthreads(void)
{
	int cpu;
	bool firsttime = true;
	bool gotnocbs = false;
	bool gotnocbscbs = true;
	int ls = rcu_nocb_gp_stride;
	int nl = 0;  /* Next GP kthread. */
	struct rcu_data *rdp;
	struct rcu_data *rdp_gp = NULL;  /* Suppress misguided gcc warn. */

	if (!cpumask_available(rcu_nocb_mask))
		return;
	if (ls == -1) {
		ls = nr_cpu_ids / int_sqrt(nr_cpu_ids);
		rcu_nocb_gp_stride = ls;
	}

	/*
	 * Each pass through this loop sets up one rcu_data structure.
	 * Should the corresponding CPU come online in the future, then
	 * we will spawn the needed set of rcu_nocb_kthread() kthreads.
	 */
	for_each_possible_cpu(cpu) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (rdp->cpu >= nl) {
			/* New GP kthread, set up for CBs & next GP. */
			gotnocbs = true;
			nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
			rdp_gp = rdp;
			INIT_LIST_HEAD(&rdp->nocb_head_rdp);
			if (dump_tree) {
				if (!firsttime)
					pr_cont("%s\n", gotnocbscbs
							? "" : " (self only)");
				gotnocbscbs = false;
				firsttime = false;
				pr_alert("%s: No-CB GP kthread CPU %d:",
					 __func__, cpu);
			}
		} else {
			/* Another CB kthread, link to previous GP kthread. */
			gotnocbscbs = true;
			if (dump_tree)
				pr_cont(" %d", cpu);
		}
		rdp->nocb_gp_rdp = rdp_gp;
		if (cpumask_test_cpu(cpu, rcu_nocb_mask))
			list_add_tail(&rdp->nocb_entry_rdp, &rdp_gp->nocb_head_rdp);
	}
	if (gotnocbs && dump_tree)
		pr_cont("%s\n", gotnocbscbs ? "" : " (self only)");
}
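
/*
 * Worked example (illustrative): with nr_cpu_ids = 64 and the default
 * stride, ls = 64 / int_sqrt(64) = 8, so CPUs 0-7 share the rcuog
 * kthread anchored at CPU 0, CPUs 8-15 share the one anchored at
 * CPU 8, and so on: for each CPU, nl = DIV_ROUND_UP(cpu + 1, 8) * 8
 * marks the first CPU of the *next* group.
 */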

/*
 * Bind the current task to the offloaded CPUs.  If there are no offloaded
 * CPUs, leave the task unbound.  Splat if the bind attempt fails.
 */
void rcu_bind_current_to_nocb(void)
{
	if (cpumask_available(rcu_nocb_mask) && !cpumask_empty(rcu_nocb_mask))
		WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask));
}
EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);

// The ->on_cpu field is available only in CONFIG_SMP=y, so...
#ifdef CONFIG_SMP
static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
{
	return tsp && task_is_running(tsp) && !tsp->on_cpu ? "!" : "";
}
#else // #ifdef CONFIG_SMP
static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
{
	return "";
}
#endif // #else #ifdef CONFIG_SMP

/*
 * Dump out nocb grace-period kthread state for the specified rcu_data
 * structure.
 */
static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
{
	struct rcu_node *rnp = rdp->mynode;

	pr_info("nocb GP %d %c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu %c CPU %d%s\n",
		rdp->cpu,
		"kK"[!!rdp->nocb_gp_kthread],
		"lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)],
		"dD"[!!rdp->nocb_defer_wakeup],
		"tT"[timer_pending(&rdp->nocb_timer)],
		"sS"[!!rdp->nocb_gp_sleep],
		".W"[swait_active(&rdp->nocb_gp_wq)],
		".W"[swait_active(&rnp->nocb_gp_wq[0])],
		".W"[swait_active(&rnp->nocb_gp_wq[1])],
		".B"[!!rdp->nocb_gp_bypass],
		".G"[!!rdp->nocb_gp_gp],
		(long)rdp->nocb_gp_seq,
		rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops),
		rdp->nocb_gp_kthread ? task_state_to_char(rdp->nocb_gp_kthread) : '.',
		rdp->nocb_gp_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,
		show_rcu_should_be_on_cpu(rdp->nocb_gp_kthread));
}
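
/*
 * Decoding hint (illustrative): each "xY"[cond] expression above prints
 * the second character when cond is true and the first (lowercase or
 * '.') when it is false.  So a hypothetical line starting with
 * "nocb GP 3 KldtS" would mean that CPU 3's GP kthread exists (K), its
 * ->nocb_gp_lock is not held (l), no wakeup is deferred (d), no timer
 * is pending (t), and the kthread is sleeping (S).
 */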

/* Dump out nocb kthread state for the specified rcu_data structure. */
static void show_rcu_nocb_state(struct rcu_data *rdp)
{
	char bufw[20];
	char bufr[20];
	struct rcu_data *nocb_next_rdp;
	struct rcu_segcblist *rsclp = &rdp->cblist;
	bool waslocked;
	bool wassleep;

	if (rdp->nocb_gp_rdp == rdp)
		show_rcu_nocb_gp_state(rdp);

	nocb_next_rdp = list_next_or_null_rcu(&rdp->nocb_gp_rdp->nocb_head_rdp,
					      &rdp->nocb_entry_rdp,
					      typeof(*rdp),
					      nocb_entry_rdp);

	sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]);
	sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]);
	pr_info("   CB %d^%d->%d %c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
		rdp->cpu, rdp->nocb_gp_rdp->cpu,
		nocb_next_rdp ? nocb_next_rdp->cpu : -1,
		"kK"[!!rdp->nocb_cb_kthread],
		"bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
		"lL"[raw_spin_is_locked(&rdp->nocb_lock)],
		"sS"[!!rdp->nocb_cb_sleep],
		".W"[swait_active(&rdp->nocb_cb_wq)],
		jiffies - rdp->nocb_bypass_first,
		jiffies - rdp->nocb_nobypass_last,
		rdp->nocb_nobypass_count,
		".D"[rcu_segcblist_ready_cbs(rsclp)],
		".W"[!rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL)],
		rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL) ? "" : bufw,
		".R"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL)],
		rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL) ? "" : bufr,
		".N"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_TAIL)],
		".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)],
		rcu_segcblist_n_cbs(&rdp->cblist),
		rdp->nocb_cb_kthread ? task_state_to_char(rdp->nocb_cb_kthread) : '.',
		rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_cb_kthread) : -1,
		show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread));

	/* It is OK for GP kthreads to have GP state. */
	if (rdp->nocb_gp_rdp == rdp)
		return;

	waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock);
	wassleep = swait_active(&rdp->nocb_gp_wq);
	if (!rdp->nocb_gp_sleep && !waslocked && !wassleep)
		return;  /* Nothing untoward. */

	pr_info("   nocb GP activity on CB-only CPU!!! %c%c%c %c\n",
		"lL"[waslocked],
		"dD"[!!rdp->nocb_defer_wakeup],
		"sS"[!!rdp->nocb_gp_sleep],
		".W"[wassleep]);
}

#else /* #ifdef CONFIG_RCU_NOCB_CPU */

/* No ->nocb_lock to acquire.  */
static void rcu_nocb_lock(struct rcu_data *rdp)
{
}

/* No ->nocb_lock to release.  */
static void rcu_nocb_unlock(struct rcu_data *rdp)
{
}

/* No ->nocb_lock to release.  */
static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
				       unsigned long flags)
{
	local_irq_restore(flags);
}

/* Lockdep check that ->cblist may be safely accessed. */
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
{
	lockdep_assert_irqs_disabled();
}

static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
{
}

static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
{
	return NULL;
}

static void rcu_init_one_nocb(struct rcu_node *rnp)
{
}

static bool wake_nocb_gp(struct rcu_data *rdp, bool force)
{
	return false;
}

static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				  unsigned long j, bool lazy)
{
	return true;
}

static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head,
			  rcu_callback_t func, unsigned long flags, bool lazy)
{
	WARN_ON_ONCE(1);  /* Should be dead code! */
}

static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
				 unsigned long flags)
{
	WARN_ON_ONCE(1);  /* Should be dead code! */
}

static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
}

static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
{
	return false;
}

static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
	return false;
}

static void rcu_spawn_cpu_nocb_kthread(int cpu)
{
}

static void show_rcu_nocb_state(struct rcu_data *rdp)
{
}

#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
1689