1 /* SPDX-License-Identifier: GPL-2.0+ */
2 /*
3  * Read-Copy Update mechanism for mutual exclusion (tree-based version)
4  * Internal non-public definitions that provide either classic
5  * or preemptible semantics.
6  *
7  * Copyright Red Hat, 2009
8  * Copyright IBM Corporation, 2009
9  * Copyright SUSE, 2021
10  *
11  * Author: Ingo Molnar <mingo@elte.hu>
12  *	   Paul E. McKenney <paulmck@linux.ibm.com>
13  *	   Frederic Weisbecker <frederic@kernel.org>
14  */
15 
16 #ifdef CONFIG_RCU_NOCB_CPU
17 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
18 static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
19 
20 static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
21 {
22 	/* Race on early boot between thread creation and assignment */
23 	if (!rdp->nocb_cb_kthread || !rdp->nocb_gp_kthread)
24 		return true;
25 
26 	if (current == rdp->nocb_cb_kthread || current == rdp->nocb_gp_kthread)
27 		if (in_task())
28 			return true;
29 	return false;
30 }
31 
32 /*
33  * Offload callback processing from the boot-time-specified set of CPUs
34  * specified by rcu_nocb_mask.  For the CPUs in the set, there are kthreads
35  * created that pull the callbacks from the corresponding CPU, wait for
36  * a grace period to elapse, and invoke the callbacks.  These kthreads
37  * are organized into GP kthreads, which manage incoming callbacks, wait for
38  * grace periods, and awaken CB kthreads, and the CB kthreads, which only
39  * invoke callbacks.  Each GP kthread invokes its own CBs.  The no-CBs CPUs
40  * do a wake_up() on their GP kthread when they insert a callback into any
41  * empty list, unless the rcu_nocb_poll boot parameter has been specified,
42  * in which case each kthread actively polls its CPU.  (Which isn't so great
43  * for energy efficiency, but which does reduce RCU's overhead on that CPU.)
44  *
45  * This is intended to be used in conjunction with Frederic Weisbecker's
46  * adaptive-idle work, which would seriously reduce OS jitter on CPUs
47  * running CPU-bound user-mode computations.
48  *
49  * Offloading of callbacks can also be used as an energy-efficiency
50  * measure because CPUs with no RCU callbacks queued are more aggressive
51  * about entering dyntick-idle mode.
52  */
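/*
 * In "ps" output, the GP kthreads spawned below appear as "rcuog/<cpu>"
 * and the per-CPU CB kthreads as "rcuo<abbr>/<cpu>", where <abbr> is taken
 * from rcu_state.abbr (see rcu_spawn_cpu_nocb_kthread() later in this file).
 */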
53 
54 
55 /*
56  * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
57  * If the list is invalid, a warning is emitted and all CPUs are offloaded.
58  */
59 static int __init rcu_nocb_setup(char *str)
60 {
61 	alloc_bootmem_cpumask_var(&rcu_nocb_mask);
62 	if (*str == '=') {
63 		if (cpulist_parse(++str, rcu_nocb_mask)) {
64 			pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
65 			cpumask_setall(rcu_nocb_mask);
66 		}
67 	}
68 	rcu_state.nocb_is_setup = true;
69 	return 1;
70 }
71 __setup("rcu_nocbs", rcu_nocb_setup);
72 
73 static int __init parse_rcu_nocb_poll(char *arg)
74 {
75 	rcu_nocb_poll = true;
76 	return 1;
77 }
78 __setup("rcu_nocb_poll", parse_rcu_nocb_poll);
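/*
 * Illustrative boot-time usage of the two parameters above: passing
 * "rcu_nocbs=4-7 rcu_nocb_poll" on the kernel command line offloads
 * callbacks from CPUs 4-7 and makes the rcuog kthreads poll rather than
 * wait for wakeups.  An unparseable CPU list falls back to offloading
 * all CPUs, as rcu_nocb_setup() warns.
 */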
79 
80 /*
81  * Don't bother bypassing ->cblist if the call_rcu() rate is low.
82  * After all, the main point of bypassing is to avoid lock contention
83  * on ->nocb_lock, which only can happen at high call_rcu() rates.
84  */
85 static int nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ;
86 module_param(nocb_nobypass_lim_per_jiffy, int, 0);
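/*
 * The default above works out to 16 direct ->cblist enqueues per
 * millisecond independent of HZ: for example, HZ=1000 gives a limit of
 * 16 per jiffy and HZ=250 gives 64 per jiffy.
 */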
87 
88 /*
89  * Acquire the specified rcu_data structure's ->nocb_bypass_lock.  If the
90  * lock isn't immediately available, perform a minimal sanity check.
91  */
92 static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
93 	__acquires(&rdp->nocb_bypass_lock)
94 {
95 	lockdep_assert_irqs_disabled();
96 	if (raw_spin_trylock(&rdp->nocb_bypass_lock))
97 		return;
98 	/*
99 	 * Contention is expected only when a local enqueue collides with
100 	 * a remote flush from the kthreads.
101 	 */
102 	WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
103 	raw_spin_lock(&rdp->nocb_bypass_lock);
104 }
105 
106 /*
107  * Conditionally acquire the specified rcu_data structure's
108  * ->nocb_bypass_lock.
109  */
110 static bool rcu_nocb_bypass_trylock(struct rcu_data *rdp)
111 {
112 	lockdep_assert_irqs_disabled();
113 	return raw_spin_trylock(&rdp->nocb_bypass_lock);
114 }
115 
116 /*
117  * Release the specified rcu_data structure's ->nocb_bypass_lock.
118  */
119 static void rcu_nocb_bypass_unlock(struct rcu_data *rdp)
120 	__releases(&rdp->nocb_bypass_lock)
121 {
122 	lockdep_assert_irqs_disabled();
123 	raw_spin_unlock(&rdp->nocb_bypass_lock);
124 }
125 
126 /*
127  * Acquire the specified rcu_data structure's ->nocb_lock, but only
128  * if it corresponds to a no-CBs CPU.
129  */
130 static void rcu_nocb_lock(struct rcu_data *rdp)
131 {
132 	lockdep_assert_irqs_disabled();
133 	if (!rcu_rdp_is_offloaded(rdp))
134 		return;
135 	raw_spin_lock(&rdp->nocb_lock);
136 }
137 
138 /*
139  * Release the specified rcu_data structure's ->nocb_lock, but only
140  * if it corresponds to a no-CBs CPU.
141  */
142 static void rcu_nocb_unlock(struct rcu_data *rdp)
143 {
144 	if (rcu_rdp_is_offloaded(rdp)) {
145 		lockdep_assert_irqs_disabled();
146 		raw_spin_unlock(&rdp->nocb_lock);
147 	}
148 }
149 
150 /*
151  * Release the specified rcu_data structure's ->nocb_lock and restore
152  * interrupts, but only if it corresponds to a no-CBs CPU.
153  */
154 static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
155 				       unsigned long flags)
156 {
157 	if (rcu_rdp_is_offloaded(rdp)) {
158 		lockdep_assert_irqs_disabled();
159 		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
160 	} else {
161 		local_irq_restore(flags);
162 	}
163 }
164 
165 /* Lockdep check that ->cblist may be safely accessed. */
166 static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
167 {
168 	lockdep_assert_irqs_disabled();
169 	if (rcu_rdp_is_offloaded(rdp))
170 		lockdep_assert_held(&rdp->nocb_lock);
171 }
172 
173 /*
174  * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
175  * grace period.
176  */
177 static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
178 {
179 	swake_up_all(sq);
180 }
181 
182 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
183 {
184 	return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
185 }
186 
187 static void rcu_init_one_nocb(struct rcu_node *rnp)
188 {
189 	init_swait_queue_head(&rnp->nocb_gp_wq[0]);
190 	init_swait_queue_head(&rnp->nocb_gp_wq[1]);
191 }
192 
193 static bool __wake_nocb_gp(struct rcu_data *rdp_gp,
194 			   struct rcu_data *rdp,
195 			   bool force, unsigned long flags)
196 	__releases(rdp_gp->nocb_gp_lock)
197 {
198 	bool needwake = false;
199 
200 	if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) {
201 		raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
202 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
203 				    TPS("AlreadyAwake"));
204 		return false;
205 	}
206 
207 	if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
208 		WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
209 		del_timer(&rdp_gp->nocb_timer);
210 	}
211 
212 	if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
213 		WRITE_ONCE(rdp_gp->nocb_gp_sleep, false);
214 		needwake = true;
215 	}
216 	raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
217 	if (needwake) {
218 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
219 		swake_up_one_online(&rdp_gp->nocb_gp_wq);
220 	}
221 
222 	return needwake;
223 }
224 
225 /*
226  * Kick the GP kthread for this NOCB group.
227  */
228 static bool wake_nocb_gp(struct rcu_data *rdp, bool force)
229 {
230 	unsigned long flags;
231 	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
232 
233 	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
234 	return __wake_nocb_gp(rdp_gp, rdp, force, flags);
235 }
236 
237 #ifdef CONFIG_RCU_LAZY
238 /*
239  * LAZY_FLUSH_JIFFIES decides the maximum amount of time that
240  * can elapse before lazy callbacks are flushed. Lazy callbacks
241  * could be flushed much earlier for a number of other reasons;
242  * however, LAZY_FLUSH_JIFFIES will ensure that no lazy callbacks are
243  * left unsubmitted to RCU after that many jiffies.
244  */
245 #define LAZY_FLUSH_JIFFIES (10 * HZ)
246 static unsigned long jiffies_lazy_flush = LAZY_FLUSH_JIFFIES;
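/*
 * With the default above, lazy callbacks are therefore flushed no later
 * than roughly ten seconds (10 * HZ jiffies) after ->nocb_bypass first
 * went non-empty, whatever the HZ setting, unless test code changes
 * jiffies_lazy_flush via rcu_set_jiffies_lazy_flush().
 */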
247 
248 // To be called only from test code.
249 void rcu_set_jiffies_lazy_flush(unsigned long jif)
250 {
251 	jiffies_lazy_flush = jif;
252 }
253 EXPORT_SYMBOL(rcu_set_jiffies_lazy_flush);
254 
255 unsigned long rcu_get_jiffies_lazy_flush(void)
256 {
257 	return jiffies_lazy_flush;
258 }
259 EXPORT_SYMBOL(rcu_get_jiffies_lazy_flush);
260 #endif
261 
262 /*
263  * Arrange to wake the GP kthread for this NOCB group at some future
264  * time when it is safe to do so.
265  */
266 static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
267 			       const char *reason)
268 {
269 	unsigned long flags;
270 	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
271 
272 	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
273 
274 	/*
275 	 * Bypass wakeup overrides previous deferments. In case of
276 	 * callback storms, no need to wake up too early.
277 	 */
278 	if (waketype == RCU_NOCB_WAKE_LAZY &&
279 	    rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT) {
280 		mod_timer(&rdp_gp->nocb_timer, jiffies + rcu_get_jiffies_lazy_flush());
281 		WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
282 	} else if (waketype == RCU_NOCB_WAKE_BYPASS) {
283 		mod_timer(&rdp_gp->nocb_timer, jiffies + 2);
284 		WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
285 	} else {
286 		if (rdp_gp->nocb_defer_wakeup < RCU_NOCB_WAKE)
287 			mod_timer(&rdp_gp->nocb_timer, jiffies + 1);
288 		if (rdp_gp->nocb_defer_wakeup < waketype)
289 			WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
290 	}
291 
292 	raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
293 
294 	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
295 }
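/*
 * To summarize the deadlines chosen above: a lazy deferral (accepted only
 * when no other deferral is already pending) is armed for
 * rcu_get_jiffies_lazy_flush() jiffies out (ten seconds by default under
 * CONFIG_RCU_LAZY), a bypass deferral for two jiffies, and any other
 * deferral for a single jiffy, with non-lazy, non-bypass requests only
 * ever upgrading the pending wake type.
 */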
296 
297 /*
298  * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
299  * However, if there is a callback to be enqueued and if ->nocb_bypass
300  * proves to be initially empty, just return false because the no-CB GP
301  * kthread may need to be awakened in this case.
302  *
303  * Return true if there was something to be flushed and it succeeded, otherwise
304  * false.
305  *
306  * Note that this function always returns true if rhp is NULL.
307  */
308 static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp_in,
309 				     unsigned long j, bool lazy)
310 {
311 	struct rcu_cblist rcl;
312 	struct rcu_head *rhp = rhp_in;
313 
314 	WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
315 	rcu_lockdep_assert_cblist_protected(rdp);
316 	lockdep_assert_held(&rdp->nocb_bypass_lock);
317 	if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) {
318 		raw_spin_unlock(&rdp->nocb_bypass_lock);
319 		return false;
320 	}
321 	/* Note: ->cblist.len already accounts for ->nocb_bypass contents. */
322 	if (rhp)
323 		rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
324 
325 	/*
326 	 * If the new CB requested was a lazy one, queue it onto the main
327 	 * ->cblist so that we can take advantage of the grace-period that will
328 	 * happen regardless. But queue it onto the bypass list first so that
329 	 * the lazy CB is ordered with the existing CBs in the bypass list.
330 	 */
331 	if (lazy && rhp) {
332 		rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
333 		rhp = NULL;
334 	}
335 	rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
336 	WRITE_ONCE(rdp->lazy_len, 0);
337 
338 	rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
339 	WRITE_ONCE(rdp->nocb_bypass_first, j);
340 	rcu_nocb_bypass_unlock(rdp);
341 	return true;
342 }
343 
344 /*
345  * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
346  * However, if there is a callback to be enqueued and if ->nocb_bypass
347  * proves to be initially empty, just return false because the no-CB GP
348  * kthread may need to be awakened in this case.
349  *
350  * Note that this function always returns true if rhp is NULL.
351  */
352 static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
353 				  unsigned long j, bool lazy)
354 {
355 	if (!rcu_rdp_is_offloaded(rdp))
356 		return true;
357 	rcu_lockdep_assert_cblist_protected(rdp);
358 	rcu_nocb_bypass_lock(rdp);
359 	return rcu_nocb_do_flush_bypass(rdp, rhp, j, lazy);
360 }
361 
362 /*
363  * If the ->nocb_bypass_lock is immediately available, flush the
364  * ->nocb_bypass queue into ->cblist.
365  */
366 static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j)
367 {
368 	rcu_lockdep_assert_cblist_protected(rdp);
369 	if (!rcu_rdp_is_offloaded(rdp) ||
370 	    !rcu_nocb_bypass_trylock(rdp))
371 		return;
372 	WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j, false));
373 }
374 
375 /*
376  * See whether it is appropriate to use the ->nocb_bypass list in order
377  * to control contention on ->nocb_lock.  A limited number of direct
378  * enqueues are permitted into ->cblist per jiffy.  If ->nocb_bypass
379  * is non-empty, further callbacks must be placed into ->nocb_bypass,
380  * otherwise rcu_barrier() breaks.  Use rcu_nocb_flush_bypass() to switch
381  * back to direct use of ->cblist.  However, ->nocb_bypass should not be
382  * used if ->cblist is empty, because otherwise callbacks can be stranded
383  * on ->nocb_bypass because we cannot count on the current CPU ever again
384  * invoking call_rcu().  The general rule is that if ->nocb_bypass is
385  * non-empty, the corresponding no-CBs grace-period kthread must not be
386  * in an indefinite sleep state.
387  *
388  * Finally, it is not permitted to use the bypass during early boot,
389  * as doing so would confuse the auto-initialization code.  Besides
390  * which, there is no point in worrying about lock contention while
391  * there is only one CPU in operation.
392  */
393 static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
394 				bool *was_alldone, unsigned long flags,
395 				bool lazy)
396 {
397 	unsigned long c;
398 	unsigned long cur_gp_seq;
399 	unsigned long j = jiffies;
400 	long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
401 	bool bypass_is_lazy = (ncbs == READ_ONCE(rdp->lazy_len));
402 
403 	lockdep_assert_irqs_disabled();
404 
405 	// Pure softirq/rcuc based processing: no bypassing, no
406 	// locking.
407 	if (!rcu_rdp_is_offloaded(rdp)) {
408 		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
409 		return false;
410 	}
411 
412 	// Don't use ->nocb_bypass during early boot.
413 	if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
414 		rcu_nocb_lock(rdp);
415 		WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
416 		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
417 		return false;
418 	}
419 
420 	// If we have advanced to a new jiffy, reset counts to allow
421 	// moving back from ->nocb_bypass to ->cblist.
422 	if (j == rdp->nocb_nobypass_last) {
423 		c = rdp->nocb_nobypass_count + 1;
424 	} else {
425 		WRITE_ONCE(rdp->nocb_nobypass_last, j);
426 		c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy;
427 		if (ULONG_CMP_LT(rdp->nocb_nobypass_count,
428 				 nocb_nobypass_lim_per_jiffy))
429 			c = 0;
430 		else if (c > nocb_nobypass_lim_per_jiffy)
431 			c = nocb_nobypass_lim_per_jiffy;
432 	}
433 	WRITE_ONCE(rdp->nocb_nobypass_count, c);
434 
435 	// If there hasn't yet been all that many ->cblist enqueues
436 	// this jiffy, tell the caller to enqueue onto ->cblist.  But flush
437 	// ->nocb_bypass first.
438 	// Lazy CBs throttle this back and do immediate bypass queuing.
439 	if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy && !lazy) {
440 		rcu_nocb_lock(rdp);
441 		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
442 		if (*was_alldone)
443 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
444 					    TPS("FirstQ"));
445 
446 		WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, j, false));
447 		WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
448 		return false; // Caller must enqueue the callback.
449 	}
450 
451 	// If ->nocb_bypass has been used too long or is too full,
452 	// flush ->nocb_bypass to ->cblist.
453 	if ((ncbs && !bypass_is_lazy && j != READ_ONCE(rdp->nocb_bypass_first)) ||
454 	    (ncbs &&  bypass_is_lazy &&
455 	     (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + rcu_get_jiffies_lazy_flush()))) ||
456 	    ncbs >= qhimark) {
457 		rcu_nocb_lock(rdp);
458 		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
459 
460 		if (!rcu_nocb_flush_bypass(rdp, rhp, j, lazy)) {
461 			if (*was_alldone)
462 				trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
463 						    TPS("FirstQ"));
464 			WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
465 			return false; // Caller must enqueue the callback.
466 		}
467 		if (j != rdp->nocb_gp_adv_time &&
468 		    rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
469 		    rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
470 			rcu_advance_cbs_nowake(rdp->mynode, rdp);
471 			rdp->nocb_gp_adv_time = j;
472 		}
473 
474 		// The flush succeeded and we moved CBs into the regular list.
475 		// Don't wait for the wake up timer as it may be too far ahead.
476 		// Wake up the GP thread now instead, if the cblist was empty.
477 		__call_rcu_nocb_wake(rdp, *was_alldone, flags);
478 
479 		return true; // Callback already enqueued.
480 	}
481 
482 	// We need to use the bypass.
483 	rcu_nocb_bypass_lock(rdp);
484 	ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
485 	rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
486 	rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
487 
488 	if (lazy)
489 		WRITE_ONCE(rdp->lazy_len, rdp->lazy_len + 1);
490 
491 	if (!ncbs) {
492 		WRITE_ONCE(rdp->nocb_bypass_first, j);
493 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));
494 	}
495 	rcu_nocb_bypass_unlock(rdp);
496 
497 	// A wake up of the grace period kthread or timer adjustment
498 	// needs to be done only if:
499 	// 1. Bypass list was fully empty before (this is the first
500 	//    bypass list entry), or:
501 	// 2. Both of these conditions are met:
502 	//    a. The bypass list previously had only lazy CBs, and:
503 	//    b. The new CB is non-lazy.
504 	if (!ncbs || (bypass_is_lazy && !lazy)) {
505 		// The no-CBs GP kthread might be indefinitely asleep; if so, wake it.
506 		rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
507 		if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
508 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
509 					    TPS("FirstBQwake"));
510 			__call_rcu_nocb_wake(rdp, true, flags);
511 		} else {
512 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
513 					    TPS("FirstBQnoWake"));
514 			rcu_nocb_unlock(rdp);
515 		}
516 	}
517 	return true; // Callback already enqueued.
518 }
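/*
 * To recap rcu_nocb_try_bypass()'s contract: a false return means the
 * caller must queue the callback itself (with ->nocb_lock held when the
 * rdp is offloaded), while a true return means the callback has already
 * been queued here, either directly onto ->nocb_bypass or via a flush
 * into ->cblist, with any required wakeup also taken care of.
 */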
519 
520 /*
521  * Awaken the no-CBs grace-period kthread if needed, either due to it
522  * legitimately being asleep or due to overload conditions.
523  *
524  * If warranted, also wake up the kthread servicing this CPU's queues.
525  */
526 static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
527 				 unsigned long flags)
528 				 __releases(rdp->nocb_lock)
529 {
530 	long bypass_len;
531 	unsigned long cur_gp_seq;
532 	unsigned long j;
533 	long lazy_len;
534 	long len;
535 	struct task_struct *t;
536 	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
537 
538 	// If we are being polled or there is no kthread, just leave.
539 	t = READ_ONCE(rdp->nocb_gp_kthread);
540 	if (rcu_nocb_poll || !t) {
541 		rcu_nocb_unlock(rdp);
542 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
543 				    TPS("WakeNotPoll"));
544 		return;
545 	}
546 	// Need to actually do a wakeup.
547 	len = rcu_segcblist_n_cbs(&rdp->cblist);
548 	bypass_len = rcu_cblist_n_cbs(&rdp->nocb_bypass);
549 	lazy_len = READ_ONCE(rdp->lazy_len);
550 	if (was_alldone) {
551 		rdp->qlen_last_fqs_check = len;
552 		// Only lazy CBs in bypass list
553 		if (lazy_len && bypass_len == lazy_len) {
554 			rcu_nocb_unlock(rdp);
555 			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_LAZY,
556 					   TPS("WakeLazy"));
557 		} else if (!irqs_disabled_flags(flags) && cpu_online(rdp->cpu)) {
558 			/* ... if queue was empty ... */
559 			rcu_nocb_unlock(rdp);
560 			wake_nocb_gp(rdp, false);
561 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
562 					    TPS("WakeEmpty"));
563 		} else {
564 			/*
565 			 * Don't do the wake-up upfront on fragile paths.
566 			 * Also offline CPUs can't call swake_up_one_online() from
567 			 * (soft-)IRQs. Rely on the final deferred wake-up from
568 			 * rcutree_report_cpu_dead()
569 			 */
570 			rcu_nocb_unlock(rdp);
571 			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
572 					   TPS("WakeEmptyIsDeferred"));
573 		}
574 	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
575 		/* ... or if many callbacks queued. */
576 		rdp->qlen_last_fqs_check = len;
577 		j = jiffies;
578 		if (j != rdp->nocb_gp_adv_time &&
579 		    rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
580 		    rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
581 			rcu_advance_cbs_nowake(rdp->mynode, rdp);
582 			rdp->nocb_gp_adv_time = j;
583 		}
584 		smp_mb(); /* Enqueue before timer_pending(). */
585 		if ((rdp->nocb_cb_sleep ||
586 		     !rcu_segcblist_ready_cbs(&rdp->cblist)) &&
587 		    !timer_pending(&rdp_gp->nocb_timer)) {
588 			rcu_nocb_unlock(rdp);
589 			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
590 					   TPS("WakeOvfIsDeferred"));
591 		} else {
592 			rcu_nocb_unlock(rdp);
593 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
594 		}
595 	} else {
596 		rcu_nocb_unlock(rdp);
597 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
598 	}
599 }
600 
601 static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head,
602 			  rcu_callback_t func, unsigned long flags, bool lazy)
603 {
604 	bool was_alldone;
605 
606 	if (!rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy)) {
607 		/* Not enqueued on bypass but locked, do regular enqueue */
608 		rcutree_enqueue(rdp, head, func);
609 		__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
610 	}
611 }
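/*
 * A note on the "lazy" argument: with CONFIG_RCU_LAZY, callbacks posted
 * via plain call_rcu() are typically the lazy ones seen here, whereas
 * call_rcu_hurry() (and kernels built without CONFIG_RCU_LAZY) post
 * non-lazy callbacks.
 */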
612 
613 static void nocb_gp_toggle_rdp(struct rcu_data *rdp_gp, struct rcu_data *rdp)
614 {
615 	struct rcu_segcblist *cblist = &rdp->cblist;
616 	unsigned long flags;
617 
618 	/*
619 	 * Locking orders future de-offloaded callback enqueues against previous
620 	 * handling of this rdp, i.e., it makes sure rcuog is done with this rdp
621 	 * before de-offloaded callbacks can be enqueued.
622 	 */
623 	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
624 	if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
625 		/*
626 		 * Offloading. Set our flag and notify the offload worker.
627 		 * We will handle this rdp until it gets de-offloaded.
628 		 */
629 		list_add_tail(&rdp->nocb_entry_rdp, &rdp_gp->nocb_head_rdp);
630 		rcu_segcblist_set_flags(cblist, SEGCBLIST_OFFLOADED);
631 	} else {
632 		/*
633 		 * De-offloading. Clear our flag and notify the de-offload worker.
634 		 * We will ignore this rdp until it gets re-offloaded.
635 		 */
636 		list_del(&rdp->nocb_entry_rdp);
637 		rcu_segcblist_clear_flags(cblist, SEGCBLIST_OFFLOADED);
638 	}
639 	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
640 }
641 
642 static void nocb_gp_sleep(struct rcu_data *my_rdp, int cpu)
643 {
644 	trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Sleep"));
645 	swait_event_interruptible_exclusive(my_rdp->nocb_gp_wq,
646 					!READ_ONCE(my_rdp->nocb_gp_sleep));
647 	trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("EndSleep"));
648 }
649 
650 /*
651  * No-CBs GP kthreads come here to wait for additional callbacks to show up
652  * or for grace periods to end.
653  */
654 static void nocb_gp_wait(struct rcu_data *my_rdp)
655 {
656 	bool bypass = false;
657 	int __maybe_unused cpu = my_rdp->cpu;
658 	unsigned long cur_gp_seq;
659 	unsigned long flags;
660 	bool gotcbs = false;
661 	unsigned long j = jiffies;
662 	bool lazy = false;
663 	bool needwait_gp = false; // This prevents actual uninitialized use.
664 	bool needwake;
665 	bool needwake_gp;
666 	struct rcu_data *rdp, *rdp_toggling = NULL;
667 	struct rcu_node *rnp;
668 	unsigned long wait_gp_seq = 0; // Suppress "use uninitialized" warning.
669 	bool wasempty = false;
670 
671 	/*
672 	 * Each pass through the following loop checks for CBs and for the
673 	 * nearest grace period (if any) to wait for next.  The CB kthreads
674 	 * and the global grace-period kthread are awakened if needed.
675 	 */
676 	WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp);
677 	/*
678 	 * An rcu_data structure is removed from the list after its
679 	 * CPU is de-offloaded and added to the list before that CPU is
680 	 * (re-)offloaded.  If the following loop happens to be referencing
681 	 * that rcu_data structure during the time that the corresponding
682 	 * CPU is de-offloaded and then immediately re-offloaded, this
683 	 * loop's rdp pointer will be carried to the end of the list by
684 	 * the resulting pair of list operations.  This can cause the loop
685 	 * to skip over some of the rcu_data structures that were supposed
686 	 * to have been scanned.  Fortunately a new iteration through the
687 	 * entire loop is forced after a given CPU's rcu_data structure
688 	 * is added to the list, so the skipped-over rcu_data structures
689 	 * won't be ignored for long.
690 	 */
691 	list_for_each_entry(rdp, &my_rdp->nocb_head_rdp, nocb_entry_rdp) {
692 		long bypass_ncbs;
693 		bool flush_bypass = false;
694 		long lazy_ncbs;
695 
696 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
697 		rcu_nocb_lock_irqsave(rdp, flags);
698 		lockdep_assert_held(&rdp->nocb_lock);
699 		bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
700 		lazy_ncbs = READ_ONCE(rdp->lazy_len);
701 
702 		if (bypass_ncbs && (lazy_ncbs == bypass_ncbs) &&
703 		    (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + rcu_get_jiffies_lazy_flush()) ||
704 		     bypass_ncbs > 2 * qhimark)) {
705 			flush_bypass = true;
706 		} else if (bypass_ncbs && (lazy_ncbs != bypass_ncbs) &&
707 		    (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) ||
708 		     bypass_ncbs > 2 * qhimark)) {
709 			flush_bypass = true;
710 		} else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) {
711 			rcu_nocb_unlock_irqrestore(rdp, flags);
712 			continue; /* No callbacks here, try next. */
713 		}
714 
715 		if (flush_bypass) {
716 			// Bypass full or old, so flush it.
717 			(void)rcu_nocb_try_flush_bypass(rdp, j);
718 			bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
719 			lazy_ncbs = READ_ONCE(rdp->lazy_len);
720 		}
721 
722 		if (bypass_ncbs) {
723 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
724 					    bypass_ncbs == lazy_ncbs ? TPS("Lazy") : TPS("Bypass"));
725 			if (bypass_ncbs == lazy_ncbs)
726 				lazy = true;
727 			else
728 				bypass = true;
729 		}
730 		rnp = rdp->mynode;
731 
732 		// Advance callbacks if helpful and low contention.
733 		needwake_gp = false;
734 		if (!rcu_segcblist_restempty(&rdp->cblist,
735 					     RCU_NEXT_READY_TAIL) ||
736 		    (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
737 		     rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) {
738 			raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
739 			needwake_gp = rcu_advance_cbs(rnp, rdp);
740 			wasempty = rcu_segcblist_restempty(&rdp->cblist,
741 							   RCU_NEXT_READY_TAIL);
742 			raw_spin_unlock_rcu_node(rnp); /* irqs disabled. */
743 		}
744 		// Need to wait on some grace period?
745 		WARN_ON_ONCE(wasempty &&
746 			     !rcu_segcblist_restempty(&rdp->cblist,
747 						      RCU_NEXT_READY_TAIL));
748 		if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq)) {
749 			if (!needwait_gp ||
750 			    ULONG_CMP_LT(cur_gp_seq, wait_gp_seq))
751 				wait_gp_seq = cur_gp_seq;
752 			needwait_gp = true;
753 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
754 					    TPS("NeedWaitGP"));
755 		}
756 		if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
757 			needwake = rdp->nocb_cb_sleep;
758 			WRITE_ONCE(rdp->nocb_cb_sleep, false);
759 		} else {
760 			needwake = false;
761 		}
762 		rcu_nocb_unlock_irqrestore(rdp, flags);
763 		if (needwake) {
764 			swake_up_one(&rdp->nocb_cb_wq);
765 			gotcbs = true;
766 		}
767 		if (needwake_gp)
768 			rcu_gp_kthread_wake();
769 	}
770 
771 	my_rdp->nocb_gp_bypass = bypass;
772 	my_rdp->nocb_gp_gp = needwait_gp;
773 	my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0;
774 
775 	// At least one child with non-empty ->nocb_bypass, so set
776 	// timer in order to avoid stranding its callbacks.
777 	if (!rcu_nocb_poll) {
778 		// If the bypass list has only lazy CBs, add a deferred lazy wake up.
779 		if (lazy && !bypass) {
780 			wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_LAZY,
781 					TPS("WakeLazyIsDeferred"));
782 		// Otherwise add a deferred bypass wake up.
783 		} else if (bypass) {
784 			wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_BYPASS,
785 					TPS("WakeBypassIsDeferred"));
786 		}
787 	}
788 
789 	if (rcu_nocb_poll) {
790 		/* Polling, so trace if first poll in the series. */
791 		if (gotcbs)
792 			trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Poll"));
793 		if (list_empty(&my_rdp->nocb_head_rdp)) {
794 			raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
795 			if (!my_rdp->nocb_toggling_rdp)
796 				WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
797 			raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
798 			/* Wait for any offloading rdp */
799 			nocb_gp_sleep(my_rdp, cpu);
800 		} else {
801 			schedule_timeout_idle(1);
802 		}
803 	} else if (!needwait_gp) {
804 		/* Wait for callbacks to appear. */
805 		nocb_gp_sleep(my_rdp, cpu);
806 	} else {
807 		rnp = my_rdp->mynode;
808 		trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("StartWait"));
809 		swait_event_interruptible_exclusive(
810 			rnp->nocb_gp_wq[rcu_seq_ctr(wait_gp_seq) & 0x1],
811 			rcu_seq_done(&rnp->gp_seq, wait_gp_seq) ||
812 			!READ_ONCE(my_rdp->nocb_gp_sleep));
813 		trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("EndWait"));
814 	}
815 
816 	if (!rcu_nocb_poll) {
817 		raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
818 		// (De-)queue an rdp to/from the group if its nocb state is changing
819 		rdp_toggling = my_rdp->nocb_toggling_rdp;
820 		if (rdp_toggling)
821 			my_rdp->nocb_toggling_rdp = NULL;
822 
823 		if (my_rdp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
824 			WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
825 			del_timer(&my_rdp->nocb_timer);
826 		}
827 		WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
828 		raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
829 	} else {
830 		rdp_toggling = READ_ONCE(my_rdp->nocb_toggling_rdp);
831 		if (rdp_toggling) {
832 			/*
833 			 * Paranoid locking to make sure nocb_toggling_rdp is fully
834 			 * reset *before* we (re)set SEGCBLIST_KTHREAD_GP, or we could
835 			 * race with another round of nocb toggling for this rdp.
836 			 * Nocb locking should already prevent that, but we stick
837 			 * to paranoia, especially on this rare path.
838 			 */
839 			raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
840 			my_rdp->nocb_toggling_rdp = NULL;
841 			raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
842 		}
843 	}
844 
845 	if (rdp_toggling) {
846 		nocb_gp_toggle_rdp(my_rdp, rdp_toggling);
847 		swake_up_one(&rdp_toggling->nocb_state_wq);
848 	}
849 
850 	my_rdp->nocb_gp_seq = -1;
851 	WARN_ON(signal_pending(current));
852 }
853 
854 /*
855  * No-CBs grace-period-wait kthread.  There is one of these per group
856  * of CPUs, but it is spawned only after at least one CPU in that group
857  * has come online since boot.  This kthread checks for newly posted
858  * callbacks from any of the CPUs it is responsible for, waits for a
859  * grace period, then awakens all of the rcu_nocb_cb_kthread() instances
860  * that then have callback-invocation work to do.
861  */
862 static int rcu_nocb_gp_kthread(void *arg)
863 {
864 	struct rcu_data *rdp = arg;
865 
866 	for (;;) {
867 		WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1);
868 		nocb_gp_wait(rdp);
869 		cond_resched_tasks_rcu_qs();
870 	}
871 	return 0;
872 }
873 
874 static inline bool nocb_cb_wait_cond(struct rcu_data *rdp)
875 {
876 	return !READ_ONCE(rdp->nocb_cb_sleep) || kthread_should_park();
877 }
878 
879 /*
880  * Invoke any ready callbacks from the corresponding no-CBs CPU,
881  * then, if there are no more, wait for more to appear.
882  */
883 static void nocb_cb_wait(struct rcu_data *rdp)
884 {
885 	struct rcu_segcblist *cblist = &rdp->cblist;
886 	unsigned long cur_gp_seq;
887 	unsigned long flags;
888 	bool needwake_gp = false;
889 	struct rcu_node *rnp = rdp->mynode;
890 
891 	swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
892 					    nocb_cb_wait_cond(rdp));
893 	if (kthread_should_park()) {
894 		/*
895 		 * kthread_park() must be preceded by an rcu_barrier().
896 		 * But yet another rcu_barrier() might have sneaked in between
897 		 * the barrier callback execution and the callbacks counter
898 		 * decrement.
899 		 */
900 		if (rdp->nocb_cb_sleep) {
901 			rcu_nocb_lock_irqsave(rdp, flags);
902 			WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp->cblist));
903 			rcu_nocb_unlock_irqrestore(rdp, flags);
904 			kthread_parkme();
905 		}
906 	} else if (READ_ONCE(rdp->nocb_cb_sleep)) {
907 		WARN_ON(signal_pending(current));
908 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
909 	}
910 
911 	WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
912 
913 	local_irq_save(flags);
914 	rcu_momentary_eqs();
915 	local_irq_restore(flags);
916 	/*
917 	 * Disable BH to provide the expected environment.  Also, when
918 	 * transitioning to/from NOCB mode, a self-requeuing callback might
919 	 * be invoked from softirq.  A short grace period could cause both
920 	 * instances of this callback to execute concurrently.
921 	 */
922 	local_bh_disable();
923 	rcu_do_batch(rdp);
924 	local_bh_enable();
925 	lockdep_assert_irqs_enabled();
926 	rcu_nocb_lock_irqsave(rdp, flags);
927 	if (rcu_segcblist_nextgp(cblist, &cur_gp_seq) &&
928 	    rcu_seq_done(&rnp->gp_seq, cur_gp_seq) &&
929 	    raw_spin_trylock_rcu_node(rnp)) { /* irqs already disabled. */
930 		needwake_gp = rcu_advance_cbs(rdp->mynode, rdp);
931 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
932 	}
933 
934 	if (!rcu_segcblist_ready_cbs(cblist)) {
935 		WRITE_ONCE(rdp->nocb_cb_sleep, true);
936 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
937 	} else {
938 		WRITE_ONCE(rdp->nocb_cb_sleep, false);
939 	}
940 
941 	rcu_nocb_unlock_irqrestore(rdp, flags);
942 	if (needwake_gp)
943 		rcu_gp_kthread_wake();
944 }
945 
946 /*
947  * Per-rcu_data kthread, but only for no-CBs CPUs.  Repeatedly invoke
948  * nocb_cb_wait() to do the dirty work.
949  */
950 static int rcu_nocb_cb_kthread(void *arg)
951 {
952 	struct rcu_data *rdp = arg;
953 
954 	// Each pass through this loop does one callback batch, and,
955 	// if there are no more ready callbacks, waits for them.
956 	for (;;) {
957 		nocb_cb_wait(rdp);
958 		cond_resched_tasks_rcu_qs();
959 	}
960 	return 0;
961 }
962 
963 /* Is a deferred wakeup of rcu_nocb_kthread() required? */
964 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
965 {
966 	return READ_ONCE(rdp->nocb_defer_wakeup) >= level;
967 }
968 
969 /* Do a deferred wakeup of rcu_nocb_kthread(). */
970 static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp_gp,
971 					   struct rcu_data *rdp, int level,
972 					   unsigned long flags)
973 	__releases(rdp_gp->nocb_gp_lock)
974 {
975 	int ndw;
976 	int ret;
977 
978 	if (!rcu_nocb_need_deferred_wakeup(rdp_gp, level)) {
979 		raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
980 		return false;
981 	}
982 
983 	ndw = rdp_gp->nocb_defer_wakeup;
984 	ret = __wake_nocb_gp(rdp_gp, rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
985 	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
986 
987 	return ret;
988 }
989 
990 /* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
991 static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
992 {
993 	unsigned long flags;
994 	struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);
995 
996 	WARN_ON_ONCE(rdp->nocb_gp_rdp != rdp);
997 	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer"));
998 
999 	raw_spin_lock_irqsave(&rdp->nocb_gp_lock, flags);
1000 	smp_mb__after_spinlock(); /* Timer expire before wakeup. */
1001 	do_nocb_deferred_wakeup_common(rdp, rdp, RCU_NOCB_WAKE_BYPASS, flags);
1002 }
1003 
1004 /*
1005  * Do a deferred wakeup of rcu_nocb_kthread() from fastpath.
1006  * This means we do an inexact common-case check.  Note that if
1007  * we miss, ->nocb_timer will eventually clean things up.
1008  */
1009 static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
1010 {
1011 	unsigned long flags;
1012 	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
1013 
1014 	if (!rdp_gp || !rcu_nocb_need_deferred_wakeup(rdp_gp, RCU_NOCB_WAKE))
1015 		return false;
1016 
1017 	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
1018 	return do_nocb_deferred_wakeup_common(rdp_gp, rdp, RCU_NOCB_WAKE, flags);
1019 }
1020 
1021 void rcu_nocb_flush_deferred_wakeup(void)
1022 {
1023 	do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data));
1024 }
1025 EXPORT_SYMBOL_GPL(rcu_nocb_flush_deferred_wakeup);
1026 
1027 static int rcu_nocb_queue_toggle_rdp(struct rcu_data *rdp)
1028 {
1029 	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
1030 	bool wake_gp = false;
1031 	unsigned long flags;
1032 
1033 	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
1034 	// Queue this rdp for add/del to/from the list to iterate on rcuog
1035 	WRITE_ONCE(rdp_gp->nocb_toggling_rdp, rdp);
1036 	if (rdp_gp->nocb_gp_sleep) {
1037 		rdp_gp->nocb_gp_sleep = false;
1038 		wake_gp = true;
1039 	}
1040 	raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
1041 
1042 	return wake_gp;
1043 }
1044 
1045 static bool rcu_nocb_rdp_deoffload_wait_cond(struct rcu_data *rdp)
1046 {
1047 	unsigned long flags;
1048 	bool ret;
1049 
1050 	/*
1051 	 * Locking makes sure rcuog is done handling this rdp before deoffloaded
1052 	 * enqueue can happen. Also it keeps the SEGCBLIST_OFFLOADED flag stable
1053 	 * while the ->nocb_lock is held.
1054 	 */
1055 	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
1056 	ret = !rcu_segcblist_test_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
1057 	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
1058 
1059 	return ret;
1060 }
1061 
1062 static int rcu_nocb_rdp_deoffload(struct rcu_data *rdp)
1063 {
1064 	unsigned long flags;
1065 	int wake_gp;
1066 	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
1067 
1068 	/* CPU must be offline, unless it's early boot */
1069 	WARN_ON_ONCE(cpu_online(rdp->cpu) && rdp->cpu != raw_smp_processor_id());
1070 
1071 	pr_info("De-offloading %d\n", rdp->cpu);
1072 
1073 	/* Flush all callbacks from segcblist and bypass */
1074 	rcu_barrier();
1075 
1076 	/*
1077 	 * Make sure the rcuoc kthread isn't in the middle of a nocb locked
1078 	 * sequence while offloading is deactivated, along with nocb locking.
1079 	 */
1080 	if (rdp->nocb_cb_kthread)
1081 		kthread_park(rdp->nocb_cb_kthread);
1082 
1083 	rcu_nocb_lock_irqsave(rdp, flags);
1084 	WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
1085 	WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp->cblist));
1086 	rcu_nocb_unlock_irqrestore(rdp, flags);
1087 
1088 	wake_gp = rcu_nocb_queue_toggle_rdp(rdp);
1089 
1090 	mutex_lock(&rdp_gp->nocb_gp_kthread_mutex);
1091 
1092 	if (rdp_gp->nocb_gp_kthread) {
1093 		if (wake_gp)
1094 			wake_up_process(rdp_gp->nocb_gp_kthread);
1095 
1096 		swait_event_exclusive(rdp->nocb_state_wq,
1097 				      rcu_nocb_rdp_deoffload_wait_cond(rdp));
1098 	} else {
1099 		/*
1100 		 * No kthread to clear the flags for us or remove the rdp from the nocb list
1101 		 * to iterate. Do it here instead. Locking doesn't look strictly necessary
1102 		 * but we stick to paranoia in this rare path.
1103 		 */
1104 		raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
1105 		rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
1106 		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
1107 
1108 		list_del(&rdp->nocb_entry_rdp);
1109 	}
1110 
1111 	mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
1112 
1113 	return 0;
1114 }
1115 
1116 int rcu_nocb_cpu_deoffload(int cpu)
1117 {
1118 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
1119 	int ret = 0;
1120 
1121 	cpus_read_lock();
1122 	mutex_lock(&rcu_state.nocb_mutex);
1123 	if (rcu_rdp_is_offloaded(rdp)) {
1124 		if (!cpu_online(cpu)) {
1125 			ret = rcu_nocb_rdp_deoffload(rdp);
1126 			if (!ret)
1127 				cpumask_clear_cpu(cpu, rcu_nocb_mask);
1128 		} else {
1129 			pr_info("NOCB: Cannot CB-deoffload online CPU %d\n", rdp->cpu);
1130 			ret = -EINVAL;
1131 		}
1132 	}
1133 	mutex_unlock(&rcu_state.nocb_mutex);
1134 	cpus_read_unlock();
1135 
1136 	return ret;
1137 }
1138 EXPORT_SYMBOL_GPL(rcu_nocb_cpu_deoffload);
1139 
1140 static bool rcu_nocb_rdp_offload_wait_cond(struct rcu_data *rdp)
1141 {
1142 	unsigned long flags;
1143 	bool ret;
1144 
1145 	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
1146 	ret = rcu_segcblist_test_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
1147 	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
1148 
1149 	return ret;
1150 }
1151 
1152 static int rcu_nocb_rdp_offload(struct rcu_data *rdp)
1153 {
1154 	int wake_gp;
1155 	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
1156 
1157 	WARN_ON_ONCE(cpu_online(rdp->cpu));
1158 	/*
1159 	 * For now we only support re-offload, i.e., the rdp must have been
1160 	 * offloaded on boot first.
1161 	 */
1162 	if (!rdp->nocb_gp_rdp)
1163 		return -EINVAL;
1164 
1165 	if (WARN_ON_ONCE(!rdp_gp->nocb_gp_kthread))
1166 		return -EINVAL;
1167 
1168 	pr_info("Offloading %d\n", rdp->cpu);
1169 
1170 	WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
1171 	WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp->cblist));
1172 
1173 	wake_gp = rcu_nocb_queue_toggle_rdp(rdp);
1174 	if (wake_gp)
1175 		wake_up_process(rdp_gp->nocb_gp_kthread);
1176 
1177 	swait_event_exclusive(rdp->nocb_state_wq,
1178 			      rcu_nocb_rdp_offload_wait_cond(rdp));
1179 
1180 	kthread_unpark(rdp->nocb_cb_kthread);
1181 
1182 	return 0;
1183 }
1184 
1185 int rcu_nocb_cpu_offload(int cpu)
1186 {
1187 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
1188 	int ret = 0;
1189 
1190 	cpus_read_lock();
1191 	mutex_lock(&rcu_state.nocb_mutex);
1192 	if (!rcu_rdp_is_offloaded(rdp)) {
1193 		if (!cpu_online(cpu)) {
1194 			ret = rcu_nocb_rdp_offload(rdp);
1195 			if (!ret)
1196 				cpumask_set_cpu(cpu, rcu_nocb_mask);
1197 		} else {
1198 			pr_info("NOCB: Cannot CB-offload online CPU %d\n", rdp->cpu);
1199 			ret = -EINVAL;
1200 		}
1201 	}
1202 	mutex_unlock(&rcu_state.nocb_mutex);
1203 	cpus_read_unlock();
1204 
1205 	return ret;
1206 }
1207 EXPORT_SYMBOL_GPL(rcu_nocb_cpu_offload);
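/*
 * Illustrative only: a caller toggling offloading at runtime must do so
 * while the CPU is offline, along the lines of:
 *
 *	remove_cpu(cpu);
 *	ret = rcu_nocb_cpu_offload(cpu);	// or rcu_nocb_cpu_deoffload()
 *	add_cpu(cpu);
 *
 * as both helpers above return -EINVAL for an online CPU.
 */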
1208 
1209 #ifdef CONFIG_RCU_LAZY
1210 static unsigned long
1211 lazy_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1212 {
1213 	int cpu;
1214 	unsigned long count = 0;
1215 
1216 	if (WARN_ON_ONCE(!cpumask_available(rcu_nocb_mask)))
1217 		return 0;
1218 
1219 	/*  Protect rcu_nocb_mask against concurrent (de-)offloading. */
1220 	if (!mutex_trylock(&rcu_state.nocb_mutex))
1221 		return 0;
1222 
1223 	/* Snapshot count of all CPUs */
1224 	for_each_cpu(cpu, rcu_nocb_mask) {
1225 		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
1226 
1227 		count +=  READ_ONCE(rdp->lazy_len);
1228 	}
1229 
1230 	mutex_unlock(&rcu_state.nocb_mutex);
1231 
1232 	return count ? count : SHRINK_EMPTY;
1233 }
1234 
1235 static unsigned long
1236 lazy_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1237 {
1238 	int cpu;
1239 	unsigned long flags;
1240 	unsigned long count = 0;
1241 
1242 	if (WARN_ON_ONCE(!cpumask_available(rcu_nocb_mask)))
1243 		return 0;
1244 	/*
1245 	 * Protect against concurrent (de-)offloading. Otherwise nocb locking
1246 	 * may be ignored or imbalanced.
1247 	 */
1248 	if (!mutex_trylock(&rcu_state.nocb_mutex)) {
1249 		/*
1250 		 * But really don't insist if nocb_mutex is contended since we
1251 		 * can't guarantee that it will never engage in a dependency
1252 		 * chain involving memory allocation. The lock is seldom contended
1253 		 * anyway.
1254 		 */
1255 		return 0;
1256 	}
1257 
1258 	/* Snapshot count of all CPUs */
1259 	for_each_cpu(cpu, rcu_nocb_mask) {
1260 		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
1261 		int _count;
1262 
1263 		if (WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp)))
1264 			continue;
1265 
1266 		if (!READ_ONCE(rdp->lazy_len))
1267 			continue;
1268 
1269 		rcu_nocb_lock_irqsave(rdp, flags);
1270 		/*
1271 		 * Recheck under the nocb lock. Since we are not holding the bypass
1272 		 * lock, we may still race with increments from the enqueuer, but we
1273 		 * still know for sure whether there is at least one lazy callback.
1274 		 */
1275 		_count = READ_ONCE(rdp->lazy_len);
1276 		if (!_count) {
1277 			rcu_nocb_unlock_irqrestore(rdp, flags);
1278 			continue;
1279 		}
1280 		rcu_nocb_try_flush_bypass(rdp, jiffies);
1281 		rcu_nocb_unlock_irqrestore(rdp, flags);
1282 		wake_nocb_gp(rdp, false);
1283 		sc->nr_to_scan -= _count;
1284 		count += _count;
1285 		if (sc->nr_to_scan <= 0)
1286 			break;
1287 	}
1288 
1289 	mutex_unlock(&rcu_state.nocb_mutex);
1290 
1291 	return count ? count : SHRINK_STOP;
1292 }
1293 #endif // #ifdef CONFIG_RCU_LAZY
1294 
1295 void __init rcu_init_nohz(void)
1296 {
1297 	int cpu;
1298 	struct rcu_data *rdp;
1299 	const struct cpumask *cpumask = NULL;
1300 	struct shrinker * __maybe_unused lazy_rcu_shrinker;
1301 
1302 #if defined(CONFIG_NO_HZ_FULL)
1303 	if (tick_nohz_full_running && !cpumask_empty(tick_nohz_full_mask))
1304 		cpumask = tick_nohz_full_mask;
1305 #endif
1306 
1307 	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_DEFAULT_ALL) &&
1308 	    !rcu_state.nocb_is_setup && !cpumask)
1309 		cpumask = cpu_possible_mask;
1310 
1311 	if (cpumask) {
1312 		if (!cpumask_available(rcu_nocb_mask)) {
1313 			if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
1314 				pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
1315 				return;
1316 			}
1317 		}
1318 
1319 		cpumask_or(rcu_nocb_mask, rcu_nocb_mask, cpumask);
1320 		rcu_state.nocb_is_setup = true;
1321 	}
1322 
1323 	if (!rcu_state.nocb_is_setup)
1324 		return;
1325 
1326 #ifdef CONFIG_RCU_LAZY
1327 	lazy_rcu_shrinker = shrinker_alloc(0, "rcu-lazy");
1328 	if (!lazy_rcu_shrinker) {
1329 		pr_err("Failed to allocate lazy_rcu shrinker!\n");
1330 	} else {
1331 		lazy_rcu_shrinker->count_objects = lazy_rcu_shrink_count;
1332 		lazy_rcu_shrinker->scan_objects = lazy_rcu_shrink_scan;
1333 
1334 		shrinker_register(lazy_rcu_shrinker);
1335 	}
1336 #endif // #ifdef CONFIG_RCU_LAZY
1337 
1338 	if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
1339 		pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n");
1340 		cpumask_and(rcu_nocb_mask, cpu_possible_mask,
1341 			    rcu_nocb_mask);
1342 	}
1343 	if (cpumask_empty(rcu_nocb_mask))
1344 		pr_info("\tOffload RCU callbacks from CPUs: (none).\n");
1345 	else
1346 		pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
1347 			cpumask_pr_args(rcu_nocb_mask));
1348 	if (rcu_nocb_poll)
1349 		pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
1350 
1351 	for_each_cpu(cpu, rcu_nocb_mask) {
1352 		rdp = per_cpu_ptr(&rcu_data, cpu);
1353 		if (rcu_segcblist_empty(&rdp->cblist))
1354 			rcu_segcblist_init(&rdp->cblist);
1355 		rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
1356 	}
1357 	rcu_organize_nocb_kthreads();
1358 }
1359 
1360 /* Initialize per-rcu_data variables for no-CBs CPUs. */
1361 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
1362 {
1363 	init_swait_queue_head(&rdp->nocb_cb_wq);
1364 	init_swait_queue_head(&rdp->nocb_gp_wq);
1365 	init_swait_queue_head(&rdp->nocb_state_wq);
1366 	raw_spin_lock_init(&rdp->nocb_lock);
1367 	raw_spin_lock_init(&rdp->nocb_bypass_lock);
1368 	raw_spin_lock_init(&rdp->nocb_gp_lock);
1369 	timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
1370 	rcu_cblist_init(&rdp->nocb_bypass);
1371 	WRITE_ONCE(rdp->lazy_len, 0);
1372 	mutex_init(&rdp->nocb_gp_kthread_mutex);
1373 }
1374 
1375 /*
1376  * If the specified CPU is a no-CBs CPU that does not already have its
1377  * rcuo CB kthread, spawn it.  Additionally, if the rcuo GP kthread
1378  * for this CPU's group has not yet been created, spawn it as well.
1379  */
1380 static void rcu_spawn_cpu_nocb_kthread(int cpu)
1381 {
1382 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
1383 	struct rcu_data *rdp_gp;
1384 	struct task_struct *t;
1385 	struct sched_param sp;
1386 
1387 	if (!rcu_scheduler_fully_active || !rcu_state.nocb_is_setup)
1388 		return;
1389 
1390 	/* If there already is an rcuo kthread, then nothing to do. */
1391 	if (rdp->nocb_cb_kthread)
1392 		return;
1393 
1394 	/* If we didn't spawn the GP kthread first, reorganize! */
1395 	sp.sched_priority = kthread_prio;
1396 	rdp_gp = rdp->nocb_gp_rdp;
1397 	mutex_lock(&rdp_gp->nocb_gp_kthread_mutex);
1398 	if (!rdp_gp->nocb_gp_kthread) {
1399 		t = kthread_run(rcu_nocb_gp_kthread, rdp_gp,
1400 				"rcuog/%d", rdp_gp->cpu);
1401 		if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__)) {
1402 			mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
1403 			goto err;
1404 		}
1405 		WRITE_ONCE(rdp_gp->nocb_gp_kthread, t);
1406 		if (kthread_prio)
1407 			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1408 	}
1409 	mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
1410 
1411 	/* Spawn the kthread for this CPU. */
1412 	t = kthread_create(rcu_nocb_cb_kthread, rdp,
1413 			   "rcuo%c/%d", rcu_state.abbr, cpu);
1414 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
1415 		goto err;
1416 
1417 	if (rcu_rdp_is_offloaded(rdp))
1418 		wake_up_process(t);
1419 	else
1420 		kthread_park(t);
1421 
1422 	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_CB_BOOST) && kthread_prio)
1423 		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1424 
1425 	WRITE_ONCE(rdp->nocb_cb_kthread, t);
1426 	WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
1427 	return;
1428 
1429 err:
1430 	/*
1431 	 * No need to protect against concurrent rcu_barrier()
1432 	 * because the number of callbacks should be 0 for a non-boot CPU,
1433 	 * therefore rcu_barrier() shouldn't even try to grab the nocb_lock.
1434 	 * But hold nocb_mutex to avoid nocb_lock imbalance from shrinker.
1435 	 */
1436 	WARN_ON_ONCE(system_state > SYSTEM_BOOTING && rcu_segcblist_n_cbs(&rdp->cblist));
1437 	mutex_lock(&rcu_state.nocb_mutex);
1438 	if (rcu_rdp_is_offloaded(rdp)) {
1439 		rcu_nocb_rdp_deoffload(rdp);
1440 		cpumask_clear_cpu(cpu, rcu_nocb_mask);
1441 	}
1442 	mutex_unlock(&rcu_state.nocb_mutex);
1443 }
1444 
1445 /* How many CB CPU IDs per GP kthread?  Default of -1 for sqrt(nr_cpu_ids). */
1446 static int rcu_nocb_gp_stride = -1;
1447 module_param(rcu_nocb_gp_stride, int, 0444);
1448 
1449 /*
1450  * Initialize GP-CB relationships for all no-CBs CPUs.
1451  */
1452 static void __init rcu_organize_nocb_kthreads(void)
1453 {
1454 	int cpu;
1455 	bool firsttime = true;
1456 	bool gotnocbs = false;
1457 	bool gotnocbscbs = true;
1458 	int ls = rcu_nocb_gp_stride;
1459 	int nl = 0;  /* Next GP kthread. */
1460 	struct rcu_data *rdp;
1461 	struct rcu_data *rdp_gp = NULL;  /* Suppress misguided gcc warn. */
1462 
1463 	if (!cpumask_available(rcu_nocb_mask))
1464 		return;
1465 	if (ls == -1) {
1466 		ls = nr_cpu_ids / int_sqrt(nr_cpu_ids);
1467 		rcu_nocb_gp_stride = ls;
1468 	}
1469 
1470 	/*
1471 	 * Each pass through this loop sets up one rcu_data structure.
1472 	 * Should the corresponding CPU come online in the future, then
1473 	 * we will spawn the needed set of rcu_nocb_kthread() kthreads.
1474 	 */
1475 	for_each_possible_cpu(cpu) {
1476 		rdp = per_cpu_ptr(&rcu_data, cpu);
1477 		if (rdp->cpu >= nl) {
1478 			/* New GP kthread, set up for CBs & next GP. */
1479 			gotnocbs = true;
1480 			nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
1481 			rdp_gp = rdp;
1482 			INIT_LIST_HEAD(&rdp->nocb_head_rdp);
1483 			if (dump_tree) {
1484 				if (!firsttime)
1485 					pr_cont("%s\n", gotnocbscbs
1486 							? "" : " (self only)");
1487 				gotnocbscbs = false;
1488 				firsttime = false;
1489 				pr_alert("%s: No-CB GP kthread CPU %d:",
1490 					 __func__, cpu);
1491 			}
1492 		} else {
1493 			/* Another CB kthread, link to previous GP kthread. */
1494 			gotnocbscbs = true;
1495 			if (dump_tree)
1496 				pr_cont(" %d", cpu);
1497 		}
1498 		rdp->nocb_gp_rdp = rdp_gp;
1499 		if (cpumask_test_cpu(cpu, rcu_nocb_mask))
1500 			list_add_tail(&rdp->nocb_entry_rdp, &rdp_gp->nocb_head_rdp);
1501 	}
1502 	if (gotnocbs && dump_tree)
1503 		pr_cont("%s\n", gotnocbscbs ? "" : " (self only)");
1504 }
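/*
 * Worked example of the grouping above: with nr_cpu_ids = 64 and the
 * default rcu_nocb_gp_stride, ls = 64 / int_sqrt(64) = 8, so CPUs 0-7
 * share the rcuog kthread associated with CPU 0, CPUs 8-15 the one
 * associated with CPU 8, and so on.
 */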
1505 
1506 /*
1507  * Bind the current task to the offloaded CPUs.  If there are no offloaded
1508  * CPUs, leave the task unbound.  Splat if the bind attempt fails.
1509  */
1510 void rcu_bind_current_to_nocb(void)
1511 {
1512 	if (cpumask_available(rcu_nocb_mask) && !cpumask_empty(rcu_nocb_mask))
1513 		WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask));
1514 }
1515 EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
1516 
1517 // The ->on_cpu field is available only in CONFIG_SMP=y, so...
1518 #ifdef CONFIG_SMP
1519 static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
1520 {
1521 	return tsp && task_is_running(tsp) && !tsp->on_cpu ? "!" : "";
1522 }
1523 #else // #ifdef CONFIG_SMP
1524 static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
1525 {
1526 	return "";
1527 }
1528 #endif // #else #ifdef CONFIG_SMP
1529 
1530 /*
1531  * Dump out nocb grace-period kthread state for the specified rcu_data
1532  * structure.
1533  */
1534 static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
1535 {
1536 	struct rcu_node *rnp = rdp->mynode;
1537 
1538 	pr_info("nocb GP %d %c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu %c CPU %d%s\n",
1539 		rdp->cpu,
1540 		"kK"[!!rdp->nocb_gp_kthread],
1541 		"lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)],
1542 		"dD"[!!rdp->nocb_defer_wakeup],
1543 		"tT"[timer_pending(&rdp->nocb_timer)],
1544 		"sS"[!!rdp->nocb_gp_sleep],
1545 		".W"[swait_active(&rdp->nocb_gp_wq)],
1546 		".W"[swait_active(&rnp->nocb_gp_wq[0])],
1547 		".W"[swait_active(&rnp->nocb_gp_wq[1])],
1548 		".B"[!!rdp->nocb_gp_bypass],
1549 		".G"[!!rdp->nocb_gp_gp],
1550 		(long)rdp->nocb_gp_seq,
1551 		rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops),
1552 		rdp->nocb_gp_kthread ? task_state_to_char(rdp->nocb_gp_kthread) : '.',
1553 		rdp->nocb_gp_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,
1554 		show_rcu_should_be_on_cpu(rdp->nocb_gp_kthread));
1555 }
1556 
1557 /* Dump out nocb kthread state for the specified rcu_data structure. */
1558 static void show_rcu_nocb_state(struct rcu_data *rdp)
1559 {
1560 	char bufw[20];
1561 	char bufr[20];
1562 	struct rcu_data *nocb_next_rdp;
1563 	struct rcu_segcblist *rsclp = &rdp->cblist;
1564 	bool waslocked;
1565 	bool wassleep;
1566 
1567 	if (rdp->nocb_gp_rdp == rdp)
1568 		show_rcu_nocb_gp_state(rdp);
1569 
1570 	nocb_next_rdp = list_next_or_null_rcu(&rdp->nocb_gp_rdp->nocb_head_rdp,
1571 					      &rdp->nocb_entry_rdp,
1572 					      typeof(*rdp),
1573 					      nocb_entry_rdp);
1574 
1575 	sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]);
1576 	sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]);
1577 	pr_info("   CB %d^%d->%d %c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
1578 		rdp->cpu, rdp->nocb_gp_rdp->cpu,
1579 		nocb_next_rdp ? nocb_next_rdp->cpu : -1,
1580 		"kK"[!!rdp->nocb_cb_kthread],
1581 		"bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
1582 		"lL"[raw_spin_is_locked(&rdp->nocb_lock)],
1583 		"sS"[!!rdp->nocb_cb_sleep],
1584 		".W"[swait_active(&rdp->nocb_cb_wq)],
1585 		jiffies - rdp->nocb_bypass_first,
1586 		jiffies - rdp->nocb_nobypass_last,
1587 		rdp->nocb_nobypass_count,
1588 		".D"[rcu_segcblist_ready_cbs(rsclp)],
1589 		".W"[!rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL)],
1590 		rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL) ? "" : bufw,
1591 		".R"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL)],
1592 		rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL) ? "" : bufr,
1593 		".N"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_TAIL)],
1594 		".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)],
1595 		rcu_segcblist_n_cbs(&rdp->cblist),
1596 		rdp->nocb_cb_kthread ? task_state_to_char(rdp->nocb_cb_kthread) : '.',
1597 		rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_cb_kthread) : -1,
1598 		show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread));
1599 
1600 	/* It is OK for GP kthreads to have GP state. */
1601 	if (rdp->nocb_gp_rdp == rdp)
1602 		return;
1603 
1604 	waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock);
1605 	wassleep = swait_active(&rdp->nocb_gp_wq);
1606 	if (!rdp->nocb_gp_sleep && !waslocked && !wassleep)
1607 		return;  /* Nothing untoward. */
1608 
1609 	pr_info("   nocb GP activity on CB-only CPU!!! %c%c%c %c\n",
1610 		"lL"[waslocked],
1611 		"dD"[!!rdp->nocb_defer_wakeup],
1612 		"sS"[!!rdp->nocb_gp_sleep],
1613 		".W"[wassleep]);
1614 }
1615 
1616 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
1617 
1618 /* No ->nocb_lock to acquire.  */
1619 static void rcu_nocb_lock(struct rcu_data *rdp)
1620 {
1621 }
1622 
1623 /* No ->nocb_lock to release.  */
1624 static void rcu_nocb_unlock(struct rcu_data *rdp)
1625 {
1626 }
1627 
1628 /* No ->nocb_lock to release.  */
1629 static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
1630 				       unsigned long flags)
1631 {
1632 	local_irq_restore(flags);
1633 }
1634 
1635 /* Lockdep check that ->cblist may be safely accessed. */
1636 static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
1637 {
1638 	lockdep_assert_irqs_disabled();
1639 }
1640 
1641 static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
1642 {
1643 }
1644 
1645 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
1646 {
1647 	return NULL;
1648 }
1649 
1650 static void rcu_init_one_nocb(struct rcu_node *rnp)
1651 {
1652 }
1653 
1654 static bool wake_nocb_gp(struct rcu_data *rdp, bool force)
1655 {
1656 	return false;
1657 }
1658 
1659 static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
1660 				  unsigned long j, bool lazy)
1661 {
1662 	return true;
1663 }
1664 
1665 static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head,
1666 			  rcu_callback_t func, unsigned long flags, bool lazy)
1667 {
1668 	WARN_ON_ONCE(1);  /* Should be dead code! */
1669 }
1670 
1671 static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
1672 				 unsigned long flags)
1673 {
1674 	WARN_ON_ONCE(1);  /* Should be dead code! */
1675 }
1676 
1677 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
1678 {
1679 }
1680 
1681 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
1682 {
1683 	return false;
1684 }
1685 
1686 static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
1687 {
1688 	return false;
1689 }
1690 
1691 static void rcu_spawn_cpu_nocb_kthread(int cpu)
1692 {
1693 }
1694 
1695 static void show_rcu_nocb_state(struct rcu_data *rdp)
1696 {
1697 }
1698 
1699 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
1700