1 /* SPDX-License-Identifier: GPL-2.0+ */
2 /*
3  * Task-based RCU implementations.
4  *
5  * Copyright (C) 2020 Paul E. McKenney
6  */
7 
8 #ifdef CONFIG_TASKS_RCU_GENERIC
9 #include "rcu_segcblist.h"
10 
11 ////////////////////////////////////////////////////////////////////////
12 //
13 // Generic data structures.
14 
15 struct rcu_tasks;
16 typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
17 typedef void (*pregp_func_t)(struct list_head *hop);
18 typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
19 typedef void (*postscan_func_t)(struct list_head *hop);
20 typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
21 typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
22 
23 /**
24  * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
25  * @cblist: Callback list.
26  * @lock: Lock protecting per-CPU callback list.
27  * @rtp_jiffies: Jiffies counter value for statistics.
28  * @lazy_timer: Timer to unlazify callbacks.
29  * @urgent_gp: Number of additional non-lazy grace periods.
30  * @rtp_n_lock_retries: Rough lock-contention statistic.
31  * @rtp_work: Work queue for invoking callbacks.
32  * @rtp_irq_work: IRQ work queue for deferred wakeups.
33  * @barrier_q_head: RCU callback for barrier operation.
34  * @rtp_blkd_tasks: List of tasks blocked as readers.
35  * @cpu: CPU number corresponding to this entry.
36  * @rtpp: Pointer to the rcu_tasks structure.
37  */
38 struct rcu_tasks_percpu {
39 	struct rcu_segcblist cblist;
40 	raw_spinlock_t __private lock;
41 	unsigned long rtp_jiffies;
42 	unsigned long rtp_n_lock_retries;
43 	struct timer_list lazy_timer;
44 	unsigned int urgent_gp;
45 	struct work_struct rtp_work;
46 	struct irq_work rtp_irq_work;
47 	struct rcu_head barrier_q_head;
48 	struct list_head rtp_blkd_tasks;
49 	int cpu;
50 	struct rcu_tasks *rtpp;
51 };
52 
53 /**
54  * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
55  * @cbs_wait: RCU wait allowing a new callback to get kthread's attention.
56  * @cbs_gbl_lock: Lock protecting callback list.
57  * @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone.
58  * @gp_func: This flavor's grace-period-wait function.
59  * @gp_state: Grace period's most recent state transition (debugging).
60  * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
61  * @init_fract: Initial backoff sleep interval.
62  * @gp_jiffies: Time of last @gp_state transition.
63  * @gp_start: Most recent grace-period start in jiffies.
64  * @tasks_gp_seq: Number of grace periods completed since boot.
65  * @n_ipis: Number of IPIs sent to encourage grace periods to end.
66  * @n_ipis_fails: Number of IPI-send failures.
67  * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
68  * @lazy_jiffies: Number of jiffies to allow callbacks to be lazy.
69  * @pregp_func: This flavor's pre-grace-period function (optional).
70  * @pertask_func: This flavor's per-task scan function (optional).
71  * @postscan_func: This flavor's post-task scan function (optional).
72  * @holdouts_func: This flavor's holdout-list scan function (optional).
73  * @postgp_func: This flavor's post-grace-period function (optional).
74  * @call_func: This flavor's call_rcu()-equivalent function.
75  * @rtpcpu: This flavor's rcu_tasks_percpu structure.
76  * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
77  * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
78  * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
79  * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
80  * @barrier_q_mutex: Serialize barrier operations.
81  * @barrier_q_count: Number of queues being waited on.
82  * @barrier_q_completion: Barrier wait/wakeup mechanism.
83  * @barrier_q_seq: Sequence number for barrier operations.
84  * @name: This flavor's textual name.
85  * @kname: This flavor's kthread name.
86  */
87 struct rcu_tasks {
88 	struct rcuwait cbs_wait;
89 	raw_spinlock_t cbs_gbl_lock;
90 	struct mutex tasks_gp_mutex;
91 	int gp_state;
92 	int gp_sleep;
93 	int init_fract;
94 	unsigned long gp_jiffies;
95 	unsigned long gp_start;
96 	unsigned long tasks_gp_seq;
97 	unsigned long n_ipis;
98 	unsigned long n_ipis_fails;
99 	struct task_struct *kthread_ptr;
100 	unsigned long lazy_jiffies;
101 	rcu_tasks_gp_func_t gp_func;
102 	pregp_func_t pregp_func;
103 	pertask_func_t pertask_func;
104 	postscan_func_t postscan_func;
105 	holdouts_func_t holdouts_func;
106 	postgp_func_t postgp_func;
107 	call_rcu_func_t call_func;
108 	struct rcu_tasks_percpu __percpu *rtpcpu;
109 	int percpu_enqueue_shift;
110 	int percpu_enqueue_lim;
111 	int percpu_dequeue_lim;
112 	unsigned long percpu_dequeue_gpseq;
113 	struct mutex barrier_q_mutex;
114 	atomic_t barrier_q_count;
115 	struct completion barrier_q_completion;
116 	unsigned long barrier_q_seq;
117 	char *name;
118 	char *kname;
119 };
120 
121 static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);
122 
123 #define DEFINE_RCU_TASKS(rt_name, gp, call, n)						\
124 static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = {			\
125 	.lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock),		\
126 	.rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup),			\
127 };											\
128 static struct rcu_tasks rt_name =							\
129 {											\
130 	.cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait),				\
131 	.cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock),			\
132 	.tasks_gp_mutex = __MUTEX_INITIALIZER(rt_name.tasks_gp_mutex),			\
133 	.gp_func = gp,									\
134 	.call_func = call,								\
135 	.rtpcpu = &rt_name ## __percpu,							\
136 	.lazy_jiffies = DIV_ROUND_UP(HZ, 4),						\
137 	.name = n,									\
138 	.percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS),				\
139 	.percpu_enqueue_lim = 1,							\
140 	.percpu_dequeue_lim = 1,							\
141 	.barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex),		\
142 	.barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT,				\
143 	.kname = #rt_name,								\
144 }
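
// For example, the classic Tasks RCU flavor later in this file
// instantiates this macro as:
//
//	DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
//
// which creates the rcu_tasks structure plus its per-CPU rcu_tasks_percpu
// companion, with rcu_tasks_wait_gp() as the grace-period-wait function.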
145 
146 #ifdef CONFIG_TASKS_RCU
147 /* Track exiting tasks in order to allow them to be waited for. */
148 DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
149 
150 /* Report delay in synchronize_srcu() completion in rcu_tasks_postscan(). */
151 static void tasks_rcu_exit_srcu_stall(struct timer_list *unused);
152 static DEFINE_TIMER(tasks_rcu_exit_srcu_stall_timer, tasks_rcu_exit_srcu_stall);
153 #endif /* #ifdef CONFIG_TASKS_RCU */
154 
155 /* Avoid IPIing CPUs early in the grace period. */
156 #define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
157 static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
158 module_param(rcu_task_ipi_delay, int, 0644);
159 
160 /* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
161 #define RCU_TASK_BOOT_STALL_TIMEOUT (HZ * 30)
162 #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
163 static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
164 module_param(rcu_task_stall_timeout, int, 0644);
165 #define RCU_TASK_STALL_INFO (HZ * 10)
166 static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO;
167 module_param(rcu_task_stall_info, int, 0644);
168 static int rcu_task_stall_info_mult __read_mostly = 3;
169 module_param(rcu_task_stall_info_mult, int, 0444);
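
// For example, because these parameters typically land in the rcupdate
// module namespace (this file is included from kernel/rcu/update.c), the
// stall timeout can usually be set at boot time with:
//
//	rcupdate.rcu_task_stall_timeout=3000	// value in jiffies; <= 0 disables
//
// or adjusted at run time (note the 0644 permission above) via:
//
//	echo 3000 > /sys/module/rcupdate/parameters/rcu_task_stall_timeout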
170 
171 static int rcu_task_enqueue_lim __read_mostly = -1;
172 module_param(rcu_task_enqueue_lim, int, 0444);
173 
174 static bool rcu_task_cb_adjust;
175 static int rcu_task_contend_lim __read_mostly = 100;
176 module_param(rcu_task_contend_lim, int, 0444);
177 static int rcu_task_collapse_lim __read_mostly = 10;
178 module_param(rcu_task_collapse_lim, int, 0444);
179 static int rcu_task_lazy_lim __read_mostly = 32;
180 module_param(rcu_task_lazy_lim, int, 0444);
181 
182 /* RCU tasks grace-period state for debugging. */
183 #define RTGS_INIT		 0
184 #define RTGS_WAIT_WAIT_CBS	 1
185 #define RTGS_WAIT_GP		 2
186 #define RTGS_PRE_WAIT_GP	 3
187 #define RTGS_SCAN_TASKLIST	 4
188 #define RTGS_POST_SCAN_TASKLIST	 5
189 #define RTGS_WAIT_SCAN_HOLDOUTS	 6
190 #define RTGS_SCAN_HOLDOUTS	 7
191 #define RTGS_POST_GP		 8
192 #define RTGS_WAIT_READERS	 9
193 #define RTGS_INVOKE_CBS		10
194 #define RTGS_WAIT_CBS		11
195 #ifndef CONFIG_TINY_RCU
196 static const char * const rcu_tasks_gp_state_names[] = {
197 	"RTGS_INIT",
198 	"RTGS_WAIT_WAIT_CBS",
199 	"RTGS_WAIT_GP",
200 	"RTGS_PRE_WAIT_GP",
201 	"RTGS_SCAN_TASKLIST",
202 	"RTGS_POST_SCAN_TASKLIST",
203 	"RTGS_WAIT_SCAN_HOLDOUTS",
204 	"RTGS_SCAN_HOLDOUTS",
205 	"RTGS_POST_GP",
206 	"RTGS_WAIT_READERS",
207 	"RTGS_INVOKE_CBS",
208 	"RTGS_WAIT_CBS",
209 };
210 #endif /* #ifndef CONFIG_TINY_RCU */
211 
212 ////////////////////////////////////////////////////////////////////////
213 //
214 // Generic code.
215 
216 static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp);
217 
218 /* Record grace-period phase and time. */
219 static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
220 {
221 	rtp->gp_state = newstate;
222 	rtp->gp_jiffies = jiffies;
223 }
224 
225 #ifndef CONFIG_TINY_RCU
226 /* Return state name. */
227 static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
228 {
229 	int i = data_race(rtp->gp_state); // Let KCSAN detect update races
230 	int j = READ_ONCE(i); // Prevent the compiler from reading twice
231 
232 	if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
233 		return "???";
234 	return rcu_tasks_gp_state_names[j];
235 }
236 #endif /* #ifndef CONFIG_TINY_RCU */
237 
238 // Initialize per-CPU callback lists for the specified flavor of
239 // Tasks RCU.  Do not enqueue callbacks before this function is invoked.
240 static void cblist_init_generic(struct rcu_tasks *rtp)
241 {
242 	int cpu;
243 	unsigned long flags;
244 	int lim;
245 	int shift;
246 
247 	if (rcu_task_enqueue_lim < 0) {
248 		rcu_task_enqueue_lim = 1;
249 		rcu_task_cb_adjust = true;
250 	} else if (rcu_task_enqueue_lim == 0) {
251 		rcu_task_enqueue_lim = 1;
252 	}
253 	lim = rcu_task_enqueue_lim;
254 
255 	if (lim > nr_cpu_ids)
256 		lim = nr_cpu_ids;
257 	shift = ilog2(nr_cpu_ids / lim);
258 	if (((nr_cpu_ids - 1) >> shift) >= lim)
259 		shift++;
260 	WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
261 	WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
262 	smp_store_release(&rtp->percpu_enqueue_lim, lim);
263 	for_each_possible_cpu(cpu) {
264 		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
265 
266 		WARN_ON_ONCE(!rtpcp);
267 		if (cpu)
268 			raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
269 		local_irq_save(flags);  // serialize initialization
270 		if (rcu_segcblist_empty(&rtpcp->cblist))
271 			rcu_segcblist_init(&rtpcp->cblist);
272 		local_irq_restore(flags);
273 		INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
274 		rtpcp->cpu = cpu;
275 		rtpcp->rtpp = rtp;
276 		if (!rtpcp->rtp_blkd_tasks.next)
277 			INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
278 	}
279 
280 	pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d.\n", rtp->name,
281 			data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim), rcu_task_cb_adjust);
282 }
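
// A worked example of the shift computation above (a sketch assuming
// nr_cpu_ids == 8 and rcu_task_enqueue_lim left at its default of -1):
//
//	lim = 1;				// default single queue
//	shift = ilog2(8 / 1);			// == 3
//	((8 - 1) >> 3) >= 1 ? shift++ : 0;	// 0 >= 1 is false, so shift stays 3
//
// so call_rcu_tasks_generic() computes smp_processor_id() >> 3 == 0 for
// CPUs 0-7, funneling all enqueues onto the first queue until lock
// contention forces a switch to per-CPU queuing.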
283 
284 // Compute wakeup time for lazy callback timer.
285 static unsigned long rcu_tasks_lazy_time(struct rcu_tasks *rtp)
286 {
287 	return jiffies + rtp->lazy_jiffies;
288 }
289 
290 // Timer handler that unlazifies lazy callbacks.
291 static void call_rcu_tasks_generic_timer(struct timer_list *tlp)
292 {
293 	unsigned long flags;
294 	bool needwake = false;
295 	struct rcu_tasks *rtp;
296 	struct rcu_tasks_percpu *rtpcp = from_timer(rtpcp, tlp, lazy_timer);
297 
298 	rtp = rtpcp->rtpp;
299 	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
300 	if (!rcu_segcblist_empty(&rtpcp->cblist) && rtp->lazy_jiffies) {
301 		if (!rtpcp->urgent_gp)
302 			rtpcp->urgent_gp = 1;
303 		needwake = true;
304 		mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
305 	}
306 	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
307 	if (needwake)
308 		rcuwait_wake_up(&rtp->cbs_wait);
309 }
310 
311 // IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
312 static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
313 {
314 	struct rcu_tasks *rtp;
315 	struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);
316 
317 	rtp = rtpcp->rtpp;
318 	rcuwait_wake_up(&rtp->cbs_wait);
319 }
320 
321 // Enqueue a callback for the specified flavor of Tasks RCU.
322 static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
323 				   struct rcu_tasks *rtp)
324 {
325 	int chosen_cpu;
326 	unsigned long flags;
327 	bool havekthread = smp_load_acquire(&rtp->kthread_ptr);
328 	int ideal_cpu;
329 	unsigned long j;
330 	bool needadjust = false;
331 	bool needwake;
332 	struct rcu_tasks_percpu *rtpcp;
333 
334 	rhp->next = NULL;
335 	rhp->func = func;
336 	local_irq_save(flags);
337 	rcu_read_lock();
338 	ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
339 	chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
340 	rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu);
341 	if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
342 		raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
343 		j = jiffies;
344 		if (rtpcp->rtp_jiffies != j) {
345 			rtpcp->rtp_jiffies = j;
346 			rtpcp->rtp_n_lock_retries = 0;
347 		}
348 		if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
349 		    READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids)
350 			needadjust = true;  // Defer adjustment to avoid deadlock.
351 	}
352 	// Queuing callbacks before initialization not yet supported.
353 	if (WARN_ON_ONCE(!rcu_segcblist_is_enabled(&rtpcp->cblist)))
354 		rcu_segcblist_init(&rtpcp->cblist);
355 	needwake = (func == wakeme_after_rcu) ||
356 		   (rcu_segcblist_n_cbs(&rtpcp->cblist) == rcu_task_lazy_lim);
357 	if (havekthread && !needwake && !timer_pending(&rtpcp->lazy_timer)) {
358 		if (rtp->lazy_jiffies)
359 			mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
360 		else
361 			needwake = rcu_segcblist_empty(&rtpcp->cblist);
362 	}
363 	if (needwake)
364 		rtpcp->urgent_gp = 3;
365 	rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
366 	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
367 	if (unlikely(needadjust)) {
368 		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
369 		if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
370 			WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
371 			WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
372 			smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
373 			pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
374 		}
375 		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
376 	}
377 	rcu_read_unlock();
378 	/* If the grace-period kthread is up, wake it via deferred irq_work. */
379 	if (needwake && READ_ONCE(rtp->kthread_ptr))
380 		irq_work_queue(&rtpcp->rtp_irq_work);
381 }
382 
383 // RCU callback function for rcu_barrier_tasks_generic().
384 static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
385 {
386 	struct rcu_tasks *rtp;
387 	struct rcu_tasks_percpu *rtpcp;
388 
389 	rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
390 	rtp = rtpcp->rtpp;
391 	if (atomic_dec_and_test(&rtp->barrier_q_count))
392 		complete(&rtp->barrier_q_completion);
393 }
394 
395 // Wait for all in-flight callbacks for the specified RCU Tasks flavor.
396 // Operates in a manner similar to rcu_barrier().
397 static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
398 {
399 	int cpu;
400 	unsigned long flags;
401 	struct rcu_tasks_percpu *rtpcp;
402 	unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);
403 
404 	mutex_lock(&rtp->barrier_q_mutex);
405 	if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
406 		smp_mb();
407 		mutex_unlock(&rtp->barrier_q_mutex);
408 		return;
409 	}
410 	rcu_seq_start(&rtp->barrier_q_seq);
411 	init_completion(&rtp->barrier_q_completion);
412 	atomic_set(&rtp->barrier_q_count, 2);
413 	for_each_possible_cpu(cpu) {
414 		if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
415 			break;
416 		rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
417 		rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb;
418 		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
419 		if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head))
420 			atomic_inc(&rtp->barrier_q_count);
421 		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
422 	}
423 	if (atomic_sub_and_test(2, &rtp->barrier_q_count))
424 		complete(&rtp->barrier_q_completion);
425 	wait_for_completion(&rtp->barrier_q_completion);
426 	rcu_seq_end(&rtp->barrier_q_seq);
427 	mutex_unlock(&rtp->barrier_q_mutex);
428 }
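
// A worked trace of the counting scheme above (sketch): barrier_q_count
// starts at 2 so that the entrain-side increments cannot race the count
// down to zero while callbacks are still being queued.  With, say, two
// non-empty queues:
//
//	atomic_set(..., 2);		// count = 2
//	entrain on CPU 0 and CPU 1;	// count = 4
//	atomic_sub_and_test(2, ...);	// count = 2, not yet zero
//	both callbacks are invoked;	// count = 0, complete() is called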
429 
430 // Advance callbacks and indicate whether either a grace period or
431 // callback invocation is needed.
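// In the returned bitmask (a reading of the code below), bit 0x1 requests
// callback invocation and bit 0x2 requests a new grace period;
// rcu_tasks_one_gp() checks the 0x2 bit before starting a grace period.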
432 static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
433 {
434 	int cpu;
435 	int dequeue_limit;
436 	unsigned long flags;
437 	bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq);
438 	long n;
439 	long ncbs = 0;
440 	long ncbsnz = 0;
441 	int needgpcb = 0;
442 
443 	dequeue_limit = smp_load_acquire(&rtp->percpu_dequeue_lim);
444 	for (cpu = 0; cpu < dequeue_limit; cpu++) {
445 		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
446 
447 		/* Advance and accelerate any new callbacks. */
448 		if (!rcu_segcblist_n_cbs(&rtpcp->cblist))
449 			continue;
450 		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
451 		// Should we shrink down to a single callback queue?
452 		n = rcu_segcblist_n_cbs(&rtpcp->cblist);
453 		if (n) {
454 			ncbs += n;
455 			if (cpu > 0)
456 				ncbsnz += n;
457 		}
458 		rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
459 		(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
460 		if (rtpcp->urgent_gp > 0 && rcu_segcblist_pend_cbs(&rtpcp->cblist)) {
461 			if (rtp->lazy_jiffies)
462 				rtpcp->urgent_gp--;
463 			needgpcb |= 0x3;
464 		} else if (rcu_segcblist_empty(&rtpcp->cblist)) {
465 			rtpcp->urgent_gp = 0;
466 		}
467 		if (rcu_segcblist_ready_cbs(&rtpcp->cblist))
468 			needgpcb |= 0x1;
469 		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
470 	}
471 
472 	// Shrink down to a single callback queue if appropriate.
473 	// This is done in two stages: (1) If there are no more than
474 	// rcu_task_collapse_lim callbacks on CPU 0 and none on any other
475 	// CPU, limit enqueueing to CPU 0.  (2) After an RCU grace period,
476 	// if there has not been an increase in callbacks, limit dequeuing
477 	// to CPU 0.  Note the matching RCU read-side critical section in
478 	// call_rcu_tasks_generic().
479 	if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
480 		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
481 		if (rtp->percpu_enqueue_lim > 1) {
482 			WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
483 			smp_store_release(&rtp->percpu_enqueue_lim, 1);
484 			rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
485 			gpdone = false;
486 			pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
487 		}
488 		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
489 	}
490 	if (rcu_task_cb_adjust && !ncbsnz && gpdone) {
491 		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
492 		if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
493 			WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
494 			pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
495 		}
496 		if (rtp->percpu_dequeue_lim == 1) {
497 			for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) {
498 				struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
499 
500 				WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
501 			}
502 		}
503 		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
504 	}
505 
506 	return needgpcb;
507 }
508 
509 // Advance callbacks and invoke any that are ready.
510 static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
511 {
512 	int cpu;
513 	int cpunext;
514 	int cpuwq;
515 	unsigned long flags;
516 	int len;
517 	struct rcu_head *rhp;
518 	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
519 	struct rcu_tasks_percpu *rtpcp_next;
520 
521 	cpu = rtpcp->cpu;
522 	cpunext = cpu * 2 + 1;
523 	if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
524 		rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
525 		cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
526 		queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
527 		cpunext++;
528 		if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
529 			rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
530 			cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
531 			queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
532 		}
533 	}
534 
535 	if (rcu_segcblist_empty(&rtpcp->cblist) || !cpu_possible(cpu))
536 		return;
537 	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
538 	rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
539 	rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
540 	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
541 	len = rcl.len;
542 	for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
543 		debug_rcu_head_callback(rhp);
544 		local_bh_disable();
545 		rhp->func(rhp);
546 		local_bh_enable();
547 		cond_resched();
548 	}
549 	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
550 	rcu_segcblist_add_len(&rtpcp->cblist, -len);
551 	(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
552 	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
553 }
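
// The cpunext arithmetic above fans callback invocation out as a binary
// tree over the in-use queues.  For example, with percpu_dequeue_lim == 8:
//
//	CPU 0's worker queues work for CPUs 1 and 2,
//	CPU 1's worker queues work for CPUs 3 and 4,
//	CPU 2's worker queues work for CPUs 5 and 6,
//	CPU 3's worker queues work for CPU 7,
//
// so invocation proceeds in parallel after O(log n) kick-off steps.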
554 
555 // Workqueue flood to advance callbacks and invoke any that are ready.
556 static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp)
557 {
558 	struct rcu_tasks *rtp;
559 	struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work);
560 
561 	rtp = rtpcp->rtpp;
562 	rcu_tasks_invoke_cbs(rtp, rtpcp);
563 }
564 
565 // Wait for one grace period.
566 static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot)
567 {
568 	int needgpcb;
569 
570 	mutex_lock(&rtp->tasks_gp_mutex);
571 
572 	// During mid-boot, force a grace period; otherwise wait for callbacks.
573 	if (unlikely(midboot)) {
574 		needgpcb = 0x2;
575 	} else {
576 		mutex_unlock(&rtp->tasks_gp_mutex);
577 		set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
578 		rcuwait_wait_event(&rtp->cbs_wait,
579 				   (needgpcb = rcu_tasks_need_gpcb(rtp)),
580 				   TASK_IDLE);
581 		mutex_lock(&rtp->tasks_gp_mutex);
582 	}
583 
584 	if (needgpcb & 0x2) {
585 		// Wait for one grace period.
586 		set_tasks_gp_state(rtp, RTGS_WAIT_GP);
587 		rtp->gp_start = jiffies;
588 		rcu_seq_start(&rtp->tasks_gp_seq);
589 		rtp->gp_func(rtp);
590 		rcu_seq_end(&rtp->tasks_gp_seq);
591 	}
592 
593 	// Invoke callbacks.
594 	set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
595 	rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));
596 	mutex_unlock(&rtp->tasks_gp_mutex);
597 }
598 
599 // RCU-tasks kthread that detects grace periods and invokes callbacks.
600 static int __noreturn rcu_tasks_kthread(void *arg)
601 {
602 	int cpu;
603 	struct rcu_tasks *rtp = arg;
604 
605 	for_each_possible_cpu(cpu) {
606 		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
607 
608 		timer_setup(&rtpcp->lazy_timer, call_rcu_tasks_generic_timer, 0);
609 		rtpcp->urgent_gp = 1;
610 	}
611 
612 	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
613 	housekeeping_affine(current, HK_TYPE_RCU);
614 	smp_store_release(&rtp->kthread_ptr, current); // Let GPs start!
615 
616 	/*
617 	 * Each pass through the following loop makes one check for
618 	 * newly arrived callbacks, and, if there are some, waits for
619 	 * one RCU-tasks grace period and then invokes the callbacks.
620 	 * This loop is terminated by the system going down.  ;-)
621 	 */
622 	for (;;) {
623 		// Wait for one grace period and invoke any callbacks
624 		// that are ready.
625 		rcu_tasks_one_gp(rtp, false);
626 
627 		// Paranoid sleep to keep this from entering a tight loop.
628 		schedule_timeout_idle(rtp->gp_sleep);
629 	}
630 }
631 
632 // Wait for a grace period for the specified flavor of Tasks RCU.
633 static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
634 {
635 	/* Complain if the scheduler has not started.  */
636 	if (WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
637 			 "synchronize_%s() called too soon", rtp->name))
638 		return;
639 
640 	// If the grace-period kthread is running, use it.
641 	if (READ_ONCE(rtp->kthread_ptr)) {
642 		wait_rcu_gp(rtp->call_func);
643 		return;
644 	}
645 	rcu_tasks_one_gp(rtp, true);
646 }
647 
648 /* Spawn RCU-tasks grace-period kthread. */
649 static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
650 {
651 	struct task_struct *t;
652 
653 	t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
654 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
655 		return;
656 	smp_mb(); /* Ensure others see full kthread. */
657 }
658 
659 #ifndef CONFIG_TINY_RCU
660 
661 /*
662  * Print any non-default Tasks RCU settings.
663  */
664 static void __init rcu_tasks_bootup_oddness(void)
665 {
666 #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
667 	int rtsimc;
668 
669 	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
670 		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
671 	rtsimc = clamp(rcu_task_stall_info_mult, 1, 10);
672 	if (rtsimc != rcu_task_stall_info_mult) {
673 		pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc);
674 		rcu_task_stall_info_mult = rtsimc;
675 	}
676 #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
677 #ifdef CONFIG_TASKS_RCU
678 	pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
679 #endif /* #ifdef CONFIG_TASKS_RCU */
680 #ifdef CONFIG_TASKS_RUDE_RCU
681 	pr_info("\tRude variant of Tasks RCU enabled.\n");
682 #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
683 #ifdef CONFIG_TASKS_TRACE_RCU
684 	pr_info("\tTracing variant of Tasks RCU enabled.\n");
685 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
686 }
687 
688 #endif /* #ifndef CONFIG_TINY_RCU */
689 
690 #ifndef CONFIG_TINY_RCU
691 /* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
692 static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
693 {
694 	int cpu;
695 	bool havecbs = false;
696 	bool haveurgent = false;
697 	bool haveurgentcbs = false;
698 
699 	for_each_possible_cpu(cpu) {
700 		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
701 
702 		if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)))
703 			havecbs = true;
704 		if (data_race(rtpcp->urgent_gp))
705 			haveurgent = true;
706 		if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)) && data_race(rtpcp->urgent_gp))
707 			haveurgentcbs = true;
708 		if (havecbs && haveurgent && haveurgentcbs)
709 			break;
710 	}
711 	pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c%c%c l:%lu %s\n",
712 		rtp->kname,
713 		tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
714 		jiffies - data_race(rtp->gp_jiffies),
715 		data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
716 		data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
717 		".k"[!!data_race(rtp->kthread_ptr)],
718 		".C"[havecbs],
719 		".u"[haveurgent],
720 		".U"[haveurgentcbs],
721 		rtp->lazy_jiffies,
722 		s);
723 }
724 #endif // #ifndef CONFIG_TINY_RCU
725 
726 static void exit_tasks_rcu_finish_trace(struct task_struct *t);
727 
728 #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
729 
730 ////////////////////////////////////////////////////////////////////////
731 //
732 // Shared code between task-list-scanning variants of Tasks RCU.
733 
734 /* Wait for one RCU-tasks grace period. */
735 static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
736 {
737 	struct task_struct *g;
738 	int fract;
739 	LIST_HEAD(holdouts);
740 	unsigned long j;
741 	unsigned long lastinfo;
742 	unsigned long lastreport;
743 	bool reported = false;
744 	int rtsi;
745 	struct task_struct *t;
746 
747 	set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
748 	rtp->pregp_func(&holdouts);
749 
750 	/*
751 	 * There were callbacks, so we need to wait for an RCU-tasks
752 	 * grace period.  Start off by scanning the task list for tasks
753 	 * that are not already voluntarily blocked.  Mark these tasks
754 	 * and make a list of them in holdouts.
755 	 */
756 	set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
757 	if (rtp->pertask_func) {
758 		rcu_read_lock();
759 		for_each_process_thread(g, t)
760 			rtp->pertask_func(t, &holdouts);
761 		rcu_read_unlock();
762 	}
763 
764 	set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
765 	rtp->postscan_func(&holdouts);
766 
767 	/*
768 	 * Each pass through the following loop scans the list of holdout
769 	 * tasks, removing any that are no longer holdouts.  When the list
770 	 * is empty, we are done.
771 	 */
772 	lastreport = jiffies;
773 	lastinfo = lastreport;
774 	rtsi = READ_ONCE(rcu_task_stall_info);
775 
776 	// Start off with initial wait and slowly back off to 1 HZ wait.
777 	fract = rtp->init_fract;
778 
779 	while (!list_empty(&holdouts)) {
780 		ktime_t exp;
781 		bool firstreport;
782 		bool needreport;
783 		int rtst;
784 
785 		// Slowly back off waiting for holdouts
786 		set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
787 		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
788 			schedule_timeout_idle(fract);
789 		} else {
790 			exp = jiffies_to_nsecs(fract);
791 			__set_current_state(TASK_IDLE);
792 			schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD);
793 		}
794 
795 		if (fract < HZ)
796 			fract++;
797 
798 		rtst = READ_ONCE(rcu_task_stall_timeout);
799 		needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
800 		if (needreport) {
801 			lastreport = jiffies;
802 			reported = true;
803 		}
804 		firstreport = true;
805 		WARN_ON(signal_pending(current));
806 		set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
807 		rtp->holdouts_func(&holdouts, needreport, &firstreport);
808 
809 		// Print pre-stall informational messages if needed.
810 		j = jiffies;
811 		if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) {
812 			lastinfo = j;
813 			rtsi = rtsi * rcu_task_stall_info_mult;
814 			pr_info("%s: %s grace period number %lu (since boot) is %lu jiffies old.\n",
815 				__func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start);
816 		}
817 	}
818 
819 	set_tasks_gp_state(rtp, RTGS_POST_GP);
820 	rtp->postgp_func(rtp);
821 }
822 
823 #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
824 
825 #ifdef CONFIG_TASKS_RCU
826 
827 ////////////////////////////////////////////////////////////////////////
828 //
829 // Simple variant of RCU whose quiescent states are voluntary context
830 // switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
831 // As such, grace periods can take one good long time.  There are no
832 // read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
833 // because this implementation is intended to get the system into a safe
834 // state for some of the manipulations involved in tracing and the like.
835 // Finally, this implementation does not support high call_rcu_tasks()
836 // rates from multiple CPUs.  If this is required, per-CPU callback lists
837 // will be needed.
838 //
839 // The implementation uses rcu_tasks_wait_gp(), which relies on function
840 // pointers in the rcu_tasks structure.  The rcu_spawn_tasks_kthread()
841 // function sets these function pointers up so that rcu_tasks_wait_gp()
842 // invokes these functions in this order:
843 //
844 // rcu_tasks_pregp_step():
845 //	Invokes synchronize_rcu() in order to wait for all in-flight
846 //	t->on_rq and t->nvcsw transitions to complete.	This works because
847 //	all such transitions are carried out with interrupts disabled.
848 // rcu_tasks_pertask(), invoked on every non-idle task:
849 //	For every runnable non-idle task other than the current one, use
850 //	get_task_struct() to pin down that task, snapshot that task's
851 //	number of voluntary context switches, and add that task to the
852 //	holdout list.
853 // rcu_tasks_postscan():
854 //	Invoke synchronize_srcu() to ensure that all tasks that were
855 //	in the process of exiting (and which thus might not know to
856 //	synchronize with this RCU Tasks grace period) have completed
857 //	exiting.
858 // check_all_holdout_tasks(), repeatedly until holdout list is empty:
859 //	Scans the holdout list, attempting to identify a quiescent state
860 //	for each task on the list.  If there is a quiescent state, the
861 //	corresponding task is removed from the holdout list.
862 // rcu_tasks_postgp():
863 //	Invokes synchronize_rcu() in order to ensure that all prior
864 //	t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
865 //	to have happened before the end of this RCU Tasks grace period.
866 //	Again, this works because all such transitions are carried out
867 //	with interrupts disabled.
868 //
869 // For each exiting task, the exit_tasks_rcu_start() and
870 // exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU
871 // read-side critical sections waited for by rcu_tasks_postscan().
872 //
873 // Pre-grace-period update-side code is ordered before the grace period
874 // via the raw_spin_lock.*rcu_node().  Pre-grace-period read-side code
875 // is ordered before the grace period via synchronize_rcu() call in
876 // rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
877 // disabling.
878 
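// A condensed sketch of the resulting grace-period sequence, with the
// function pointers wired up as in rcu_spawn_tasks_kthread() below
// (state updates and stall reporting omitted):
//
//	rcu_tasks_pregp_step(&holdouts);		// synchronize_rcu()
//	for_each_process_thread(g, t)
//		rcu_tasks_pertask(t, &holdouts);	// build holdout list
//	rcu_tasks_postscan(&holdouts);			// synchronize_srcu()
//	while (!list_empty(&holdouts))
//		check_all_holdout_tasks(&holdouts, needreport, &firstreport);
//	rcu_tasks_postgp(rtp);				// synchronize_rcu()
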
879 /* Pre-grace-period preparation. */
880 static void rcu_tasks_pregp_step(struct list_head *hop)
881 {
882 	/*
883 	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
884 	 * to complete.  Invoking synchronize_rcu() suffices because all
885 	 * these transitions occur with interrupts disabled.  Without this
886 	 * synchronize_rcu(), a read-side critical section that started
887 	 * before the grace period might be incorrectly seen as having
888 	 * started after the grace period.
889 	 *
890 	 * This synchronize_rcu() also dispenses with the need for a
891 	 * memory barrier on the first store to t->rcu_tasks_holdout,
892 	 * as it forces the store to happen after the beginning of the
893 	 * grace period.
894 	 */
895 	synchronize_rcu();
896 }
897 
898 /* Check for quiescent states since the pregp's synchronize_rcu() */
899 static bool rcu_tasks_is_holdout(struct task_struct *t)
900 {
901 	int cpu;
902 
903 	/* Has the task been seen voluntarily sleeping? */
904 	if (!READ_ONCE(t->on_rq))
905 		return false;
906 
907 	/*
908 	 * Idle tasks (or idle injection) within the idle loop are RCU-tasks
909 	 * quiescent states. But CPU boot code performed by the idle task
910 	 * isn't a quiescent state.
911 	 */
912 	if (is_idle_task(t))
913 		return false;
914 
915 	cpu = task_cpu(t);
916 
917 	/* Idle tasks on offline CPUs are RCU-tasks quiescent states. */
918 	if (t == idle_task(cpu) && !rcu_cpu_online(cpu))
919 		return false;
920 
921 	return true;
922 }
923 
924 /* Per-task initial processing. */
925 static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
926 {
927 	if (t != current && rcu_tasks_is_holdout(t)) {
928 		get_task_struct(t);
929 		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
930 		WRITE_ONCE(t->rcu_tasks_holdout, true);
931 		list_add(&t->rcu_tasks_holdout_list, hop);
932 	}
933 }
934 
935 /* Processing between scanning the tasklist and draining the holdout list. */
936 static void rcu_tasks_postscan(struct list_head *hop)
937 {
938 	int rtsi = READ_ONCE(rcu_task_stall_info);
939 
940 	if (!IS_ENABLED(CONFIG_TINY_RCU)) {
941 		tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
942 		add_timer(&tasks_rcu_exit_srcu_stall_timer);
943 	}
944 
945 	/*
946 	 * Exiting tasks may escape the tasklist scan. Those are vulnerable
947 	 * until their final schedule() with TASK_DEAD state. To cope with
948 	 * this, divide the fragile part of the exit path into two intersecting
949 	 * read-side critical sections:
950 	 *
951 	 * 1) An _SRCU_ read side starting before calling exit_notify(),
952 	 *    which may remove the task from the tasklist, and ending after
953 	 *    the final preempt_disable() call in do_exit().
954 	 *
955 	 * 2) An _RCU_ read side starting with the final preempt_disable()
956 	 *    call in do_exit() and ending with the final call to schedule()
957 	 *    with TASK_DEAD state.
958 	 *
959 	 * This handles part 1).  The postgp step will handle part 2) with a
960 	 * call to synchronize_rcu().
961 	 */
962 	synchronize_srcu(&tasks_rcu_exit_srcu);
963 
964 	if (!IS_ENABLED(CONFIG_TINY_RCU))
965 		del_timer_sync(&tasks_rcu_exit_srcu_stall_timer);
966 }
967 
968 /* See if tasks are still holding out, complain if so. */
969 static void check_holdout_task(struct task_struct *t,
970 			       bool needreport, bool *firstreport)
971 {
972 	int cpu;
973 
974 	if (!READ_ONCE(t->rcu_tasks_holdout) ||
975 	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
976 	    !rcu_tasks_is_holdout(t) ||
977 	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
978 	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
979 		WRITE_ONCE(t->rcu_tasks_holdout, false);
980 		list_del_init(&t->rcu_tasks_holdout_list);
981 		put_task_struct(t);
982 		return;
983 	}
984 	rcu_request_urgent_qs_task(t);
985 	if (!needreport)
986 		return;
987 	if (*firstreport) {
988 		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
989 		*firstreport = false;
990 	}
991 	cpu = task_cpu(t);
992 	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
993 		 t, ".I"[is_idle_task(t)],
994 		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
995 		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
996 		 t->rcu_tasks_idle_cpu, cpu);
997 	sched_show_task(t);
998 }
999 
1000 /* Scan the holdout lists for tasks no longer holding out. */
1001 static void check_all_holdout_tasks(struct list_head *hop,
1002 				    bool needreport, bool *firstreport)
1003 {
1004 	struct task_struct *t, *t1;
1005 
1006 	list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
1007 		check_holdout_task(t, needreport, firstreport);
1008 		cond_resched();
1009 	}
1010 }
1011 
1012 /* Finish off the Tasks-RCU grace period. */
1013 static void rcu_tasks_postgp(struct rcu_tasks *rtp)
1014 {
1015 	/*
1016 	 * Because ->on_rq and ->nvcsw are not guaranteed to be preceded by
1017 	 * full memory barriers in the schedule() path, memory
1018 	 * reordering on other CPUs could cause their RCU-tasks read-side
1019 	 * critical sections to extend past the end of the grace period.
1020 	 * However, because these ->nvcsw updates are carried out with
1021 	 * interrupts disabled, we can use synchronize_rcu() to force the
1022 	 * needed ordering on all such CPUs.
1023 	 *
1024 	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
1025 	 * accesses to be within the grace period, avoiding the need for
1026 	 * memory barriers for ->rcu_tasks_holdout accesses.
1027 	 *
1028 	 * In addition, this synchronize_rcu() waits for exiting tasks
1029 	 * to complete their final preempt_disable() region of execution,
1030 	 * cleaning up after synchronize_srcu(&tasks_rcu_exit_srcu) by
1031 	 * ensuring that the whole region from tasklist removal until the
1032 	 * final schedule() with TASK_DEAD state is treated as an RCU Tasks
1033 	 * read-side critical section.
1034 	 */
1035 	synchronize_rcu();
1036 }
1037 
1038 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
1039 DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
1040 
1041 static void tasks_rcu_exit_srcu_stall(struct timer_list *unused)
1042 {
1043 #ifndef CONFIG_TINY_RCU
1044 	int rtsi;
1045 
1046 	rtsi = READ_ONCE(rcu_task_stall_info);
1047 	pr_info("%s: %s grace period number %lu (since boot) gp_state: %s is %lu jiffies old.\n",
1048 		__func__, rcu_tasks.kname, rcu_tasks.tasks_gp_seq,
1049 		tasks_gp_state_getname(&rcu_tasks), jiffies - rcu_tasks.gp_jiffies);
1050 	pr_info("Please check any exiting tasks stuck between calls to exit_tasks_rcu_start() and exit_tasks_rcu_finish()\n");
1051 	tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
1052 	add_timer(&tasks_rcu_exit_srcu_stall_timer);
1053 #endif // #ifndef CONFIG_TINY_RCU
1054 }
1055 
1056 /**
1057  * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
1058  * @rhp: structure to be used for queueing the RCU updates.
1059  * @func: actual callback function to be invoked after the grace period
1060  *
1061  * The callback function will be invoked some time after a full grace
1062  * period elapses, in other words after all currently executing RCU
1063  * read-side critical sections have completed. call_rcu_tasks() assumes
1064  * that the read-side critical sections end at a voluntary context
1065  * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
1066  * or transition to usermode execution.  As such, there are no read-side
1067  * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
1068  * this primitive is intended to determine that all tasks have passed
1069  * through a safe state, not so much for data-structure synchronization.
1070  *
1071  * See the description of call_rcu() for more detailed information on
1072  * memory ordering guarantees.
1073  */
1074 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
1075 {
1076 	call_rcu_tasks_generic(rhp, func, &rcu_tasks);
1077 }
1078 EXPORT_SYMBOL_GPL(call_rcu_tasks);
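
/*
 * A minimal usage sketch (the structure, field, and callback names below
 * are hypothetical, not part of this file):
 *
 *	struct my_trampoline {
 *		void *text;
 *		struct rcu_head rh;
 *	};
 *
 *	static void my_trampoline_free_cb(struct rcu_head *rhp)
 *	{
 *		struct my_trampoline *tp = container_of(rhp, struct my_trampoline, rh);
 *
 *		kfree(tp->text);
 *		kfree(tp);
 *	}
 *
 *	// After removing all references that tracing might still follow:
 *	call_rcu_tasks(&tp->rh, my_trampoline_free_cb);
 */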
1079 
1080 /**
1081  * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
1082  *
1083  * Control will return to the caller some time after a full rcu-tasks
1084  * grace period has elapsed, in other words after all currently
1085  * executing rcu-tasks read-side critical sections have completed.  These
1086  * read-side critical sections are delimited by calls to schedule(),
1087  * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
1088  * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
1089  *
1090  * This is a very specialized primitive, intended only for a few uses in
1091  * tracing and other situations requiring manipulation of function
1092  * preambles and profiling hooks.  The synchronize_rcu_tasks() function
1093  * is not (yet) intended for heavy use from multiple CPUs.
1094  *
1095  * See the description of synchronize_rcu() for more detailed information
1096  * on memory ordering guarantees.
1097  */
1098 void synchronize_rcu_tasks(void)
1099 {
1100 	synchronize_rcu_tasks_generic(&rcu_tasks);
1101 }
1102 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
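
/*
 * A typical synchronous pattern (sketch; the unregister and free steps are
 * caller-specific and hypothetical here):
 *
 *	unregister_my_tracing_hook(hook);	// no new entries into the trampoline
 *	synchronize_rcu_tasks();		// wait out tasks preempted inside it
 *	free_my_trampoline(hook->trampoline);	// now safe to free
 */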
1103 
1104 /**
1105  * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
1106  *
1107  * Although the current implementation is guaranteed to wait, it is not
1108  * obligated to, for example, if there are no pending callbacks.
1109  */
1110 void rcu_barrier_tasks(void)
1111 {
1112 	rcu_barrier_tasks_generic(&rcu_tasks);
1113 }
1114 EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
1115 
1116 static int rcu_tasks_lazy_ms = -1;
1117 module_param(rcu_tasks_lazy_ms, int, 0444);
1118 
1119 static int __init rcu_spawn_tasks_kthread(void)
1120 {
1121 	cblist_init_generic(&rcu_tasks);
1122 	rcu_tasks.gp_sleep = HZ / 10;
1123 	rcu_tasks.init_fract = HZ / 10;
1124 	if (rcu_tasks_lazy_ms >= 0)
1125 		rcu_tasks.lazy_jiffies = msecs_to_jiffies(rcu_tasks_lazy_ms);
1126 	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
1127 	rcu_tasks.pertask_func = rcu_tasks_pertask;
1128 	rcu_tasks.postscan_func = rcu_tasks_postscan;
1129 	rcu_tasks.holdouts_func = check_all_holdout_tasks;
1130 	rcu_tasks.postgp_func = rcu_tasks_postgp;
1131 	rcu_spawn_tasks_kthread_generic(&rcu_tasks);
1132 	return 0;
1133 }
1134 
1135 #if !defined(CONFIG_TINY_RCU)
1136 void show_rcu_tasks_classic_gp_kthread(void)
1137 {
1138 	show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
1139 }
1140 EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
1141 #endif // !defined(CONFIG_TINY_RCU)
1142 
1143 struct task_struct *get_rcu_tasks_gp_kthread(void)
1144 {
1145 	return rcu_tasks.kthread_ptr;
1146 }
1147 EXPORT_SYMBOL_GPL(get_rcu_tasks_gp_kthread);
1148 
1149 /*
1150  * Contribute to protect against tasklist scan blind spot while the
1151  * task is exiting and may be removed from the tasklist. See
1152  * corresponding synchronize_srcu() for further details.
1153  */
1154 void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
1155 {
1156 	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
1157 }
1158 
1159 /*
1160  * Contribute to protect against tasklist scan blind spot while the
1161  * task is exiting and may be removed from the tasklist. See
1162  * corresponding synchronize_srcu() for further details.
1163  */
1164 void exit_tasks_rcu_stop(void) __releases(&tasks_rcu_exit_srcu)
1165 {
1166 	struct task_struct *t = current;
1167 
1168 	__srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
1169 }
1170 
1171 /*
1172  * Contribute to protect against tasklist scan blind spot while the
1173  * task is exiting and may be removed from the tasklist. See
1174  * corresponding synchronize_srcu() for further details.
1175  */
1176 void exit_tasks_rcu_finish(void)
1177 {
1178 	exit_tasks_rcu_stop();
1179 	exit_tasks_rcu_finish_trace(current);
1180 }
1181 
1182 #else /* #ifdef CONFIG_TASKS_RCU */
1183 void exit_tasks_rcu_start(void) { }
1184 void exit_tasks_rcu_stop(void) { }
1185 void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
1186 #endif /* #else #ifdef CONFIG_TASKS_RCU */
1187 
1188 #ifdef CONFIG_TASKS_RUDE_RCU
1189 
1190 ////////////////////////////////////////////////////////////////////////
1191 //
1192 // "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
1193 // passing an empty function to schedule_on_each_cpu().  This approach
1194 // provides an asynchronous call_rcu_tasks_rude() API and batching of
1195 // concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
1196 // This invokes schedule_on_each_cpu() in order to send IPIs far and wide
1197 // and induces otherwise unnecessary context switches on all online CPUs,
1198 // whether idle or not.
1199 //
1200 // Callback handling is provided by the rcu_tasks_kthread() function.
1201 //
1202 // Ordering is provided by the scheduler's context-switch code.
1203 
1204 // Empty function to allow workqueues to force a context switch.
1205 static void rcu_tasks_be_rude(struct work_struct *work)
1206 {
1207 }
1208 
1209 // Wait for one rude RCU-tasks grace period.
1210 static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
1211 {
1212 	rtp->n_ipis += cpumask_weight(cpu_online_mask);
1213 	schedule_on_each_cpu(rcu_tasks_be_rude);
1214 }
1215 
1216 void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
1217 DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
1218 		 "RCU Tasks Rude");
1219 
1220 /**
1221  * call_rcu_tasks_rude() - Queue a callback for invocation after a rude task-based grace period
1222  * @rhp: structure to be used for queueing the RCU updates.
1223  * @func: actual callback function to be invoked after the grace period
1224  *
1225  * The callback function will be invoked some time after a full grace
1226  * period elapses, in other words after all currently executing RCU
1227  * read-side critical sections have completed. call_rcu_tasks_rude()
1228  * assumes that the read-side critical sections end at context switch,
1229  * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as
1230  * usermode execution is schedulable). As such, there are no read-side
1231  * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
1232  * this primitive is intended to determine that all tasks have passed
1233  * through a safe state, not so much for data-structure synchronization.
1234  *
1235  * See the description of call_rcu() for more detailed information on
1236  * memory ordering guarantees.
1237  */
1238 void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
1239 {
1240 	call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
1241 }
1242 EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);
1243 
1244 /**
1245  * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
1246  *
1247  * Control will return to the caller some time after a rude rcu-tasks
1248  * grace period has elapsed, in other words after all currently
1249  * executing rcu-tasks read-side critical sections have completed.  These
1250  * read-side critical sections are delimited by calls to schedule(),
1251  * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable
1252  * context), and (in theory, anyway) cond_resched().
1253  *
1254  * This is a very specialized primitive, intended only for a few uses in
1255  * tracing and other situations requiring manipulation of function preambles
1256  * and profiling hooks.  The synchronize_rcu_tasks_rude() function is not
1257  * (yet) intended for heavy use from multiple CPUs.
1258  *
1259  * See the description of synchronize_rcu() for more detailed information
1260  * on memory ordering guarantees.
1261  */
1262 void synchronize_rcu_tasks_rude(void)
1263 {
1264 	synchronize_rcu_tasks_generic(&rcu_tasks_rude);
1265 }
1266 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);
1267 
1268 /**
1269  * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
1270  *
1271  * Although the current implementation is guaranteed to wait, it is not
1272  * obligated to, for example, if there are no pending callbacks.
1273  */
1274 void rcu_barrier_tasks_rude(void)
1275 {
1276 	rcu_barrier_tasks_generic(&rcu_tasks_rude);
1277 }
1278 EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);
1279 
1280 int rcu_tasks_rude_lazy_ms = -1;
1281 module_param(rcu_tasks_rude_lazy_ms, int, 0444);
1282 
1283 static int __init rcu_spawn_tasks_rude_kthread(void)
1284 {
1285 	cblist_init_generic(&rcu_tasks_rude);
1286 	rcu_tasks_rude.gp_sleep = HZ / 10;
1287 	if (rcu_tasks_rude_lazy_ms >= 0)
1288 		rcu_tasks_rude.lazy_jiffies = msecs_to_jiffies(rcu_tasks_rude_lazy_ms);
1289 	rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
1290 	return 0;
1291 }
1292 
1293 #if !defined(CONFIG_TINY_RCU)
1294 void show_rcu_tasks_rude_gp_kthread(void)
1295 {
1296 	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
1297 }
1298 EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
1299 #endif // !defined(CONFIG_TINY_RCU)
1300 
1301 struct task_struct *get_rcu_tasks_rude_gp_kthread(void)
1302 {
1303 	return rcu_tasks_rude.kthread_ptr;
1304 }
1305 EXPORT_SYMBOL_GPL(get_rcu_tasks_rude_gp_kthread);
1306 
1307 #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
1308 
1309 ////////////////////////////////////////////////////////////////////////
1310 //
1311 // Tracing variant of Tasks RCU.  This variant is designed to be used
1312 // to protect tracing hooks, including those of BPF.  This variant
1313 // therefore:
1314 //
1315 // 1.	Has explicit read-side markers to allow finite grace periods
1316 //	in the face of in-kernel loops for PREEMPT=n builds.
1317 //
1318 // 2.	Protects code in the idle loop, exception entry/exit, and
1319 //	CPU-hotplug code paths, similar to the capabilities of SRCU.
1320 //
1321 // 3.	Avoids expensive read-side instructions, having overhead similar
1322 //	to that of Preemptible RCU.
1323 //
1324 // There are of course downsides.  For example, the grace-period code
1325 // can send IPIs to CPUs, even when those CPUs are in the idle loop or
1326 // in nohz_full userspace.  If needed, these downsides can be at least
1327 // partially remedied.
1328 //
1329 // Perhaps most important, this variant of RCU does not affect the vanilla
1330 // flavors, rcu_preempt and rcu_sched.  The fact that RCU Tasks Trace
1331 // readers can operate from idle, offline, and exception entry/exit in no
1332 // way allows rcu_preempt and rcu_sched readers to also do so.
1333 //
1334 // The implementation uses rcu_tasks_wait_gp(), which relies on function
1335 // pointers in the rcu_tasks structure.  The rcu_spawn_tasks_trace_kthread()
1336 // function sets these function pointers up so that rcu_tasks_wait_gp()
1337 // invokes these functions in this order:
1338 //
1339 // rcu_tasks_trace_pregp_step():
1340 //	Disables CPU hotplug, adds all currently executing tasks to the
1341 //	holdout list, then checks the state of all tasks that blocked
1342 //	or were preempted within their current RCU Tasks Trace read-side
1343 //	critical section, adding them to the holdout list if appropriate.
1344 //	Finally, this function re-enables CPU hotplug.
1345 // The ->pertask_func() pointer is NULL, so there is no per-task processing.
1346 // rcu_tasks_trace_postscan():
1347 //	Invokes synchronize_rcu() to wait for late-stage exiting tasks
1348 //	to finish exiting.
1349 // check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
1350 //	Scans the holdout list, attempting to identify a quiescent state
1351 //	for each task on the list.  If there is a quiescent state, the
1352 //	corresponding task is removed from the holdout list.  Once this
1353 //	list is empty, the grace period has completed.
1354 // rcu_tasks_trace_postgp():
1355 //	Provides the needed full memory barrier and does debug checks.
1356 //
1357 // The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks.
1358 //
1359 // Pre-grace-period update-side code is ordered before the grace period
1360 // via the ->cbs_lock and barriers in rcu_tasks_kthread().  Pre-grace-period
1361 // read-side code is ordered before the grace period by atomic operations
1362 // on .b.need_qs flag of each task involved in this process, or by scheduler
1363 // context-switch ordering (for locked-down non-running readers).
1364 
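// A minimal reader-side sketch (rcu_read_lock_trace(), rcu_read_unlock_trace(),
// and rcu_read_lock_trace_held() are declared in include/linux/rcupdate_trace.h;
// the hook pointer and its fields are hypothetical):
//
//	rcu_read_lock_trace();
//	hook = rcu_dereference_check(my_trace_hook, rcu_read_lock_trace_held());
//	if (hook)
//		hook->func(hook->data);
//	rcu_read_unlock_trace();
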
1365 // The lockdep state must be outside of #ifdef to be useful.
1366 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1367 static struct lock_class_key rcu_lock_trace_key;
1368 struct lockdep_map rcu_trace_lock_map =
1369 	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
1370 EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
1371 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
1372 
1373 #ifdef CONFIG_TASKS_TRACE_RCU
1374 
1375 // Record outstanding IPIs to each CPU.  No point in sending two...
1376 static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
1377 
1378 // The number of detections of task quiescent state relying on
1379 // heavyweight readers executing explicit memory barriers.
1380 static unsigned long n_heavy_reader_attempts;
1381 static unsigned long n_heavy_reader_updates;
1382 static unsigned long n_heavy_reader_ofl_updates;
1383 static unsigned long n_trc_holdouts;
1384 
1385 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
1386 DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
1387 		 "RCU Tasks Trace");
1388 
1389 /* Load from ->trc_reader_special.b.need_qs with proper ordering. */
1390 static u8 rcu_ld_need_qs(struct task_struct *t)
1391 {
1392 	smp_mb(); // Enforce full grace-period ordering.
1393 	return smp_load_acquire(&t->trc_reader_special.b.need_qs);
1394 }
1395 
1396 /* Store to ->trc_reader_special.b.need_qs with proper ordering. */
1397 static void rcu_st_need_qs(struct task_struct *t, u8 v)
1398 {
1399 	smp_store_release(&t->trc_reader_special.b.need_qs, v);
1400 	smp_mb(); // Enforce full grace-period ordering.
1401 }
1402 
1403 /*
1404  * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for
1405  * the four-byte operand-size restriction of some platforms.
1406  * Returns the old value, which is often ignored.
1407  */
1408 u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new)
1409 {
1410 	union rcu_special ret;
1411 	union rcu_special trs_old = READ_ONCE(t->trc_reader_special);
1412 	union rcu_special trs_new = trs_old;
1413 
1414 	if (trs_old.b.need_qs != old)
1415 		return trs_old.b.need_qs;
1416 	trs_new.b.need_qs = new;
1417 	ret.s = cmpxchg(&t->trc_reader_special.s, trs_old.s, trs_new.s);
1418 	return ret.b.need_qs;
1419 }
1420 EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs);
1421 
1422 /*
1423  * If we are the last reader, signal the grace-period kthread.
1424  * Also remove from the per-CPU list of blocked tasks.
1425  */
1426 void rcu_read_unlock_trace_special(struct task_struct *t)
1427 {
1428 	unsigned long flags;
1429 	struct rcu_tasks_percpu *rtpcp;
1430 	union rcu_special trs;
1431 
1432 	// Open-coded full-word version of rcu_ld_need_qs().
1433 	smp_mb(); // Enforce full grace-period ordering.
1434 	trs = smp_load_acquire(&t->trc_reader_special);
1435 
1436 	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb)
1437 		smp_mb(); // Pairs with update-side barriers.
1438 	// Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
1439 	if (trs.b.need_qs == (TRC_NEED_QS_CHECKED | TRC_NEED_QS)) {
1440 		u8 result = rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS,
1441 						       TRC_NEED_QS_CHECKED);
1442 
1443 		WARN_ONCE(result != trs.b.need_qs, "%s: result = %d", __func__, result);
1444 	}
1445 	if (trs.b.blocked) {
1446 		rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, t->trc_blkd_cpu);
1447 		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1448 		list_del_init(&t->trc_blkd_node);
1449 		WRITE_ONCE(t->trc_reader_special.b.blocked, false);
1450 		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1451 	}
1452 	WRITE_ONCE(t->trc_reader_nesting, 0);
1453 }
1454 EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
1455 
1456 /* Add a newly blocked reader task to its CPU's list. */
1457 void rcu_tasks_trace_qs_blkd(struct task_struct *t)
1458 {
1459 	unsigned long flags;
1460 	struct rcu_tasks_percpu *rtpcp;
1461 
1462 	local_irq_save(flags);
1463 	rtpcp = this_cpu_ptr(rcu_tasks_trace.rtpcpu);
1464 	raw_spin_lock_rcu_node(rtpcp); // irqs already disabled
1465 	t->trc_blkd_cpu = smp_processor_id();
1466 	if (!rtpcp->rtp_blkd_tasks.next)
1467 		INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
1468 	list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
1469 	WRITE_ONCE(t->trc_reader_special.b.blocked, true);
1470 	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1471 }
1472 EXPORT_SYMBOL_GPL(rcu_tasks_trace_qs_blkd);
1473 
1474 /* Add a task to the holdout list, if it is not already on the list. */
1475 static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
1476 {
1477 	if (list_empty(&t->trc_holdout_list)) {
1478 		get_task_struct(t);
1479 		list_add(&t->trc_holdout_list, bhp);
1480 		n_trc_holdouts++;
1481 	}
1482 }
1483 
1484 /* Remove a task from the holdout list, if it is in fact present. */
1485 static void trc_del_holdout(struct task_struct *t)
1486 {
1487 	if (!list_empty(&t->trc_holdout_list)) {
1488 		list_del_init(&t->trc_holdout_list);
1489 		put_task_struct(t);
1490 		n_trc_holdouts--;
1491 	}
1492 }
1493 
1494 /* IPI handler to check task state. */
1495 static void trc_read_check_handler(void *t_in)
1496 {
1497 	int nesting;
1498 	struct task_struct *t = current;
1499 	struct task_struct *texp = t_in;
1500 
1501 	// If the task is no longer running on this CPU, leave.
1502 	if (unlikely(texp != t))
1503 		goto reset_ipi; // Already on holdout list, so will check later.
1504 
1505 	// If the task is not in a read-side critical section, and
1506 	// if this is the last reader, awaken the grace-period kthread.
1507 	nesting = READ_ONCE(t->trc_reader_nesting);
1508 	if (likely(!nesting)) {
1509 		rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1510 		goto reset_ipi;
1511 	}
1512 	// If we are racing with an rcu_read_unlock_trace(), try again later.
1513 	if (unlikely(nesting < 0))
1514 		goto reset_ipi;
1515 
1516 	// Get here if the task is in a read-side critical section.
1517 	// Set its state so that it will update state for the grace-period
1518 	// kthread upon exit from that critical section.
1519 	rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED);
1520 
1521 reset_ipi:
1522 	// Allow future IPIs to be sent on CPU and for task.
1523 	// Also order this IPI handler against any later manipulations of
1524 	// the intended task.
1525 	smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
1526 	smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
1527 }
1528 
1529 /* Callback function for scheduler to check locked-down task.  */
1530 static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
1531 {
1532 	struct list_head *bhp = bhp_in;
1533 	int cpu = task_cpu(t);
1534 	int nesting;
1535 	bool ofl = cpu_is_offline(cpu);
1536 
1537 	if (task_curr(t) && !ofl) {
1538 		// If no chance of heavyweight readers, do it the hard way.
1539 		if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
1540 			return -EINVAL;
1541 
1542 		// If heavyweight readers are enabled on the remote task,
1543 		// we can inspect its state even though it is currently running.
1544 		// However, we cannot safely change its state.
1545 		n_heavy_reader_attempts++;
1546 		// Check for "running" idle tasks on offline CPUs.
1547 		if (!rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
1548 			return -EINVAL; // No quiescent state, do it the hard way.
1549 		n_heavy_reader_updates++;
1550 		nesting = 0;
1551 	} else {
1552 		// The task is not running, so C-language access is safe.
1553 		nesting = t->trc_reader_nesting;
1554 		WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t))));
1555 		if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl)
1556 			n_heavy_reader_ofl_updates++;
1557 	}
1558 
1559 	// If not exiting a read-side critical section, mark as checked
1560 	// so that the grace-period kthread will remove it from the
1561 	// holdout list.
1562 	if (!nesting) {
1563 		rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1564 		return 0;  // In QS, so done.
1565 	}
1566 	if (nesting < 0)
1567 		return -EINVAL; // Reader transitioning, try again later.
1568 
1569 	// The task is in a read-side critical section, so set up its
1570 	// state so that it will update state upon exit from that critical
1571 	// section.
1572 	if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED))
1573 		trc_add_holdout(t, bhp);
1574 	return 0;
1575 }
1576 
1577 /* Attempt to extract the state for the specified task. */
1578 static void trc_wait_for_one_reader(struct task_struct *t,
1579 				    struct list_head *bhp)
1580 {
1581 	int cpu;
1582 
1583 	// If a previous IPI is still in flight, let it complete.
1584 	if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
1585 		return;
1586 
1587 	// The current task had better be in a quiescent state.
1588 	if (t == current) {
1589 		rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1590 		WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1591 		return;
1592 	}
1593 
1594 	// Attempt to nail down the task for inspection.
1595 	get_task_struct(t);
1596 	if (!task_call_func(t, trc_inspect_reader, bhp)) {
1597 		put_task_struct(t);
1598 		return;
1599 	}
1600 	put_task_struct(t);
1601 
1602 	// If this task is not yet on the holdout list, then we are in
1603 	// an RCU read-side critical section.  Otherwise, the invocation of
1604 	// trc_add_holdout() that added it to the list did the necessary
1605 	// get_task_struct().  Either way, the task cannot be freed out
1606 	// from under this code.
1607 
1608 	// If currently running, send an IPI, either way, add to list.
1609 	// If currently running, send an IPI; either way, add to list.
1610 	if (task_curr(t) &&
1611 	    time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
1612 		// The task is currently running, so try IPIing it.
1613 		cpu = task_cpu(t);
1614 
1615 		// If there is already an IPI outstanding, let it happen.
1616 		if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
1617 			return;
1618 
1619 		per_cpu(trc_ipi_to_cpu, cpu) = true;
1620 		t->trc_ipi_to_cpu = cpu;
1621 		rcu_tasks_trace.n_ipis++;
1622 		if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
1623 			// Just in case there is some other reason for
1624 			// failure than the target CPU being offline.
1625 			WARN_ONCE(1, "%s():  smp_call_function_single() failed for CPU: %d\n",
1626 				  __func__, cpu);
1627 			rcu_tasks_trace.n_ipis_fails++;
1628 			per_cpu(trc_ipi_to_cpu, cpu) = false;
1629 			t->trc_ipi_to_cpu = -1;
1630 		}
1631 	}
1632 }
1633 
1634 /*
1635  * Initialize for first-round processing for the specified task.
1636  * Return false if task is NULL or already taken care of, true otherwise.
1637  */
1638 static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself)
1639 {
1640 	// During early boot when there is only the one boot CPU, there
1641 	// is no idle task for the other CPUs.	Also, the grace-period
1642 	// kthread is always in a quiescent state.  In addition, just return
1643 	// if this task is already on the list.
1644 	if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list))
1645 		return false;
1646 
1647 	rcu_st_need_qs(t, 0);
1648 	t->trc_ipi_to_cpu = -1;
1649 	return true;
1650 }
1651 
1652 /* Do first-round processing for the specified task. */
1653 static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop)
1654 {
1655 	if (rcu_tasks_trace_pertask_prep(t, true))
1656 		trc_wait_for_one_reader(t, hop);
1657 }
1658 
1659 /* Initialize for a new RCU-tasks-trace grace period. */
1660 static void rcu_tasks_trace_pregp_step(struct list_head *hop)
1661 {
1662 	LIST_HEAD(blkd_tasks);
1663 	int cpu;
1664 	unsigned long flags;
1665 	struct rcu_tasks_percpu *rtpcp;
1666 	struct task_struct *t;
1667 
1668 	// There shouldn't be any old IPIs, but...
1669 	for_each_possible_cpu(cpu)
1670 		WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
1671 
1672 	// Disable CPU hotplug across the CPU scan for the benefit of
1673 	// any IPIs that might be needed.  This also waits for all readers
1674 	// in CPU-hotplug code paths.
1675 	cpus_read_lock();
1676 
1677 	// These rcu_tasks_trace_pertask_prep() calls are serialized to
1678 	// allow safe access to the hop list.
1679 	for_each_online_cpu(cpu) {
1680 		rcu_read_lock();
1681 		t = cpu_curr_snapshot(cpu);
1682 		if (rcu_tasks_trace_pertask_prep(t, true))
1683 			trc_add_holdout(t, hop);
1684 		rcu_read_unlock();
1685 		cond_resched_tasks_rcu_qs();
1686 	}
1687 
1688 	// Only after all running tasks have been accounted for is it
1689 	// safe to take care of the tasks that have blocked within their
1690 	// current RCU tasks trace read-side critical section.
1691 	for_each_possible_cpu(cpu) {
1692 		rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, cpu);
1693 		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1694 		list_splice_init(&rtpcp->rtp_blkd_tasks, &blkd_tasks);
1695 		while (!list_empty(&blkd_tasks)) {
1696 			rcu_read_lock();
1697 			t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node);
1698 			list_del_init(&t->trc_blkd_node);
1699 			list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
1700 			raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1701 			rcu_tasks_trace_pertask(t, hop);
1702 			rcu_read_unlock();
1703 			raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1704 		}
1705 		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1706 		cond_resched_tasks_rcu_qs();
1707 	}
1708 
1709 	// Re-enable CPU hotplug now that the holdout list is populated.
1710 	cpus_read_unlock();
1711 }
1712 
1713 /*
1714  * Do intermediate processing between task and holdout scans.
1715  */
1716 static void rcu_tasks_trace_postscan(struct list_head *hop)
1717 {
1718 	// Wait for late-stage exiting tasks to finish exiting.
1719 	// These might have passed the call to exit_tasks_rcu_finish().
1720 
1721 	// If you remove the following line, update rcu_trace_implies_rcu_gp()!!!
1722 	synchronize_rcu();
1723 	// Any tasks that exit after this point will set
1724 	// TRC_NEED_QS_CHECKED in ->trc_reader_special.b.need_qs.
1725 }
1726 
1727 /* Communicate task state back to the RCU tasks trace stall warning request. */
1728 struct trc_stall_chk_rdr {
1729 	int nesting;
1730 	int ipi_to_cpu;
1731 	u8 needqs;
1732 };
1733 
1734 static int trc_check_slow_task(struct task_struct *t, void *arg)
1735 {
1736 	struct trc_stall_chk_rdr *trc_rdrp = arg;
1737 
1738 	if (task_curr(t) && cpu_online(task_cpu(t)))
1739 		return false; // It is running, so decline to inspect it.
1740 	trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting);
1741 	trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu);
1742 	trc_rdrp->needqs = rcu_ld_need_qs(t);
1743 	return true;
1744 }
1745 
1746 /* Show the state of a task stalling the current RCU tasks trace GP. */
1747 static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
1748 {
1749 	int cpu;
1750 	struct trc_stall_chk_rdr trc_rdr;
1751 	bool is_idle_tsk = is_idle_task(t);
1752 
1753 	if (*firstreport) {
1754 		pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
1755 		*firstreport = false;
1756 	}
1757 	cpu = task_cpu(t);
1758 	if (!task_call_func(t, trc_check_slow_task, &trc_rdr))
1759 		pr_alert("P%d: %c%c\n",
1760 			 t->pid,
1761 			 ".I"[t->trc_ipi_to_cpu >= 0],
1762 			 ".i"[is_idle_tsk]);
1763 	else
1764 		pr_alert("P%d: %c%c%c%c nesting: %d%c%c cpu: %d%s\n",
1765 			 t->pid,
1766 			 ".I"[trc_rdr.ipi_to_cpu >= 0],
1767 			 ".i"[is_idle_tsk],
1768 			 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
1769 			 ".B"[!!data_race(t->trc_reader_special.b.blocked)],
1770 			 trc_rdr.nesting,
1771 			 " !CN"[trc_rdr.needqs & 0x3],
1772 			 " ?"[trc_rdr.needqs > 0x3],
1773 			 cpu, cpu_online(cpu) ? "" : "(offline)");
1774 	sched_show_task(t);
1775 }
1776 
1777 /* List stalled IPIs for RCU tasks trace. */
1778 static void show_stalled_ipi_trace(void)
1779 {
1780 	int cpu;
1781 
1782 	for_each_possible_cpu(cpu)
1783 		if (per_cpu(trc_ipi_to_cpu, cpu))
1784 			pr_alert("\tIPI outstanding to CPU %d\n", cpu);
1785 }
1786 
1787 /* Do one scan of the holdout list. */
1788 static void check_all_holdout_tasks_trace(struct list_head *hop,
1789 					  bool needreport, bool *firstreport)
1790 {
1791 	struct task_struct *g, *t;
1792 
1793 	// Disable CPU hotplug across the holdout list scan for IPIs.
1794 	cpus_read_lock();
1795 
1796 	list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
1797 		// If safe and needed, try to check the current task.
1798 		if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
1799 		    !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED))
1800 			trc_wait_for_one_reader(t, hop);
1801 
1802 		// If check succeeded, remove this task from the list.
1803 		if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
1804 		    rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED)
1805 			trc_del_holdout(t);
1806 		else if (needreport)
1807 			show_stalled_task_trace(t, firstreport);
1808 		cond_resched_tasks_rcu_qs();
1809 	}
1810 
1811 	// Re-enable CPU hotplug now that the holdout list scan has completed.
1812 	cpus_read_unlock();
1813 
1814 	if (needreport) {
1815 		if (*firstreport)
1816 			pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
1817 		show_stalled_ipi_trace();
1818 	}
1819 }
1820 
1821 static void rcu_tasks_trace_empty_fn(void *unused)
1822 {
1823 }
1824 
1825 /* Wait for grace period to complete and provide ordering. */
1826 static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
1827 {
1828 	int cpu;
1829 
1830 	// Wait for any lingering IPI handlers to complete.  Note that
1831 	// if a CPU has gone offline or transitioned to userspace in the
1832 	// meantime, all IPI handlers should have been drained beforehand.
1833 	// Yes, this assumes that CPUs process IPIs in order.  If that ever
1834 	// changes, there will need to be a recheck and/or timed wait.
1835 	for_each_online_cpu(cpu)
1836 		if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))))
1837 			smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);
1838 
1839 	smp_mb(); // Caller's code must be ordered after wakeup.
1840 		  // Pairs with pretty much every ordering primitive.
1841 }
1842 
1843 /* Report any needed quiescent state for this exiting task. */
1844 static void exit_tasks_rcu_finish_trace(struct task_struct *t)
1845 {
1846 	union rcu_special trs = READ_ONCE(t->trc_reader_special);
1847 
1848 	rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1849 	WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1850 	if (WARN_ON_ONCE(rcu_ld_need_qs(t) & TRC_NEED_QS || trs.b.blocked))
1851 		rcu_read_unlock_trace_special(t);
1852 	else
1853 		WRITE_ONCE(t->trc_reader_nesting, 0);
1854 }
1855 
1856 /**
1857  * call_rcu_tasks_trace() - Queue a callback for a trace task-based grace period
1858  * @rhp: structure to be used for queueing the RCU updates.
1859  * @func: actual callback function to be invoked after the grace period
1860  *
1861  * The callback function will be invoked some time after a trace rcu-tasks
1862  * grace period elapses, in other words after all currently executing
1863  * trace rcu-tasks read-side critical sections have completed. These
1864  * read-side critical sections are delimited by calls to rcu_read_lock_trace()
1865  * and rcu_read_unlock_trace().
1866  *
1867  * See the description of call_rcu() for more detailed information on
1868  * memory ordering guarantees.
1869  */
1870 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
1871 {
1872 	call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
1873 }
1874 EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
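
// Illustrative usage sketch only, with hypothetical "struct foo", "fp", and
// "foo_free_cb()": defer freeing an object until all pre-existing Tasks
// Trace readers can no longer be referencing it.
//
//	struct foo {
//		struct rcu_head rh;
//		/* payload */
//	};
//
//	static void foo_free_cb(struct rcu_head *rhp)
//	{
//		kfree(container_of(rhp, struct foo, rh));
//	}
//
//	/* Updater, after making *fp unreachable to new readers: */
//	call_rcu_tasks_trace(&fp->rh, foo_free_cb);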
1875 
1876 /**
1877  * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
1878  *
1879  * Control will return to the caller some time after a trace rcu-tasks
1880  * grace period has elapsed, in other words after all currently executing
1881  * trace rcu-tasks read-side critical sections have elapsed. These read-side
1882  * critical sections are delimited by calls to rcu_read_lock_trace()
1883  * and rcu_read_unlock_trace().
1884  *
1885  * This is a very specialized primitive, intended only for a few uses in
1886  * tracing and other situations requiring manipulation of function preambles
1887  * and profiling hooks.  The synchronize_rcu_tasks_trace() function is not
1888  * (yet) intended for heavy use from multiple CPUs.
1889  *
1890  * See the description of synchronize_rcu() for more detailed information
1891  * on memory ordering guarantees.
1892  */
1893 void synchronize_rcu_tasks_trace(void)
1894 {
1895 	RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
1896 	synchronize_rcu_tasks_generic(&rcu_tasks_trace);
1897 }
1898 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
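
// Illustrative usage sketch only, the synchronous counterpart of the
// call_rcu_tasks_trace() example above ("unpublish_pointer()" and "old_p"
// are hypothetical):
//
//	old_p = unpublish_pointer();	// Remove from reader-visible structures.
//	synchronize_rcu_tasks_trace();	// Wait out all pre-existing trace readers.
//	kfree(old_p);			// No reader can still hold a reference.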
1899 
1900 /**
1901  * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
1902  *
1903  * Although the current implementation is guaranteed to wait, it is not
1904  * obligated to, for example, if there are no pending callbacks.
1905  */
1906 void rcu_barrier_tasks_trace(void)
1907 {
1908 	rcu_barrier_tasks_generic(&rcu_tasks_trace);
1909 }
1910 EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
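
// Illustrative teardown sketch only, reusing the hypothetical foo_free_cb()
// from the call_rcu_tasks_trace() example above:
//
//	call_rcu_tasks_trace(&fp->rh, foo_free_cb);	// Possibly many of these.
//	/* ... */
//	rcu_barrier_tasks_trace();	// All queued foo_free_cb() invocations have run.
//	/* Now safe to unload the code containing foo_free_cb(). */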
1911 
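// A non-negative rcu_tasks_trace_lazy_ms is converted to jiffies below and
// used as this flavor's callback-laziness interval; the default of -1 leaves
// rcu_tasks_trace.lazy_jiffies unchanged.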
1912 int rcu_tasks_trace_lazy_ms = -1;
1913 module_param(rcu_tasks_trace_lazy_ms, int, 0444);
1914 
1915 static int __init rcu_spawn_tasks_trace_kthread(void)
1916 {
1917 	cblist_init_generic(&rcu_tasks_trace);
1918 	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
1919 		rcu_tasks_trace.gp_sleep = HZ / 10;
1920 		rcu_tasks_trace.init_fract = HZ / 10;
1921 	} else {
1922 		rcu_tasks_trace.gp_sleep = HZ / 200;
1923 		if (rcu_tasks_trace.gp_sleep <= 0)
1924 			rcu_tasks_trace.gp_sleep = 1;
1925 		rcu_tasks_trace.init_fract = HZ / 200;
1926 		if (rcu_tasks_trace.init_fract <= 0)
1927 			rcu_tasks_trace.init_fract = 1;
1928 	}
1929 	if (rcu_tasks_trace_lazy_ms >= 0)
1930 		rcu_tasks_trace.lazy_jiffies = msecs_to_jiffies(rcu_tasks_trace_lazy_ms);
1931 	rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
1932 	rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
1933 	rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
1934 	rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
1935 	rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
1936 	return 0;
1937 }
1938 
1939 #if !defined(CONFIG_TINY_RCU)
1940 void show_rcu_tasks_trace_gp_kthread(void)
1941 {
1942 	char buf[64];
1943 
1944 	sprintf(buf, "N%lu h:%lu/%lu/%lu",
1945 		data_race(n_trc_holdouts),
1946 		data_race(n_heavy_reader_ofl_updates),
1947 		data_race(n_heavy_reader_updates),
1948 		data_race(n_heavy_reader_attempts));
1949 	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
1950 }
1951 EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
1952 #endif // !defined(CONFIG_TINY_RCU)
1953 
1954 struct task_struct *get_rcu_tasks_trace_gp_kthread(void)
1955 {
1956 	return rcu_tasks_trace.kthread_ptr;
1957 }
1958 EXPORT_SYMBOL_GPL(get_rcu_tasks_trace_gp_kthread);
1959 
1960 #else /* #ifdef CONFIG_TASKS_TRACE_RCU */
1961 static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
1962 #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
1963 
1964 #ifndef CONFIG_TINY_RCU
1965 void show_rcu_tasks_gp_kthreads(void)
1966 {
1967 	show_rcu_tasks_classic_gp_kthread();
1968 	show_rcu_tasks_rude_gp_kthread();
1969 	show_rcu_tasks_trace_gp_kthread();
1970 }
1971 #endif /* #ifndef CONFIG_TINY_RCU */
1972 
1973 #ifdef CONFIG_PROVE_RCU
1974 struct rcu_tasks_test_desc {
1975 	struct rcu_head rh;
1976 	const char *name;
1977 	bool notrun;
1978 	unsigned long runstart;
1979 };
1980 
1981 static struct rcu_tasks_test_desc tests[] = {
1982 	{
1983 		.name = "call_rcu_tasks()",
1984 		/* If not defined, the test is skipped. */
1985 		.notrun = IS_ENABLED(CONFIG_TASKS_RCU),
1986 	},
1987 	{
1988 		.name = "call_rcu_tasks_rude()",
1989 		/* If not defined, the test is skipped. */
1990 		.notrun = IS_ENABLED(CONFIG_TASKS_RUDE_RCU),
1991 	},
1992 	{
1993 		.name = "call_rcu_tasks_trace()",
1994 		/* If not defined, the test is skipped. */
1995 		.notrun = IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
1996 	}
1997 };
1998 
1999 static void test_rcu_tasks_callback(struct rcu_head *rhp)
2000 {
2001 	struct rcu_tasks_test_desc *rttd =
2002 		container_of(rhp, struct rcu_tasks_test_desc, rh);
2003 
2004 	pr_info("Callback from %s invoked.\n", rttd->name);
2005 
2006 	rttd->notrun = false;
2007 }
2008 
2009 static void rcu_tasks_initiate_self_tests(void)
2010 {
2011 #ifdef CONFIG_TASKS_RCU
2012 	pr_info("Running RCU Tasks wait API self tests\n");
2013 	tests[0].runstart = jiffies;
2014 	synchronize_rcu_tasks();
2015 	call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
2016 #endif
2017 
2018 #ifdef CONFIG_TASKS_RUDE_RCU
2019 	pr_info("Running RCU Tasks Rude wait API self tests\n");
2020 	tests[1].runstart = jiffies;
2021 	synchronize_rcu_tasks_rude();
2022 	call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
2023 #endif
2024 
2025 #ifdef CONFIG_TASKS_TRACE_RCU
2026 	pr_info("Running RCU Tasks Trace wait API self tests\n");
2027 	tests[2].runstart = jiffies;
2028 	synchronize_rcu_tasks_trace();
2029 	call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
2030 #endif
2031 }
2032 
2033 /*
2034  * Return:  0 - test passed
2035  *	    1 - test failed, but have not timed out yet
2036  *	    1 - test failed, but has not timed out yet
2037  */
2038 static int rcu_tasks_verify_self_tests(void)
2039 {
2040 	int ret = 0;
2041 	int i;
2042 	unsigned long bst = rcu_task_stall_timeout;
2043 
2044 	if (bst <= 0 || bst > RCU_TASK_BOOT_STALL_TIMEOUT)
2045 		bst = RCU_TASK_BOOT_STALL_TIMEOUT;
2046 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
2047 		while (tests[i].notrun) {		// still hanging.
2048 			if (time_after(jiffies, tests[i].runstart + bst)) {
2049 				pr_err("%s has failed boot-time tests.\n", tests[i].name);
2050 				ret = -1;
2051 				break;
2052 			}
2053 			ret = 1;
2054 			break;
2055 		}
2056 	}
2057 	WARN_ON(ret < 0);
2058 
2059 	return ret;
2060 }
2061 
2062 /*
2063  * Repeat the rcu_tasks_verify_self_tests() call once every second until the
2064  * test passes or has timed out.
2065  */
2066 static struct delayed_work rcu_tasks_verify_work;
2067 static void rcu_tasks_verify_work_fn(struct work_struct *work __maybe_unused)
2068 {
2069 	int ret = rcu_tasks_verify_self_tests();
2070 
2071 	if (ret <= 0)
2072 		return;
2073 
2074 	/* Test still failing but not yet timed out, so reschedule another check. */
2075 	schedule_delayed_work(&rcu_tasks_verify_work, HZ);
2076 }
2077 
2078 static int rcu_tasks_verify_schedule_work(void)
2079 {
2080 	INIT_DELAYED_WORK(&rcu_tasks_verify_work, rcu_tasks_verify_work_fn);
2081 	rcu_tasks_verify_work_fn(NULL);
2082 	return 0;
2083 }
2084 late_initcall(rcu_tasks_verify_schedule_work);
2085 #else /* #ifdef CONFIG_PROVE_RCU */
2086 static void rcu_tasks_initiate_self_tests(void) { }
2087 #endif /* #else #ifdef CONFIG_PROVE_RCU */
2088 
2089 void __init rcu_init_tasks_generic(void)
2090 {
2091 #ifdef CONFIG_TASKS_RCU
2092 	rcu_spawn_tasks_kthread();
2093 #endif
2094 
2095 #ifdef CONFIG_TASKS_RUDE_RCU
2096 	rcu_spawn_tasks_rude_kthread();
2097 #endif
2098 
2099 #ifdef CONFIG_TASKS_TRACE_RCU
2100 	rcu_spawn_tasks_trace_kthread();
2101 #endif
2102 
2103 	// Run the self-tests.
2104 	rcu_tasks_initiate_self_tests();
2105 }
2106 
2107 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
2108 static inline void rcu_tasks_bootup_oddness(void) {}
2109 #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
2110