// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	  Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.rst
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>
#include <linux/nmi.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");

/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT_1	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_1	 (1 << RCUTORTURE_RDR_SHIFT_1)
#define RCUTORTURE_RDR_SHIFT_2	 9	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_2	 (1 << RCUTORTURE_RDR_SHIFT_2)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU_1	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_RCU_2	 0x40	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 7	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(int, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
	      "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
	      "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	     "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0,
	     "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	     "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	     "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, read_exit_delay, 13,
	      "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16,
	      "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	     "Time to wait before starting stall (s).");
torture_param(bool, stall_no_softlockup, false,
	     "Avoid softlockup warning during cpu stall.");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_gp_kthread, 0,
	      "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60,
	     "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	     "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	     "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	     "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	     "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");

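/*
 * All of the above parameters are normally supplied on the modprobe
 * command line or, for built-in rcutorture, on the kernel boot line,
 * for example (illustrative values only):
 *
 *	modprobe rcutorture torture_type=srcu nreaders=8 stat_interval=15
 *	rcutorture.torture_type=rcu rcutorture.fwd_progress=0
 *
 * See Documentation/admin-guide/kernel-parameters.txt for the full list.
 */
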
static int nrealnocbers;
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct **nocb_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct **fwd_prog_tasks;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
static struct task_struct *read_exit_task;

#define RCU_TORTURE_PIPE_LEN 10
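
/*
 * Each rcu_torture element removed from rcu_torture_current passes
 * through a RCU_TORTURE_PIPE_LEN-stage pipeline, advancing one stage
 * per grace period before being freed.  A reader that observes an
 * element with ->rtort_pipe_count > 1 has therefore held a reference
 * across more than one full grace period after the element was
 * unlinked, which indicates a too-short grace period, in other words,
 * an RCU bug.
 */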

// Mailbox-like structure to check RCU global memory ordering.
struct rcu_torture_reader_check {
	unsigned long rtc_myloops;
	int rtc_chkrdr;
	unsigned long rtc_chkloops;
	int rtc_ready;
	struct rcu_torture_reader_check *rtc_assigner;
} ____cacheline_internodealigned_in_smp;

// Update-side data structure used to check RCU readers.
struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
	struct rcu_torture_reader_check *rtort_chkp;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_mbchk_fail;
static atomic_t n_rcu_torture_mbchk_tries;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static unsigned long n_read_exits;
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;
static unsigned long start_gp_seq;
static atomic_long_t n_nocb_offload;
static atomic_long_t n_nocb_deoffload;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_POLL_GET		7
#define RTWS_POLL_WAIT		8
#define RTWS_SYNC		9
#define RTWS_STUTTER		10
#define RTWS_STOPPING		11
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_POLL_GET",
	"RTWS_POLL_WAIT",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static bool rcu_fwd_cb_nodelay;		/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}
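
/*
 * Taken together, the two helpers above support the writer's
 * allocate -> publish -> retire lifecycle, which looks roughly like
 * the following sketch (simplified, omitting the pipeline bookkeeping
 * that the real rcu_torture_writer() performs):
 *
 *	struct rcu_torture *rp = rcu_torture_alloc();
 *	struct rcu_torture *old_rp;
 *
 *	if (rp) {
 *		old_rp = rcu_dereference_check(rcu_torture_current, ...);
 *		rcu_assign_pointer(rcu_torture_current, rp);
 *		if (old_rp)
 *			cur_ops->deferred_free(old_rp); // Eventually rcu_torture_free().
 *	}
 */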

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	int (*readlock_held)(void);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_gp_state)(void);
	unsigned long (*start_gp_poll)(void);
	bool (*poll_gp_state)(unsigned long oldstate);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	void (*gp_kthread_dbg)(void);
	bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
	int (*stall_dur)(void);
	long cbflood_max;
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	int no_pi_lock;
	const char *name;
};
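
/*
 * Most of these hooks are optional: callers check for NULL before
 * invoking them (see, for example, rcutorture_seq_diff() and
 * rcu_torture_write_types() below), so a flavor lacking a given
 * capability simply leaves the corresponding field unset.  The rcu_ops
 * instance below is the most fully populated example.
 */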

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int torture_readlock_not_held(void)
{
	return rcu_read_lock_bh_held() || rcu_read_lock_sched_held();
}

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!READ_ONCE(rcu_fwd_cb_nodelay) &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule();  /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}
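
/*
 * For example, with nrealreaders == 16, the modulo checks above work
 * out to roughly one long (300 ms) delay per 9.6 million calls, one
 * short (200 us) delay per 6,400 calls, and one voluntary preemption
 * per 8,000 calls, each per reader.  Scaling by nrealreaders keeps the
 * systemwide delay rate roughly constant as readers are added.
 */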

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;
	struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);

	if (rtrcp) {
		WRITE_ONCE(rp->rtort_chkp, NULL);
		smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
	}
	i = READ_ONCE(rp->rtort_pipe_count);
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
	if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
	.ttype			= RCU_FLAVOR,
	.init			= rcu_sync_torture_init,
	.readlock		= rcu_torture_read_lock,
	.read_delay		= rcu_read_delay,
	.readunlock		= rcu_torture_read_unlock,
	.readlock_held		= torture_readlock_not_held,
	.get_gp_seq		= rcu_get_gp_seq,
	.gp_diff		= rcu_seq_diff,
	.deferred_free		= rcu_torture_deferred_free,
	.sync			= synchronize_rcu,
	.exp_sync		= synchronize_rcu_expedited,
	.get_gp_state		= get_state_synchronize_rcu,
	.start_gp_poll		= start_poll_synchronize_rcu,
	.poll_gp_state		= poll_state_synchronize_rcu,
	.cond_sync		= cond_synchronize_rcu,
	.call			= call_rcu,
	.cb_barrier		= rcu_barrier,
	.fqs			= rcu_force_quiescent_state,
	.stats			= NULL,
	.gp_kthread_dbg		= show_rcu_gp_kthreads,
	.check_boost_failed	= rcu_check_boost_fail,
	.stall_dur		= rcu_jiffies_till_stall_check,
	.irq_capable		= 1,
	.can_boost		= IS_ENABLED(CONFIG_RCU_BOOST),
	.extendables		= RCUTORTURE_MAX_EXTEND,
	.name			= "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.readlock_held	= torture_readlock_not_held,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.cb_barrier	= NULL,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static int torture_srcu_read_lock_held(void)
{
	return srcu_read_lock_held(srcu_ctlp);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_get_gp_state(void)
{
	return get_state_synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_start_gp_poll(void)
{
	return start_poll_synchronize_srcu(srcu_ctlp);
}

static bool srcu_torture_poll_gp_state(unsigned long oldstate)
{
	return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.readlock_held	= torture_srcu_read_lock_held,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.get_gp_state	= srcu_torture_get_gp_state,
	.start_gp_poll	= srcu_torture_start_gp_poll,
	.poll_gp_state	= srcu_torture_poll_gp_state,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
	.name		= "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.readlock_held	= torture_srcu_read_lock_held,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
	.name		= "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.readlock_held	= torture_srcu_read_lock_held,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted_srcud"
};

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static void synchronize_rcu_mult_test(void)
{
	synchronize_rcu_mult(call_rcu_tasks, call_rcu);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_mult_test,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.gp_kthread_dbg	= show_rcu_tasks_classic_gp_kthread,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */

static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}
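
/*
 * The loop above constitutes a grace period because, with
 * CONFIG_PREEMPT=n, migrating the current task onto each online CPU
 * in turn forces a context switch on each of those CPUs, and a
 * context switch on every CPU guarantees that all pre-existing
 * non-preemptible readers have completed.
 */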

static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype		= RCU_TRIVIAL_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.readlock_held	= torture_readlock_not_held,
	.get_gp_seq	= rcu_no_completed,
	.sync		= synchronize_rcu_trivial,
	.exp_sync	= synchronize_rcu_trivial,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "trivial"
};

/*
 * Definitions for rude RCU-tasks torture testing.
 */

static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_rude_ops = {
	.ttype		= RCU_TASKS_RUDE_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_rude_torture_deferred_free,
	.sync		= synchronize_rcu_tasks_rude,
	.exp_sync	= synchronize_rcu_tasks_rude,
	.call		= call_rcu_tasks_rude,
	.cb_barrier	= rcu_barrier_tasks_rude,
	.gp_kthread_dbg	= show_rcu_tasks_rude_gp_kthread,
	.cbflood_max	= 50000,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "tasks-rude"
};

/*
 * Definitions for tracing RCU-tasks torture testing.
 */

static int tasks_tracing_torture_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_tracing_torture_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_tracing_ops = {
	.ttype		= RCU_TASKS_TRACING_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_tracing_torture_read_lock,
	.read_delay	= srcu_read_delay,  /* just reuse srcu's version. */
	.readunlock	= tasks_tracing_torture_read_unlock,
	.readlock_held	= rcu_read_lock_trace_held,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_tracing_torture_deferred_free,
	.sync		= synchronize_rcu_tasks_trace,
	.exp_sync	= synchronize_rcu_tasks_trace,
	.call		= call_rcu_tasks_trace,
	.cb_barrier	= rcu_barrier_tasks_trace,
	.gp_kthread_dbg	= show_rcu_tasks_trace_gp_kthread,
	.cbflood_max	= 50000,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks-tracing"
};

static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly starting grace periods and waiting
 * for them to complete.  If a given grace period takes too long, we assume
 * that priority inversion has occurred.
 */

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled.  This is only possible if rcutorture is built in;
	 * otherwise the user should do this manually by setting the
	 * sched_rt_period_us and sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}
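
/*
 * For modular rcutorture, the equivalent manual step is something like
 * the following (illustrative value; -1 disables RT throttling
 * entirely):
 *
 *	echo -1 > /proc/sys/kernel/sched_rt_runtime_us
 */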

static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start)
{
	int cpu;
	static int dbg_done;
	unsigned long end = jiffies;
	bool gp_done;
	unsigned long j;
	static unsigned long last_persist;
	unsigned long lp;
	unsigned long mininterval = test_boost_duration * HZ - HZ / 2;

	if (end - *start > mininterval) {
		// Recheck after checking time to avoid false positives.
		smp_mb(); // Time check before grace-period check.
		if (cur_ops->poll_gp_state(gp_state))
			return false; // passed, though perhaps just barely
		if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
			// At most one persisted message per boost test.
			j = jiffies;
			lp = READ_ONCE(last_persist);
			if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp)
				pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
			return false; // passed on a technicality
		}
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;
		if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
			pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
				current->rt_priority, gp_state, end - *start);
			cur_ops->gp_kthread_dbg();
			// Recheck after print to flag grace period ending during splat.
			gp_done = cur_ops->poll_gp_state(gp_state);
			pr_info("Boost inversion: GP %lu %s.\n", gp_state,
				gp_done ? "ended already" : "still pending");

		}

		return true; // failed
	} else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) {
		*start = jiffies;
	}

	return false; // passed
}

static int rcu_torture_boost(void *arg)
{
	unsigned long endtime;
	unsigned long gp_state;
	unsigned long gp_state_time;
	unsigned long oldstarttime;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sched_set_fifo_low(current);

	/* Each pass through the following loop does one boost-test cycle. */
	do {
		bool failed = false; // Test failed already in this test interval
		bool gp_initiated = false;

		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (time_before(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			if (stutter_wait("rcu_torture_boost"))
				sched_set_fifo_low(current);
			if (torture_must_stop())
				goto checkwait;
		}

		// Do one boost-test interval.
		endtime = oldstarttime + test_boost_duration * HZ;
		while (time_before(jiffies, endtime)) {
			// Has current GP gone too long?
			if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
				failed = rcu_torture_boost_failed(gp_state, &gp_state_time);
			// If we don't have a grace period in flight, start one.
			if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) {
				gp_state = cur_ops->start_gp_poll();
				gp_initiated = true;
				gp_state_time = jiffies;
			}
			if (stutter_wait("rcu_torture_boost")) {
				sched_set_fifo_low(current);
				// If the grace period already ended,
				// we don't know when that happened, so
				// start over.
				if (cur_ops->poll_gp_state(gp_state))
					gp_initiated = false;
			}
			if (torture_must_stop())
				goto checkwait;
		}

		// In case the grace period extended beyond the end of the loop.
		if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
			rcu_torture_boost_failed(gp_state, &gp_state_time);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime && !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				if (oldstarttime == boost_starttime) {
					boost_starttime = jiffies + test_boost_interval * HZ;
					n_rcu_torture_boosts++;
				}
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	if (stutter_wait("rcu_torture_boost"))
			sched_set_fifo_low(current);
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop()) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;
	int oldnice = task_nice(current);

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (time_before(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		if (stutter_wait("rcu_torture_fqs"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

// Used by writers to randomly choose from the available grace-period
// primitives.  The only purpose of the initialization is to size the array.
static int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC, RTWS_COND_GET, RTWS_POLL_GET, RTWS_SYNC };
static int nsynctypes;

/*
 * Determine which grace-period primitives are available.
 */
static void rcu_torture_write_types(void)
{
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_poll1 = gp_poll, gp_sync1 = gp_sync;

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_poll1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_poll1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_poll1 && cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
		synctype[nsynctypes++] = RTWS_POLL_GET;
		pr_info("%s: Testing polling GPs.\n", __func__);
	} else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
		pr_alert("%s: gp_poll without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
}
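
/*
 * For example (an illustrative combination), booting with
 * rcutorture.gp_poll=1 rcutorture.gp_sync=1 restricts the writers to
 * the RTWS_POLL_GET and RTWS_SYNC entries above, assuming that the
 * current flavor supplies ->start_gp_poll(), ->poll_gp_state(), and
 * ->sync().
 */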

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool boot_ended;
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	unsigned long cookie;
	int expediting = 0;
	unsigned long gp_snap;
	int i;
	int idx;
	int oldnice = task_nice(current);
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	bool stutter_waited;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);
	if (WARN_ONCE(nsynctypes == 0,
		      "rcu_torture_writer: No update-side primitives.\n")) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
	}

	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		torture_hrtimeout_us(500, 1000, &rand);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			WRITE_ONCE(old_rp->rtort_pipe_count,
				   old_rp->rtort_pipe_count + 1);
			if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
				idx = cur_ops->readlock();
				cookie = cur_ops->get_gp_state();
				WARN_ONCE(rcu_torture_writer_state != RTWS_DEF_FREE &&
					  cur_ops->poll_gp_state(cookie),
					  "%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
					  __func__,
					  rcu_torture_writer_state_getname(),
					  rcu_torture_writer_state,
					  cookie, cur_ops->get_gp_state());
				cur_ops->readunlock(idx);
			}
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				cur_ops->exp_sync();
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_gp_state();
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_POLL_GET:
				rcu_torture_writer_state = RTWS_POLL_GET;
				gp_snap = cur_ops->start_gp_poll();
				rcu_torture_writer_state = RTWS_POLL_WAIT;
				while (!cur_ops->poll_gp_state(gp_snap))
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				cur_ops->sync();
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		WRITE_ONCE(rcu_torture_current_version,
			   rcu_torture_current_version + 1);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, recheck. */
			can_expedite = !rcu_gp_is_expedited() &&
				       !rcu_gp_is_normal();
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		boot_ended = rcu_inkernel_boot_has_ended();
		stutter_waited = stutter_wait("rcu_torture_writer");
		if (stutter_waited &&
		    !READ_ONCE(rcu_fwd_cb_nodelay) &&
		    !cur_ops->slow_gps &&
		    !torture_must_stop() &&
		    boot_ended)
			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
				if (list_empty(&rcu_tortures[i].rtort_free) &&
				    rcu_access_pointer(rcu_torture_current) !=
				    &rcu_tortures[i]) {
					rcu_ftrace_dump(DUMP_ALL);
					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
				}
		if (stutter_waited)
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	rcu_torture_current = NULL;  // Let stats task know that we are done.
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " Dynamic grace-period expediting was disabled.\n",
			 torture_type);
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	unsigned long gp_snap;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	do {
		torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else {
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				break;
			case RTWS_EXP_SYNC:
				cur_ops->exp_sync();
				break;
			case RTWS_COND_GET:
				gp_snap = cur_ops->get_gp_state();
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				cur_ops->cond_sync(gp_snap);
				break;
			case RTWS_POLL_GET:
				gp_snap = cur_ops->start_gp_poll();
				while (!cur_ops->poll_gp_state(gp_snap)) {
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				}
				break;
			case RTWS_SYNC:
				cur_ops->sync();
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		stutter_wait("rcu_torture_fakewriter");
	} while (!torture_must_stop());

	torture_kthread_stopping("rcu_torture_fakewriter");
	return 0;
}

static void rcu_torture_timer_cb(struct rcu_head *rhp)
{
	kfree(rhp);
}

// Set up and carry out testing of RCU's global memory ordering
static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
					struct torture_random_state *trsp)
{
	unsigned long loops;
	int noc = torture_num_online_cpus();
	int rdrchked;
	int rdrchker;
	struct rcu_torture_reader_check *rtrcp; // Me.
	struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking.
	struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked.
	struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me.

	if (myid < 0)
		return; // Don't try this from timer handlers.

	// Increment my counter.
	rtrcp = &rcu_torture_reader_mbchk[myid];
	WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1);

	// Attempt to assign someone else some checking work.
	rdrchked = torture_random(trsp) % nrealreaders;
	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
	rdrchker = torture_random(trsp) % nrealreaders;
	rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker];
	if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker &&
	    smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below.
	    !READ_ONCE(rtp->rtort_chkp) &&
	    !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below.
		rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops);
		WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0);
		rtrcp->rtc_chkrdr = rdrchked;
		WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends.
		if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) ||
		    cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp))
			(void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out.
	}

	// If assigned some completed work, do it!
	rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner);
	if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready))
		return; // No work or work not yet ready.
	rdrchked = rtrcp_assigner->rtc_chkrdr;
	if (WARN_ON_ONCE(rdrchked < 0))
		return;
	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
	loops = READ_ONCE(rtrcp_chked->rtc_myloops);
	atomic_inc(&n_rcu_torture_mbchk_tries);
	if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops))
		atomic_inc(&n_rcu_torture_mbchk_fail);
	rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2;
	rtrcp_assigner->rtc_ready = 0;
	smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work.
	smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign.
}
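
/*
 * The failure check above leans on RCU's global memory-ordering
 * guarantee: ->rtc_chkloops was snapshotted before the grace period
 * that set ->rtc_ready, so once that grace period has ended, any
 * reader must observe the checked reader's loop counter to be at
 * least as large as the snapshot.  Observing a smaller value means
 * that a counter update leaked across a full grace period.
 */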

/*
 * Do one extension of an RCU read-side critical section using the
 * current reader state in readstate (set to zero for initial entry
 * to extended critical section), set the new state as specified by
 * newstate (set to zero for final exit from extended critical section),
 * and random-number-generator state in trsp.  If this is neither the
 * beginning nor the end of the critical section and if there was
 * actually a change, do a ->read_delay().
 */
static void rcutorture_one_extend(int *readstate, int newstate,
				  struct torture_random_state *trsp,
				  struct rt_read_seg *rtrsp)
{
	unsigned long flags;
	int idxnew1 = -1;
	int idxnew2 = -1;
	int idxold1 = *readstate;
	int idxold2 = idxold1;
	int statesnew = ~*readstate & newstate;
	int statesold = *readstate & ~newstate;

	WARN_ON_ONCE(idxold2 < 0);
	WARN_ON_ONCE((idxold2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
	rtrsp->rt_readstate = newstate;

	/* First, put new protection in place to avoid critical-section gap. */
	if (statesnew & RCUTORTURE_RDR_BH)
		local_bh_disable();
	if (statesnew & RCUTORTURE_RDR_RBH)
		rcu_read_lock_bh();
	if (statesnew & RCUTORTURE_RDR_IRQ)
		local_irq_disable();
	if (statesnew & RCUTORTURE_RDR_PREEMPT)
		preempt_disable();
	if (statesnew & RCUTORTURE_RDR_SCHED)
		rcu_read_lock_sched();
	if (statesnew & RCUTORTURE_RDR_RCU_1)
		idxnew1 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_1;
	if (statesnew & RCUTORTURE_RDR_RCU_2)
		idxnew2 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_2;

	/*
	 * Next, remove old protection, in decreasing order of strength
	 * to avoid unlock paths that aren't safe in the stronger
	 * context.  Namely, BH cannot be enabled with interrupts
	 * disabled.  Additionally, PREEMPT_RT requires that BH be
	 * enabled in preemptible context.
	 */
	if (statesold & RCUTORTURE_RDR_IRQ)
		local_irq_enable();
	if (statesold & RCUTORTURE_RDR_PREEMPT)
		preempt_enable();
	if (statesold & RCUTORTURE_RDR_SCHED)
		rcu_read_unlock_sched();
	if (statesold & RCUTORTURE_RDR_BH)
		local_bh_enable();
	if (statesold & RCUTORTURE_RDR_RBH)
		rcu_read_unlock_bh();
	if (statesold & RCUTORTURE_RDR_RCU_2) {
		cur_ops->readunlock((idxold2 >> RCUTORTURE_RDR_SHIFT_2) & 0x1);
		WARN_ON_ONCE(idxnew2 != -1);
		idxold2 = 0;
	}
	if (statesold & RCUTORTURE_RDR_RCU_1) {
		bool lockit;

		lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff);
		if (lockit)
			raw_spin_lock_irqsave(&current->pi_lock, flags);
		cur_ops->readunlock((idxold1 >> RCUTORTURE_RDR_SHIFT_1) & 0x1);
		WARN_ON_ONCE(idxnew1 != -1);
		idxold1 = 0;
		if (lockit)
			raw_spin_unlock_irqrestore(&current->pi_lock, flags);
	}

	/* Delay if neither beginning nor end and there was a change. */
	if ((statesnew || statesold) && *readstate && newstate)
		cur_ops->read_delay(trsp, rtrsp);

	/* Update the reader state. */
	if (idxnew1 == -1)
		idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1;
	WARN_ON_ONCE(idxnew1 < 0);
	if (WARN_ON_ONCE((idxnew1 >> RCUTORTURE_RDR_SHIFT_1) > 1))
		pr_info("Unexpected idxnew1 value of %#x\n", idxnew1);
	if (idxnew2 == -1)
		idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2;
	WARN_ON_ONCE(idxnew2 < 0);
	WARN_ON_ONCE((idxnew2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
	*readstate = idxnew1 | idxnew2 | newstate;
	WARN_ON_ONCE(*readstate < 0);
	if (WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT_2) > 1))
		pr_info("Unexpected idxnew2 value of %#x\n", idxnew2);
}

/* Return the biggest extendables mask given current RCU and boot parameters. */
static int rcutorture_extend_mask_max(void)
{
	int mask;

	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
	mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2;
	return mask;
}

/* Return a random protection state mask, but with at least one bit set. */
static int
rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
{
	int mask = rcutorture_extend_mask_max();
	unsigned long randmask1 = torture_random(trsp) >> 8;
	unsigned long randmask2 = randmask1 >> 3;
	unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
	unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
	unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;

	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1);
	/* Mostly only one bit (need preemption!), sometimes lots of bits. */
	if (!(randmask1 & 0x7))
		mask = mask & randmask2;
	else
		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));

	// Can't have nested RCU reader without outer RCU reader.
	if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) {
		if (oldmask & RCUTORTURE_RDR_RCU_1)
			mask &= ~RCUTORTURE_RDR_RCU_2;
		else
			mask |= RCUTORTURE_RDR_RCU_1;
	}

	/*
	 * Can't enable bh w/irq disabled.
	 */
	if (mask & RCUTORTURE_RDR_IRQ)
		mask |= oldmask & bhs;

	/*
	 * Ideally these sequences would be detected in debug builds
	 * (regardless of RT), but until then don't stop testing
	 * them on non-RT.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
		/* Can't modify BH in atomic context */
		if (oldmask & preempts_irq)
			mask &= ~bhs;
		if ((oldmask | mask) & preempts_irq)
			mask |= oldmask & bhs;
	}

	return mask ?: RCUTORTURE_RDR_RCU_1;
}
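
/*
 * In other words, seven times out of eight the result has at most one
 * bit set (chosen from among the RCUTORTURE_RDR_NBITS protection
 * types), and the remaining one time in eight it is a random subset
 * of the allowed bits.  Either way, an empty result falls back to
 * RCUTORTURE_RDR_RCU_1, guaranteeing at least an outer RCU reader.
 */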
1576 
1577 /*
1578  * Do a randomly selected number of extensions of an existing RCU read-side
1579  * critical section.
1580  */
1581 static struct rt_read_seg *
1582 rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
1583 		       struct rt_read_seg *rtrsp)
1584 {
1585 	int i;
1586 	int j;
1587 	int mask = rcutorture_extend_mask_max();
1588 
1589 	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
1590 	if (!((mask - 1) & mask))
1591 		return rtrsp;  /* Current RCU reader not extendable. */
1592 	/* Bias towards larger numbers of loops. */
1593 	i = (torture_random(trsp) >> 3);
1594 	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
1595 	for (j = 0; j < i; j++) {
1596 		mask = rcutorture_extend_mask(*readstate, trsp);
1597 		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
1598 	}
1599 	return &rtrsp[j];
1600 }
1601 
1602 /*
1603  * Do one read-side critical section, returning false if there was
1604  * no data to read.  Can be invoked both from process context and
1605  * from a timer handler.
1606  */
1607 static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
1608 {
1609 	unsigned long cookie;
1610 	int i;
1611 	unsigned long started;
1612 	unsigned long completed;
1613 	int newstate;
1614 	struct rcu_torture *p;
1615 	int pipe_count;
1616 	int readstate = 0;
1617 	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
1618 	struct rt_read_seg *rtrsp = &rtseg[0];
1619 	struct rt_read_seg *rtrsp1;
1620 	unsigned long long ts;
1621 
1622 	WARN_ON_ONCE(!rcu_is_watching());
1623 	newstate = rcutorture_extend_mask(readstate, trsp);
1624 	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
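	/*
	 * Snapshot polled grace-period state so that "Cookie check 2" below
	 * can detect a grace period completing within this reader.
	 */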
1625 	if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
1626 		cookie = cur_ops->get_gp_state();
1627 	started = cur_ops->get_gp_seq();
1628 	ts = rcu_trace_clock_local();
1629 	p = rcu_dereference_check(rcu_torture_current,
1630 				  !cur_ops->readlock_held || cur_ops->readlock_held());
1631 	if (p == NULL) {
1632 		/* Wait for rcu_torture_writer to get underway */
1633 		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
1634 		return false;
1635 	}
1636 	if (p->rtort_mbtest == 0)
1637 		atomic_inc(&n_rcu_torture_mberror);
1638 	rcu_torture_reader_do_mbchk(myid, p, trsp);
1639 	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
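	/* Disable preemption so the __this_cpu_inc() calls below stay on one CPU. */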
1640 	preempt_disable();
1641 	pipe_count = READ_ONCE(p->rtort_pipe_count);
1642 	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
1643 		/* Should not happen, but... */
1644 		pipe_count = RCU_TORTURE_PIPE_LEN;
1645 	}
1646 	completed = cur_ops->get_gp_seq();
1647 	if (pipe_count > 1) {
1648 		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
1649 					  ts, started, completed);
1650 		rcu_ftrace_dump(DUMP_ALL);
1651 	}
1652 	__this_cpu_inc(rcu_torture_count[pipe_count]);
1653 	completed = rcutorture_seq_diff(completed, started);
1654 	if (completed > RCU_TORTURE_PIPE_LEN) {
1655 		/* Should not happen, but... */
1656 		completed = RCU_TORTURE_PIPE_LEN;
1657 	}
1658 	__this_cpu_inc(rcu_torture_batch[completed]);
1659 	preempt_enable();
1660 	if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
1661 		WARN_ONCE(cur_ops->poll_gp_state(cookie),
1662 			  "%s: Cookie check 2 failed %s(%d) %lu->%lu\n",
1663 			  __func__,
1664 			  rcu_torture_writer_state_getname(),
1665 			  rcu_torture_writer_state,
1666 			  cookie, cur_ops->get_gp_state());
1667 	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
1668 	WARN_ON_ONCE(readstate);
1669 	// This next splat is expected behavior if leakpointer, especially
1670 	// for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
1671 	WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1);
1672 
1673 	/* If error or close call, record the sequence of reader protections. */
1674 	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
1675 		i = 0;
1676 		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
1677 			err_segs[i++] = *rtrsp1;
1678 		rt_read_nsegs = i;
1679 	}
1680 
1681 	return true;
1682 }
1683 
1684 static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
1685 
1686 /*
1687  * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
1688  * incrementing the corresponding element of the pipeline array.  The
1689  * counter in the element should never be greater than 1, otherwise, the
1690  * counter in the element should never be greater than 1; otherwise, the
1691  */
1692 static void rcu_torture_timer(struct timer_list *unused)
1693 {
1694 	atomic_long_inc(&n_rcu_torture_timers);
1695 	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1);
1696 
1697 	/* Test call_rcu() invocation from interrupt handler. */
1698 	if (cur_ops->call) {
1699 		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);
1700 
1701 		if (rhp)
1702 			cur_ops->call(rhp, rcu_torture_timer_cb);
1703 	}
1704 }
1705 
1706 /*
1707  * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
1708  * incrementing the corresponding element of the pipeline array.  The
1709  * counter in the element should never be greater than 1; otherwise, the
1710  * RCU implementation is broken.
1711  */
1712 static int
1713 rcu_torture_reader(void *arg)
1714 {
1715 	unsigned long lastsleep = jiffies;
1716 	long myid = (long)arg;
1717 	int mynumonline = myid;
1718 	DEFINE_TORTURE_RANDOM(rand);
1719 	struct timer_list t;
1720 
1721 	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
1722 	set_user_nice(current, MAX_NICE);
1723 	if (irqreader && cur_ops->irq_capable)
1724 		timer_setup_on_stack(&t, rcu_torture_timer, 0);
1725 	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
1726 	do {
1727 		if (irqreader && cur_ops->irq_capable) {
1728 			if (!timer_pending(&t))
1729 				mod_timer(&t, jiffies + 1);
1730 		}
1731 		if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop())
1732 			schedule_timeout_interruptible(HZ);
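		/* About once per ten jiffies, sleep briefly so as not to monopolize this CPU. */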
1733 		if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
1734 			torture_hrtimeout_us(500, 1000, &rand);
1735 			lastsleep = jiffies + 10;
1736 		}
1737 		while (torture_num_online_cpus() < mynumonline && !torture_must_stop())
1738 			schedule_timeout_interruptible(HZ / 5);
1739 		stutter_wait("rcu_torture_reader");
1740 	} while (!torture_must_stop());
1741 	if (irqreader && cur_ops->irq_capable) {
1742 		del_timer_sync(&t);
1743 		destroy_timer_on_stack(&t);
1744 	}
1745 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
1746 	torture_kthread_stopping("rcu_torture_reader");
1747 	return 0;
1748 }
1749 
1750 /*
1751  * Randomly toggle CPUs' callback-offload state.  This uses hrtimers to
1752  * increase race probabilities and fuzzes the interval between toggling.
1753  */
1754 static int rcu_nocb_toggle(void *arg)
1755 {
1756 	int cpu;
1757 	int maxcpu = -1;
1758 	int oldnice = task_nice(current);
1759 	long r;
1760 	DEFINE_TORTURE_RANDOM(rand);
1761 	ktime_t toggle_delay;
1762 	unsigned long toggle_fuzz;
1763 	ktime_t toggle_interval = ms_to_ktime(nocbs_toggle);
1764 
1765 	VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started");
1766 	while (!rcu_inkernel_boot_has_ended())
1767 		schedule_timeout_interruptible(HZ / 10);
1768 	for_each_online_cpu(cpu)
1769 		maxcpu = cpu;
1770 	WARN_ON(maxcpu < 0);
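	/* Fuzz the inter-toggle delay by up to one eighth of the interval, with a one-microsecond floor. */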
1771 	if (toggle_interval > ULONG_MAX)
1772 		toggle_fuzz = ULONG_MAX >> 3;
1773 	else
1774 		toggle_fuzz = toggle_interval >> 3;
1775 	if (toggle_fuzz <= 0)
1776 		toggle_fuzz = NSEC_PER_USEC;
1777 	do {
1778 		r = torture_random(&rand);
1779 		cpu = (r >> 4) % (maxcpu + 1);
1780 		if (r & 0x1) {
1781 			rcu_nocb_cpu_offload(cpu);
1782 			atomic_long_inc(&n_nocb_offload);
1783 		} else {
1784 			rcu_nocb_cpu_deoffload(cpu);
1785 			atomic_long_inc(&n_nocb_deoffload);
1786 		}
1787 		toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval;
1788 		set_current_state(TASK_INTERRUPTIBLE);
1789 		schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL);
1790 		if (stutter_wait("rcu_nocb_toggle"))
1791 			sched_set_normal(current, oldnice);
1792 	} while (!torture_must_stop());
1793 	torture_kthread_stopping("rcu_nocb_toggle");
1794 	return 0;
1795 }
1796 
1797 /*
1798  * Print torture statistics.  Caller must ensure that there is only
1799  * one call to this function at any given time!!!  This is normally
1800  * accomplished by relying on the module system to only have one copy
1801  * of the module loaded, and then by giving the rcu_torture_stats
1802  * kthread full control (or the init/cleanup functions when rcu_torture_stats
1803  * thread is not running).
1804  */
1805 static void
1806 rcu_torture_stats_print(void)
1807 {
1808 	int cpu;
1809 	int i;
1810 	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1811 	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1812 	struct rcu_torture *rtcp;
1813 	static unsigned long rtcv_snap = ULONG_MAX;
1814 	static bool splatted;
1815 	struct task_struct *wtp;
1816 
1817 	for_each_possible_cpu(cpu) {
1818 		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1819 			pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
1820 			batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
1821 		}
1822 	}
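	/* Find the highest-numbered non-empty pipeline bucket; anything past bucket 1 means a too-short grace period. */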
1823 	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
1824 		if (pipesummary[i] != 0)
1825 			break;
1826 	}
1827 
1828 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1829 	rtcp = rcu_access_pointer(rcu_torture_current);
1830 	pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
1831 		rtcp,
1832 		rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
1833 		rcu_torture_current_version,
1834 		list_empty(&rcu_torture_freelist),
1835 		atomic_read(&n_rcu_torture_alloc),
1836 		atomic_read(&n_rcu_torture_alloc_fail),
1837 		atomic_read(&n_rcu_torture_free));
1838 	pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld rtbre: %ld ",
1839 		atomic_read(&n_rcu_torture_mberror),
1840 		atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries),
1841 		n_rcu_torture_barrier_error,
1842 		n_rcu_torture_boost_ktrerror,
1843 		n_rcu_torture_boost_rterror);
1844 	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
1845 		n_rcu_torture_boost_failure,
1846 		n_rcu_torture_boosts,
1847 		atomic_long_read(&n_rcu_torture_timers));
1848 	torture_onoff_stats();
1849 	pr_cont("barrier: %ld/%ld:%ld ",
1850 		data_race(n_barrier_successes),
1851 		data_race(n_barrier_attempts),
1852 		data_race(n_rcu_torture_barrier_error));
1853 	pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic.
1854 	pr_cont("nocb-toggles: %ld:%ld\n",
1855 		atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload));
1856 
1857 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1858 	if (atomic_read(&n_rcu_torture_mberror) ||
1859 	    atomic_read(&n_rcu_torture_mbchk_fail) ||
1860 	    n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
1861 	    n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure ||
1862 	    i > 1) {
1863 		pr_cont("%s", "!!! ");
1864 		atomic_inc(&n_rcu_torture_error);
1865 		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
1866 		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail));
1867 		WARN_ON_ONCE(n_rcu_torture_barrier_error);  // rcu_barrier()
1868 		WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
1869 		WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio
1870 		WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?)
1871 		WARN_ON_ONCE(i > 1); // Too-short grace period
1872 	}
1873 	pr_cont("Reader Pipe: ");
1874 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
1875 		pr_cont(" %ld", pipesummary[i]);
1876 	pr_cont("\n");
1877 
1878 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1879 	pr_cont("Reader Batch: ");
1880 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
1881 		pr_cont(" %ld", batchsummary[i]);
1882 	pr_cont("\n");
1883 
1884 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1885 	pr_cont("Free-Block Circulation: ");
1886 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1887 		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
1888 	}
1889 	pr_cont("\n");
1890 
1891 	if (cur_ops->stats)
1892 		cur_ops->stats();
1893 	if (rtcv_snap == rcu_torture_current_version &&
1894 	    rcu_access_pointer(rcu_torture_current) &&
1895 	    !rcu_stall_is_suppressed()) {
1896 		int __maybe_unused flags = 0;
1897 		unsigned long __maybe_unused gp_seq = 0;
1898 
1899 		rcutorture_get_gp_data(cur_ops->ttype,
1900 				       &flags, &gp_seq);
1901 		srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
1902 					&flags, &gp_seq);
1903 		wtp = READ_ONCE(writer_task);
1904 		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n",
1905 			 rcu_torture_writer_state_getname(),
1906 			 rcu_torture_writer_state, gp_seq, flags,
1907 			 wtp == NULL ? ~0U : wtp->__state,
1908 			 wtp == NULL ? -1 : (int)task_cpu(wtp));
1909 		if (!splatted && wtp) {
1910 			sched_show_task(wtp);
1911 			splatted = true;
1912 		}
1913 		if (cur_ops->gp_kthread_dbg)
1914 			cur_ops->gp_kthread_dbg();
1915 		rcu_ftrace_dump(DUMP_ALL);
1916 	}
1917 	rtcv_snap = rcu_torture_current_version;
1918 }
1919 
1920 /*
1921  * Periodically prints torture statistics, if periodic statistics printing
1922  * was specified via the stat_interval module parameter.
1923  */
1924 static int
1925 rcu_torture_stats(void *arg)
1926 {
1927 	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
1928 	do {
1929 		schedule_timeout_interruptible(stat_interval * HZ);
1930 		rcu_torture_stats_print();
1931 		torture_shutdown_absorb("rcu_torture_stats");
1932 	} while (!torture_must_stop());
1933 	torture_kthread_stopping("rcu_torture_stats");
1934 	return 0;
1935 }
1936 
1937 /* Test mem_dump_obj() and friends.  */
1938 static void rcu_torture_mem_dump_obj(void)
1939 {
1940 	struct rcu_head *rhp;
1941 	struct kmem_cache *kcp;
1942 	static int z;
1943 
1944 	kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL);
1945 	rhp = kmem_cache_alloc(kcp, GFP_KERNEL);
1946 	pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z);
1947 	pr_alert("mem_dump_obj(ZERO_SIZE_PTR):");
1948 	mem_dump_obj(ZERO_SIZE_PTR);
1949 	pr_alert("mem_dump_obj(NULL):");
1950 	mem_dump_obj(NULL);
1951 	pr_alert("mem_dump_obj(%px):", &rhp);
1952 	mem_dump_obj(&rhp);
1953 	pr_alert("mem_dump_obj(%px):", rhp);
1954 	mem_dump_obj(rhp);
1955 	pr_alert("mem_dump_obj(%px):", &rhp->func);
1956 	mem_dump_obj(&rhp->func);
1957 	pr_alert("mem_dump_obj(%px):", &z);
1958 	mem_dump_obj(&z);
1959 	kmem_cache_free(kcp, rhp);
1960 	kmem_cache_destroy(kcp);
1961 	rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
1962 	pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
1963 	pr_alert("mem_dump_obj(kmalloc %px):", rhp);
1964 	mem_dump_obj(rhp);
1965 	pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func);
1966 	mem_dump_obj(&rhp->func);
1967 	kfree(rhp);
1968 	rhp = vmalloc(4096);
1969 	pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
1970 	pr_alert("mem_dump_obj(vmalloc %px):", rhp);
1971 	mem_dump_obj(rhp);
1972 	pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func);
1973 	mem_dump_obj(&rhp->func);
1974 	vfree(rhp);
1975 }
1976 
1977 static void
1978 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
1979 {
1980 	pr_alert("%s" TORTURE_FLAG
1981 		 "--- %s: nreaders=%d nfakewriters=%d "
1982 		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
1983 		 "shuffle_interval=%d stutter=%d irqreader=%d "
1984 		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
1985 		 "test_boost=%d/%d test_boost_interval=%d "
1986 		 "test_boost_duration=%d shutdown_secs=%d "
1987 		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
1988 		 "stall_cpu_block=%d "
1989 		 "n_barrier_cbs=%d "
1990 		 "onoff_interval=%d onoff_holdoff=%d "
1991 		 "read_exit_delay=%d read_exit_burst=%d "
1992 		 "nocbs_nthreads=%d nocbs_toggle=%d\n",
1993 		 torture_type, tag, nrealreaders, nfakewriters,
1994 		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
1995 		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
1996 		 test_boost, cur_ops->can_boost,
1997 		 test_boost_interval, test_boost_duration, shutdown_secs,
1998 		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
1999 		 stall_cpu_block,
2000 		 n_barrier_cbs,
2001 		 onoff_interval, onoff_holdoff,
2002 		 read_exit_delay, read_exit_burst,
2003 		 nocbs_nthreads, nocbs_toggle);
2004 }
2005 
2006 static int rcutorture_booster_cleanup(unsigned int cpu)
2007 {
2008 	struct task_struct *t;
2009 
2010 	if (boost_tasks[cpu] == NULL)
2011 		return 0;
2012 	mutex_lock(&boost_mutex);
2013 	t = boost_tasks[cpu];
2014 	boost_tasks[cpu] = NULL;
2015 	rcu_torture_enable_rt_throttle();
2016 	mutex_unlock(&boost_mutex);
2017 
2018 	/* This must be done outside of the mutex; otherwise, deadlock! */
2019 	torture_stop_kthread(rcu_torture_boost, t);
2020 	return 0;
2021 }
2022 
2023 static int rcutorture_booster_init(unsigned int cpu)
2024 {
2025 	int retval;
2026 
2027 	if (boost_tasks[cpu] != NULL)
2028 		return 0;  /* Already created, nothing more to do. */
2029 
2030 	/* Don't allow time recalculation while creating a new task. */
2031 	mutex_lock(&boost_mutex);
2032 	rcu_torture_disable_rt_throttle();
2033 	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
2034 	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
2035 						  cpu_to_node(cpu),
2036 						  "rcu_torture_boost");
2037 	if (IS_ERR(boost_tasks[cpu])) {
2038 		retval = PTR_ERR(boost_tasks[cpu]);
2039 		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
2040 		n_rcu_torture_boost_ktrerror++;
2041 		boost_tasks[cpu] = NULL;
2042 		mutex_unlock(&boost_mutex);
2043 		return retval;
2044 	}
2045 	kthread_bind(boost_tasks[cpu], cpu);
2046 	wake_up_process(boost_tasks[cpu]);
2047 	mutex_unlock(&boost_mutex);
2048 	return 0;
2049 }
2050 
2051 /*
2052  * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
2053  * induces a CPU stall for the time specified by stall_cpu.
2054  */
2055 static int rcu_torture_stall(void *args)
2056 {
2057 	int idx;
2058 	unsigned long stop_at;
2059 
2060 	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
2061 	if (stall_cpu_holdoff > 0) {
2062 		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
2063 		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
2064 		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
2065 	}
2066 	if (!kthread_should_stop() && stall_gp_kthread > 0) {
2067 		VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall");
2068 		rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
2069 		for (idx = 0; idx < stall_gp_kthread + 2; idx++) {
2070 			if (kthread_should_stop())
2071 				break;
2072 			schedule_timeout_uninterruptible(HZ);
2073 		}
2074 	}
2075 	if (!kthread_should_stop() && stall_cpu > 0) {
2076 		VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall");
2077 		stop_at = ktime_get_seconds() + stall_cpu;
2078 		/* RCU CPU stall is expected behavior in following code. */
2079 		idx = cur_ops->readlock();
2080 		if (stall_cpu_irqsoff)
2081 			local_irq_disable();
2082 		else if (!stall_cpu_block)
2083 			preempt_disable();
2084 		pr_alert("%s start on CPU %d.\n",
2085 			  __func__, raw_smp_processor_id());
2086 		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
2087 				    stop_at))
2088 			if (stall_cpu_block) {
2089 #ifdef CONFIG_PREEMPTION
2090 				preempt_schedule();
2091 #else
2092 				schedule_timeout_uninterruptible(HZ);
2093 #endif
2094 			} else if (stall_no_softlockup) {
2095 				touch_softlockup_watchdog();
2096 			}
2097 		if (stall_cpu_irqsoff)
2098 			local_irq_enable();
2099 		else if (!stall_cpu_block)
2100 			preempt_enable();
2101 		cur_ops->readunlock(idx);
2102 	}
2103 	pr_alert("%s end.\n", __func__);
2104 	torture_shutdown_absorb("rcu_torture_stall");
2105 	while (!kthread_should_stop())
2106 		schedule_timeout_interruptible(10 * HZ);
2107 	return 0;
2108 }
2109 
2110 /* Spawn CPU-stall kthread, if stall_cpu specified. */
2111 static int __init rcu_torture_stall_init(void)
2112 {
2113 	if (stall_cpu <= 0 && stall_gp_kthread <= 0)
2114 		return 0;
2115 	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
2116 }
2117 
2118 /* State structure for forward-progress self-propagating RCU callback. */
2119 struct fwd_cb_state {
2120 	struct rcu_head rh;
2121 	int stop;
2122 };
2123 
2124 /*
2125  * Forward-progress self-propagating RCU callback function.  Because
2126  * callbacks run from softirq, this function is an implicit RCU read-side
2127  * critical section.
2128  */
2129 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
2130 {
2131 	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
2132 
2133 	if (READ_ONCE(fcsp->stop)) {
2134 		WRITE_ONCE(fcsp->stop, 2);
2135 		return;
2136 	}
2137 	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
2138 }
2139 
2140 /* State for continuous-flood RCU callbacks. */
2141 struct rcu_fwd_cb {
2142 	struct rcu_head rh;
2143 	struct rcu_fwd_cb *rfc_next;
2144 	struct rcu_fwd *rfc_rfp;
2145 	int rfc_gps;
2146 };
2147 
2148 #define MAX_FWD_CB_JIFFIES	(8 * HZ) /* Maximum CB test duration. */
2149 #define MIN_FWD_CB_LAUNDERS	3	/* This many CB invocations to count. */
2150 #define MIN_FWD_CBS_LAUNDERED	100	/* Number of counted CBs. */
2151 #define FWD_CBS_HIST_DIV	10	/* Histogram buckets/second. */
2152 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
2153 
2154 struct rcu_launder_hist {
2155 	long n_launders;
2156 	unsigned long launder_gp_seq;
2157 };
2158 
2159 struct rcu_fwd {
2160 	spinlock_t rcu_fwd_lock;
2161 	struct rcu_fwd_cb *rcu_fwd_cb_head;
2162 	struct rcu_fwd_cb **rcu_fwd_cb_tail;
2163 	long n_launders_cb;
2164 	unsigned long rcu_fwd_startat;
2165 	struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
2166 	unsigned long rcu_launder_gp_seq_start;
2167 	int rcu_fwd_id;
2168 };
2169 
2170 static DEFINE_MUTEX(rcu_fwd_mutex);
2171 static struct rcu_fwd *rcu_fwds;
2172 static unsigned long rcu_fwd_seq;
2173 static atomic_long_t rcu_fwd_max_cbs;
2174 static bool rcu_fwd_emergency_stop;
2175 
2176 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
2177 {
2178 	unsigned long gps;
2179 	unsigned long gps_old;
2180 	int i;
2181 	int j;
2182 
2183 	for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
2184 		if (rfp->n_launders_hist[i].n_launders > 0)
2185 			break;
2186 	mutex_lock(&rcu_fwd_mutex); // Serialize histograms.
2187 	pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):",
2188 		 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat);
2189 	gps_old = rfp->rcu_launder_gp_seq_start;
2190 	for (j = 0; j <= i; j++) {
2191 		gps = rfp->n_launders_hist[j].launder_gp_seq;
2192 		pr_cont(" %ds/%d: %ld:%ld",
2193 			j + 1, FWD_CBS_HIST_DIV,
2194 			rfp->n_launders_hist[j].n_launders,
2195 			rcutorture_seq_diff(gps, gps_old));
2196 		gps_old = gps;
2197 	}
2198 	pr_cont("\n");
2199 	mutex_unlock(&rcu_fwd_mutex);
2200 }
2201 
2202 /* Callback function for continuous-flood RCU callbacks. */
2203 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
2204 {
2205 	unsigned long flags;
2206 	int i;
2207 	struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
2208 	struct rcu_fwd_cb **rfcpp;
2209 	struct rcu_fwd *rfp = rfcp->rfc_rfp;
2210 
2211 	rfcp->rfc_next = NULL;
2212 	rfcp->rfc_gps++;
2213 	spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2214 	rfcpp = rfp->rcu_fwd_cb_tail;
2215 	rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
2216 	WRITE_ONCE(*rfcpp, rfcp);
2217 	WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
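	/* Bucket by elapsed test time, FWD_CBS_HIST_DIV buckets per second, clamping to the last bucket. */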
2218 	i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
2219 	if (i >= ARRAY_SIZE(rfp->n_launders_hist))
2220 		i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
2221 	rfp->n_launders_hist[i].n_launders++;
2222 	rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
2223 	spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2224 }
2225 
2226 // Give the scheduler a chance, even on nohz_full CPUs.
2227 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
2228 {
2229 	if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
2230 		// Real call_rcu() floods hit userspace, so emulate that.
2231 		if (need_resched() || (iter & 0xfff))
2232 			schedule();
2233 		return;
2234 	}
2235 	// No userspace emulation: CB invocation throttles call_rcu().
2236 	cond_resched();
2237 }
2238 
2239 /*
2240  * Free all callbacks on the rcu_fwd_cb_head list, either because the
2241  * test is over or because we hit an OOM event.
2242  */
2243 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
2244 {
2245 	unsigned long flags;
2246 	unsigned long freed = 0;
2247 	struct rcu_fwd_cb *rfcp;
2248 
2249 	for (;;) {
2250 		spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2251 		rfcp = rfp->rcu_fwd_cb_head;
2252 		if (!rfcp) {
2253 			spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2254 			break;
2255 		}
2256 		rfp->rcu_fwd_cb_head = rfcp->rfc_next;
2257 		if (!rfp->rcu_fwd_cb_head)
2258 			rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
2259 		spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2260 		kfree(rfcp);
2261 		freed++;
2262 		rcu_torture_fwd_prog_cond_resched(freed);
2263 		if (tick_nohz_full_enabled()) {
2264 			local_irq_save(flags);
2265 			rcu_momentary_dyntick_idle();
2266 			local_irq_restore(flags);
2267 		}
2268 	}
2269 	return freed;
2270 }
2271 
2272 /* Carry out need_resched()/cond_resched() forward-progress testing. */
2273 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
2274 				    int *tested, int *tested_tries)
2275 {
2276 	unsigned long cver;
2277 	unsigned long dur;
2278 	struct fwd_cb_state fcs;
2279 	unsigned long gps;
2280 	int idx;
2281 	int sd;
2282 	int sd4;
2283 	bool selfpropcb = false;
2284 	unsigned long stopat;
2285 	static DEFINE_TORTURE_RANDOM(trs);
2286 
2287 	if (!cur_ops->sync)
2288 		return; // Cannot do need_resched() forward progress testing without ->sync.
2289 	if (cur_ops->call && cur_ops->cb_barrier) {
2290 		init_rcu_head_on_stack(&fcs.rh);
2291 		selfpropcb = true;
2292 	}
2293 
2294 	/* Tight loop containing cond_resched(). */
2295 	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
2296 	cur_ops->sync(); /* Later readers see above write. */
2297 	if (selfpropcb) {
2298 		WRITE_ONCE(fcs.stop, 0);
2299 		cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
2300 	}
2301 	cver = READ_ONCE(rcu_torture_current_version);
2302 	gps = cur_ops->get_gp_seq();
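	/* Pick a random duration between 1/fwd_progress_div of the RCU CPU-stall timeout and the full timeout. */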
2303 	sd = cur_ops->stall_dur() + 1;
2304 	sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
2305 	dur = sd4 + torture_random(&trs) % (sd - sd4);
2306 	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2307 	stopat = rfp->rcu_fwd_startat + dur;
2308 	while (time_before(jiffies, stopat) &&
2309 	       !shutdown_time_arrived() &&
2310 	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2311 		idx = cur_ops->readlock();
2312 		udelay(10);
2313 		cur_ops->readunlock(idx);
2314 		if (!fwd_progress_need_resched || need_resched())
2315 			cond_resched();
2316 	}
2317 	(*tested_tries)++;
2318 	if (!time_before(jiffies, stopat) &&
2319 	    !shutdown_time_arrived() &&
2320 	    !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2321 		(*tested)++;
2322 		cver = READ_ONCE(rcu_torture_current_version) - cver;
2323 		gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2324 		WARN_ON(!cver && gps < 2);
2325 		pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__,
2326 			 rfp->rcu_fwd_id, dur, cver, gps);
2327 	}
2328 	if (selfpropcb) {
2329 		WRITE_ONCE(fcs.stop, 1);
2330 		cur_ops->sync(); /* Wait for running CB to complete. */
2331 		cur_ops->cb_barrier(); /* Wait for queued callbacks. */
2332 	}
2333 
2334 	if (selfpropcb) {
2335 		WARN_ON(READ_ONCE(fcs.stop) != 2);
2336 		destroy_rcu_head_on_stack(&fcs.rh);
2337 	}
2338 	schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
2339 	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
2340 }
2341 
2342 /* Carry out call_rcu() forward-progress testing. */
2343 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
2344 {
2345 	unsigned long cver;
2346 	unsigned long flags;
2347 	unsigned long gps;
2348 	int i;
2349 	long n_launders;
2350 	long n_launders_cb_snap;
2351 	long n_launders_sa;
2352 	long n_max_cbs;
2353 	long n_max_gps;
2354 	struct rcu_fwd_cb *rfcp;
2355 	struct rcu_fwd_cb *rfcpn;
2356 	unsigned long stopat;
2357 	unsigned long stoppedat;
2358 
2359 	if (READ_ONCE(rcu_fwd_emergency_stop))
2360 		return; /* Get out of the way quickly, no GP wait! */
2361 	if (!cur_ops->call)
2362 		return; /* Can't do call_rcu() fwd prog without ->call. */
2363 
2364 	/* Loop continuously posting RCU callbacks. */
2365 	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
2366 	cur_ops->sync(); /* Later readers see above write. */
2367 	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2368 	stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
2369 	n_launders = 0;
2370 	rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
2371 	n_launders_sa = 0;
2372 	n_max_cbs = 0;
2373 	n_max_gps = 0;
2374 	for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
2375 		rfp->n_launders_hist[i].n_launders = 0;
2376 	cver = READ_ONCE(rcu_torture_current_version);
2377 	gps = cur_ops->get_gp_seq();
2378 	rfp->rcu_launder_gp_seq_start = gps;
2379 	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2380 	while (time_before(jiffies, stopat) &&
2381 	       !shutdown_time_arrived() &&
2382 	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2383 		rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
2384 		rfcpn = NULL;
2385 		if (rfcp)
2386 			rfcpn = READ_ONCE(rfcp->rfc_next);
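		/*
		 * With at least two already-invoked callbacks queued, relaunch
		 * ("launder") the head one, stopping once enough callbacks have
		 * been laundered enough times.  Otherwise, grow the flood with
		 * a new allocation, subject to the ->cbflood_max cap.
		 */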
2387 		if (rfcpn) {
2388 			if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
2389 			    ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
2390 				break;
2391 			rfp->rcu_fwd_cb_head = rfcpn;
2392 			n_launders++;
2393 			n_launders_sa++;
2394 		} else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) {
2395 			rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
2396 			if (WARN_ON_ONCE(!rfcp)) {
2397 				schedule_timeout_interruptible(1);
2398 				continue;
2399 			}
2400 			n_max_cbs++;
2401 			n_launders_sa = 0;
2402 			rfcp->rfc_gps = 0;
2403 			rfcp->rfc_rfp = rfp;
2404 		} else {
2405 			rfcp = NULL;
2406 		}
2407 		if (rfcp)
2408 			cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
2409 		rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
2410 		if (tick_nohz_full_enabled()) {
2411 			local_irq_save(flags);
2412 			rcu_momentary_dyntick_idle();
2413 			local_irq_restore(flags);
2414 		}
2415 	}
2416 	stoppedat = jiffies;
2417 	n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
2418 	cver = READ_ONCE(rcu_torture_current_version) - cver;
2419 	gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2420 	cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
2421 	(void)rcu_torture_fwd_prog_cbfree(rfp);
2422 
2423 	if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
2424 	    !shutdown_time_arrived()) {
2425 		WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
2426 		pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
2427 			 __func__,
2428 			 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
2429 			 n_launders + n_max_cbs - n_launders_cb_snap,
2430 			 n_launders, n_launders_sa,
2431 			 n_max_gps, n_max_cbs, cver, gps);
2432 		atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs);
2433 		rcu_torture_fwd_cb_hist(rfp);
2434 	}
2435 	schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
2436 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2437 	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
2438 }
2439 
2440 
2441 /*
2442  * OOM notifier: prints diagnostics for the current forward-progress
2443  * test, stops it, and frees its flood of RCU callbacks.
2444  */
2445 static int rcutorture_oom_notify(struct notifier_block *self,
2446 				 unsigned long notused, void *nfreed)
2447 {
2448 	int i;
2449 	long ncbs;
2450 	struct rcu_fwd *rfp;
2451 
2452 	mutex_lock(&rcu_fwd_mutex);
2453 	rfp = rcu_fwds;
2454 	if (!rfp) {
2455 		mutex_unlock(&rcu_fwd_mutex);
2456 		return NOTIFY_OK;
2457 	}
2458 	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
2459 	     __func__);
2460 	for (i = 0; i < fwd_progress; i++) {
2461 		rcu_torture_fwd_cb_hist(&rfp[i]);
2462 		rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2);
2463 	}
2464 	WRITE_ONCE(rcu_fwd_emergency_stop, true);
2465 	smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
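	/* Alternate freeing with rcu_barrier(): callbacks still in flight are requeued on the list as they are invoked. */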
2466 	ncbs = 0;
2467 	for (i = 0; i < fwd_progress; i++)
2468 		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
2469 	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
2470 	rcu_barrier();
2471 	ncbs = 0;
2472 	for (i = 0; i < fwd_progress; i++)
2473 		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
2474 	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
2475 	rcu_barrier();
2476 	ncbs = 0;
2477 	for (i = 0; i < fwd_progress; i++)
2478 		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
2479 	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
2480 	smp_mb(); /* Frees before return to avoid redoing OOM. */
2481 	(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
2482 	pr_info("%s returning after OOM processing.\n", __func__);
2483 	mutex_unlock(&rcu_fwd_mutex);
2484 	return NOTIFY_OK;
2485 }
2486 
2487 static struct notifier_block rcutorture_oom_nb = {
2488 	.notifier_call = rcutorture_oom_notify
2489 };
2490 
2491 /* Carry out grace-period forward-progress testing. */
2492 static int rcu_torture_fwd_prog(void *args)
2493 {
2494 	bool firsttime = true;
2495 	long max_cbs;
2496 	int oldnice = task_nice(current);
2497 	unsigned long oldseq = READ_ONCE(rcu_fwd_seq);
2498 	struct rcu_fwd *rfp = args;
2499 	int tested = 0;
2500 	int tested_tries = 0;
2501 
2502 	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
2503 	rcu_bind_current_to_nocb();
2504 	if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
2505 		set_user_nice(current, MAX_NICE);
2506 	do {
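		/* Kthread 0 leads: it observes the holdoff and advances rcu_fwd_seq, which the other kthreads wait on. */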
2507 		if (!rfp->rcu_fwd_id) {
2508 			schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
2509 			WRITE_ONCE(rcu_fwd_emergency_stop, false);
2510 			if (!firsttime) {
2511 				max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0);
2512 				pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs);
2513 			}
2514 			firsttime = false;
2515 			WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1);
2516 		} else {
2517 			while (READ_ONCE(rcu_fwd_seq) == oldseq)
2518 				schedule_timeout_interruptible(1);
2519 			oldseq = READ_ONCE(rcu_fwd_seq);
2520 		}
2521 		pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
2522 		if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id)
2523 			rcu_torture_fwd_prog_cr(rfp);
2524 		if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) &&
2525 		    (!IS_ENABLED(CONFIG_TINY_RCU) ||
2526 		     (rcu_inkernel_boot_has_ended() &&
2527 		      torture_num_online_cpus() > rfp->rcu_fwd_id)))
2528 			rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
2529 
2530 		/* Avoid slow periods, better to test when busy. */
2531 		if (stutter_wait("rcu_torture_fwd_prog"))
2532 			sched_set_normal(current, oldnice);
2533 	} while (!torture_must_stop());
2534 	/* Short runs might not contain a valid forward-progress attempt. */
2535 	if (!rfp->rcu_fwd_id) {
2536 		WARN_ON(!tested && tested_tries >= 5);
2537 		pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
2538 	}
2539 	torture_kthread_stopping("rcu_torture_fwd_prog");
2540 	return 0;
2541 }
2542 
2543 /* If forward-progress checking is requested and feasible, spawn the thread. */
2544 static int __init rcu_torture_fwd_prog_init(void)
2545 {
2546 	int i;
2547 	int ret = 0;
2548 	struct rcu_fwd *rfp;
2549 
2550 	if (!fwd_progress)
2551 		return 0; /* Not requested, so don't do it. */
2552 	if (fwd_progress >= nr_cpu_ids) {
2553 		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.");
2554 		fwd_progress = nr_cpu_ids;
2555 	} else if (fwd_progress < 0) {
2556 		fwd_progress = nr_cpu_ids;
2557 	}
2558 	if ((!cur_ops->sync && !cur_ops->call) ||
2559 	    (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) ||
2560 	    cur_ops == &rcu_busted_ops) {
2561 		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
2562 		fwd_progress = 0;
2563 		return 0;
2564 	}
2565 	if (stall_cpu > 0) {
2566 		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
2567 		fwd_progress = 0;
2568 		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
2569 			return -EINVAL; /* In module, can fail back to user. */
2570 		WARN_ON(1); /* Make sure rcutorture notices conflict. */
2571 		return 0;
2572 	}
2573 	if (fwd_progress_holdoff <= 0)
2574 		fwd_progress_holdoff = 1;
2575 	if (fwd_progress_div <= 0)
2576 		fwd_progress_div = 4;
2577 	rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL);
2578 	fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL);
2579 	if (!rfp || !fwd_prog_tasks) {
2580 		kfree(rfp);
2581 		kfree(fwd_prog_tasks);
2582 		fwd_prog_tasks = NULL;
2583 		fwd_progress = 0;
2584 		return -ENOMEM;
2585 	}
2586 	for (i = 0; i < fwd_progress; i++) {
2587 		spin_lock_init(&rfp[i].rcu_fwd_lock);
2588 		rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head;
2589 		rfp[i].rcu_fwd_id = i;
2590 	}
2591 	mutex_lock(&rcu_fwd_mutex);
2592 	rcu_fwds = rfp;
2593 	mutex_unlock(&rcu_fwd_mutex);
2594 	register_oom_notifier(&rcutorture_oom_nb);
2595 	for (i = 0; i < fwd_progress; i++) {
2596 		ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]);
2597 		if (ret) {
2598 			fwd_progress = i;
2599 			return ret;
2600 		}
2601 	}
2602 	return 0;
2603 }
2604 
2605 static void rcu_torture_fwd_prog_cleanup(void)
2606 {
2607 	int i;
2608 	struct rcu_fwd *rfp;
2609 
2610 	if (!rcu_fwds || !fwd_prog_tasks)
2611 		return;
2612 	for (i = 0; i < fwd_progress; i++)
2613 		torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]);
2614 	unregister_oom_notifier(&rcutorture_oom_nb);
2615 	mutex_lock(&rcu_fwd_mutex);
2616 	rfp = rcu_fwds;
2617 	rcu_fwds = NULL;
2618 	mutex_unlock(&rcu_fwd_mutex);
2619 	kfree(rfp);
2620 	kfree(fwd_prog_tasks);
2621 	fwd_prog_tasks = NULL;
2622 }
2623 
2624 /* Callback function for RCU barrier testing. */
2625 static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
2626 {
2627 	atomic_inc(&barrier_cbs_invoked);
2628 }
2629 
2630 /* IPI handler to get callback posted on desired CPU, if online. */
2631 static void rcu_torture_barrier1cb(void *rcu_void)
2632 {
2633 	struct rcu_head *rhp = rcu_void;
2634 
2635 	cur_ops->call(rhp, rcu_torture_barrier_cbf);
2636 }
2637 
2638 /* kthread function to register callbacks used to test RCU barriers. */
2639 static int rcu_torture_barrier_cbs(void *arg)
2640 {
2641 	long myid = (long)arg;
2642 	bool lastphase = false;
2643 	bool newphase;
2644 	struct rcu_head rcu;
2645 
2646 	init_rcu_head_on_stack(&rcu);
2647 	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
2648 	set_user_nice(current, MAX_NICE);
2649 	do {
2650 		wait_event(barrier_cbs_wq[myid],
2651 			   (newphase =
2652 			    smp_load_acquire(&barrier_phase)) != lastphase ||
2653 			   torture_must_stop());
2654 		lastphase = newphase;
2655 		if (torture_must_stop())
2656 			break;
2657 		/*
2658 		 * The above smp_load_acquire() ensures barrier_phase load
2659 		 * is ordered before the following ->call().
2660 		 */
2661 		if (smp_call_function_single(myid, rcu_torture_barrier1cb,
2662 					     &rcu, 1)) {
2663 			// IPI failed, so use direct call from current CPU.
2664 			cur_ops->call(&rcu, rcu_torture_barrier_cbf);
2665 		}
2666 		if (atomic_dec_and_test(&barrier_cbs_count))
2667 			wake_up(&barrier_wq);
2668 	} while (!torture_must_stop());
2669 	if (cur_ops->cb_barrier != NULL)
2670 		cur_ops->cb_barrier();
2671 	destroy_rcu_head_on_stack(&rcu);
2672 	torture_kthread_stopping("rcu_torture_barrier_cbs");
2673 	return 0;
2674 }
2675 
2676 /* kthread function to drive and coordinate RCU barrier testing. */
2677 static int rcu_torture_barrier(void *arg)
2678 {
2679 	int i;
2680 
2681 	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
2682 	do {
2683 		atomic_set(&barrier_cbs_invoked, 0);
2684 		atomic_set(&barrier_cbs_count, n_barrier_cbs);
2685 		/* Ensure barrier_phase ordered after prior assignments. */
2686 		smp_store_release(&barrier_phase, !barrier_phase);
2687 		for (i = 0; i < n_barrier_cbs; i++)
2688 			wake_up(&barrier_cbs_wq[i]);
2689 		wait_event(barrier_wq,
2690 			   atomic_read(&barrier_cbs_count) == 0 ||
2691 			   torture_must_stop());
2692 		if (torture_must_stop())
2693 			break;
2694 		n_barrier_attempts++;
2695 		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
2696 		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
2697 			n_rcu_torture_barrier_error++;
2698 			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
2699 			       atomic_read(&barrier_cbs_invoked),
2700 			       n_barrier_cbs);
2701 			WARN_ON(1);
2702 			// Wait manually for the remaining callbacks
2703 			i = 0;
2704 			do {
2705 				if (WARN_ON(i++ > HZ))
2706 					i = INT_MIN;
2707 				schedule_timeout_interruptible(1);
2708 				cur_ops->cb_barrier();
2709 			} while (atomic_read(&barrier_cbs_invoked) !=
2710 				 n_barrier_cbs &&
2711 				 !torture_must_stop());
2712 			smp_mb(); // Can't trust ordering if broken.
2713 			if (!torture_must_stop())
2714 				pr_err("Recovered: barrier_cbs_invoked = %d\n",
2715 				       atomic_read(&barrier_cbs_invoked));
2716 		} else {
2717 			n_barrier_successes++;
2718 		}
2719 		schedule_timeout_interruptible(HZ / 10);
2720 	} while (!torture_must_stop());
2721 	torture_kthread_stopping("rcu_torture_barrier");
2722 	return 0;
2723 }
2724 
2725 /* Initialize RCU barrier testing. */
2726 static int rcu_torture_barrier_init(void)
2727 {
2728 	int i;
2729 	int ret;
2730 
2731 	if (n_barrier_cbs <= 0)
2732 		return 0;
2733 	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
2734 		pr_alert("%s" TORTURE_FLAG
2735 			 " Call or barrier ops missing for %s,\n",
2736 			 torture_type, cur_ops->name);
2737 		pr_alert("%s" TORTURE_FLAG
2738 			 " RCU barrier testing omitted from run.\n",
2739 			 torture_type);
2740 		return 0;
2741 	}
2742 	atomic_set(&barrier_cbs_count, 0);
2743 	atomic_set(&barrier_cbs_invoked, 0);
2744 	barrier_cbs_tasks =
2745 		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
2746 			GFP_KERNEL);
2747 	barrier_cbs_wq =
2748 		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
2749 	if (!barrier_cbs_tasks || !barrier_cbs_wq)
2750 		return -ENOMEM;
2751 	for (i = 0; i < n_barrier_cbs; i++) {
2752 		init_waitqueue_head(&barrier_cbs_wq[i]);
2753 		ret = torture_create_kthread(rcu_torture_barrier_cbs,
2754 					     (void *)(long)i,
2755 					     barrier_cbs_tasks[i]);
2756 		if (ret)
2757 			return ret;
2758 	}
2759 	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
2760 }
2761 
2762 /* Clean up after RCU barrier testing. */
2763 static void rcu_torture_barrier_cleanup(void)
2764 {
2765 	int i;
2766 
2767 	torture_stop_kthread(rcu_torture_barrier, barrier_task);
2768 	if (barrier_cbs_tasks != NULL) {
2769 		for (i = 0; i < n_barrier_cbs; i++)
2770 			torture_stop_kthread(rcu_torture_barrier_cbs,
2771 					     barrier_cbs_tasks[i]);
2772 		kfree(barrier_cbs_tasks);
2773 		barrier_cbs_tasks = NULL;
2774 	}
2775 	if (barrier_cbs_wq != NULL) {
2776 		kfree(barrier_cbs_wq);
2777 		barrier_cbs_wq = NULL;
2778 	}
2779 }
2780 
2781 static bool rcu_torture_can_boost(void)
2782 {
2783 	static int boost_warn_once;
2784 	int prio;
2785 
2786 	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
2787 		return false;
2788 	if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)
2789 		return false;
2790 
2791 	prio = rcu_get_gp_kthreads_prio();
2792 	if (!prio)
2793 		return false;
2794 
2795 	if (prio < 2) {
2796 		if (boost_warn_once == 1)
2797 			return false;
2798 
2799 		pr_alert("%s: WARN: RCU kthread priority too low to test boosting.  Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
2800 		boost_warn_once = 1;
2801 		return false;
2802 	}
2803 
2804 	return true;
2805 }
2806 
2807 static bool read_exit_child_stop;
2808 static bool read_exit_child_stopped;
2809 static wait_queue_head_t read_exit_wq;
2810 
2811 // Child kthread which just does an rcutorture reader and exits.
2812 static int rcu_torture_read_exit_child(void *trsp_in)
2813 {
2814 	struct torture_random_state *trsp = trsp_in;
2815 
2816 	set_user_nice(current, MAX_NICE);
2817 	// Minimize time between reading and exiting.
2818 	while (!kthread_should_stop())
2819 		schedule_timeout_uninterruptible(1);
2820 	(void)rcu_torture_one_read(trsp, -1);
2821 	return 0;
2822 }
2823 
2824 // Parent kthread which creates and destroys read-exit child kthreads.
2825 static int rcu_torture_read_exit(void *unused)
2826 {
2827 	int count = 0;
2828 	bool errexit = false;
2829 	int i;
2830 	struct task_struct *tsp;
2831 	DEFINE_TORTURE_RANDOM(trs);
2832 
2833 	// Allocate and initialize.
2834 	set_user_nice(current, MAX_NICE);
2835 	VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test");
2836 
2837 	// Each pass through this loop does one read-exit episode.
2838 	do {
2839 		if (++count > read_exit_burst) {
2840 			VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode");
2841 			rcu_barrier(); // Wait for task_struct free, avoid OOM.
2842 			for (i = 0; i < read_exit_delay; i++) {
2843 				schedule_timeout_uninterruptible(HZ);
2844 				if (READ_ONCE(read_exit_child_stop))
2845 					break;
2846 			}
2847 			if (!READ_ONCE(read_exit_child_stop))
2848 				VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode");
2849 			count = 0;
2850 		}
2851 		if (READ_ONCE(read_exit_child_stop))
2852 			break;
2853 		// Spawn child.
2854 		tsp = kthread_run(rcu_torture_read_exit_child,
2855 				  &trs, "%s",
2856 				  "rcu_torture_read_exit_child");
2857 		if (IS_ERR(tsp)) {
2858 			TOROUT_ERRSTRING("out of memory");
2859 			errexit = true;
2860 			tsp = NULL;
2861 			break;
2862 		}
2863 		cond_resched();
2864 		kthread_stop(tsp);
2865 		n_read_exits++;
2866 		stutter_wait("rcu_torture_read_exit");
2867 	} while (!errexit && !READ_ONCE(read_exit_child_stop));
2868 
2869 	// Clean up and exit.
2870 	smp_store_release(&read_exit_child_stopped, true); // After reaping.
2871 	smp_mb(); // Store before wakeup.
2872 	wake_up(&read_exit_wq);
2873 	while (!torture_must_stop())
2874 		schedule_timeout_uninterruptible(1);
2875 	torture_kthread_stopping("rcu_torture_read_exit");
2876 	return 0;
2877 }
2878 
2879 static int rcu_torture_read_exit_init(void)
2880 {
2881 	if (read_exit_burst <= 0)
2882 		return 0;
2883 	init_waitqueue_head(&read_exit_wq);
2884 	read_exit_child_stop = false;
2885 	read_exit_child_stopped = false;
2886 	return torture_create_kthread(rcu_torture_read_exit, NULL,
2887 				      read_exit_task);
2888 }
2889 
2890 static void rcu_torture_read_exit_cleanup(void)
2891 {
2892 	if (!read_exit_task)
2893 		return;
2894 	WRITE_ONCE(read_exit_child_stop, true);
2895 	smp_mb(); // Above write before wait.
2896 	wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
2897 	torture_stop_kthread(rcu_torture_read_exit, read_exit_task);
2898 }
2899 
2900 static enum cpuhp_state rcutor_hp;
2901 
2902 static void
2903 rcu_torture_cleanup(void)
2904 {
2905 	int firsttime;
2906 	int flags = 0;
2907 	unsigned long gp_seq = 0;
2908 	int i;
2909 
2910 	if (torture_cleanup_begin()) {
2911 		if (cur_ops->cb_barrier != NULL)
2912 			cur_ops->cb_barrier();
2913 		return;
2914 	}
2915 	if (!cur_ops) {
2916 		torture_cleanup_end();
2917 		return;
2918 	}
2919 
2920 	if (cur_ops->gp_kthread_dbg)
2921 		cur_ops->gp_kthread_dbg();
2922 	rcu_torture_read_exit_cleanup();
2923 	rcu_torture_barrier_cleanup();
2924 	rcu_torture_fwd_prog_cleanup();
2925 	torture_stop_kthread(rcu_torture_stall, stall_task);
2926 	torture_stop_kthread(rcu_torture_writer, writer_task);
2927 
2928 	if (nocb_tasks) {
2929 		for (i = 0; i < nrealnocbers; i++)
2930 			torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]);
2931 		kfree(nocb_tasks);
2932 		nocb_tasks = NULL;
2933 	}
2934 
2935 	if (reader_tasks) {
2936 		for (i = 0; i < nrealreaders; i++)
2937 			torture_stop_kthread(rcu_torture_reader,
2938 					     reader_tasks[i]);
2939 		kfree(reader_tasks);
2940 		reader_tasks = NULL;
2941 	}
2942 	kfree(rcu_torture_reader_mbchk);
2943 	rcu_torture_reader_mbchk = NULL;
2944 
2945 	if (fakewriter_tasks) {
2946 		for (i = 0; i < nfakewriters; i++)
2947 			torture_stop_kthread(rcu_torture_fakewriter,
2948 					     fakewriter_tasks[i]);
2949 		kfree(fakewriter_tasks);
2950 		fakewriter_tasks = NULL;
2951 	}
2952 
2953 	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
2954 	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
2955 	pr_alert("%s:  End-test grace-period state: g%ld f%#x total-gps=%ld\n",
2956 		 cur_ops->name, (long)gp_seq, flags,
2957 		 rcutorture_seq_diff(gp_seq, start_gp_seq));
2958 	torture_stop_kthread(rcu_torture_stats, stats_task);
2959 	torture_stop_kthread(rcu_torture_fqs, fqs_task);
2960 	if (rcu_torture_can_boost() && rcutor_hp >= 0)
2961 		cpuhp_remove_state(rcutor_hp);
2962 
2963 	/*
2964 	 * Wait for all RCU callbacks to fire, then do torture-type-specific
2965 	 * cleanup operations.
2966 	 */
2967 	if (cur_ops->cb_barrier != NULL)
2968 		cur_ops->cb_barrier();
2969 	if (cur_ops->cleanup != NULL)
2970 		cur_ops->cleanup();
2971 
2972 	rcu_torture_mem_dump_obj();
2973 
2974 	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */
2975 
2976 	if (err_segs_recorded) {
2977 		pr_alert("Failure/close-call rcutorture reader segments:\n");
2978 		if (rt_read_nsegs == 0)
2979 			pr_alert("\t: No segments recorded!!!\n");
2980 		firsttime = 1;
2981 		for (i = 0; i < rt_read_nsegs; i++) {
2982 			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
2983 			if (err_segs[i].rt_delay_jiffies != 0) {
2984 				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
2985 					err_segs[i].rt_delay_jiffies);
2986 				firsttime = 0;
2987 			}
2988 			if (err_segs[i].rt_delay_ms != 0) {
2989 				pr_cont("%s%ldms", firsttime ? "" : "+",
2990 					err_segs[i].rt_delay_ms);
2991 				firsttime = 0;
2992 			}
2993 			if (err_segs[i].rt_delay_us != 0) {
2994 				pr_cont("%s%ldus", firsttime ? "" : "+",
2995 					err_segs[i].rt_delay_us);
2996 				firsttime = 0;
2997 			}
2998 			pr_cont("%s\n",
2999 				err_segs[i].rt_preempted ? "preempted" : "");
3000 
3001 		}
3002 	}
3003 	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
3004 		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
3005 	else if (torture_onoff_failures())
3006 		rcu_torture_print_module_parms(cur_ops,
3007 					       "End of test: RCU_HOTPLUG");
3008 	else
3009 		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
3010 	torture_cleanup_end();
3011 }
3012 
3013 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
3014 static void rcu_torture_leak_cb(struct rcu_head *rhp)
3015 {
3016 }
3017 
3018 static void rcu_torture_err_cb(struct rcu_head *rhp)
3019 {
3020 	/*
3021 	 * This -might- happen due to race conditions, but is unlikely.
3022 	 * The scenario that leads to this happening is that the
3023 	 * first of the pair of duplicate callbacks is queued,
3024 	 * someone else starts a grace period that includes that
3025 	 * callback, and then the second of the pair must wait for the
3026 	 * next grace period.  Unlikely, but it can happen.  If it
3027 	 * does happen, the debug-objects subsystem won't have splatted.
3028 	 */
3029 	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
3030 }
3031 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
3032 
3033 /*
3034  * Verify that double-free causes debug-objects to complain, but only
3035  * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
3036  * cannot be carried out.
3037  */
3038 static void rcu_test_debug_objects(void)
3039 {
3040 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
3041 	struct rcu_head rh1;
3042 	struct rcu_head rh2;
3043 	struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
3044 
3045 	init_rcu_head_on_stack(&rh1);
3046 	init_rcu_head_on_stack(&rh2);
3047 	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);
3048 
3049 	/* Try to queue the rh2 pair of callbacks for the same grace period. */
3050 	preempt_disable(); /* Prevent preemption from interrupting test. */
3051 	rcu_read_lock(); /* Make it impossible to finish a grace period. */
3052 	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
3053 	local_irq_disable(); /* Make it harder to start a new grace period. */
3054 	call_rcu(&rh2, rcu_torture_leak_cb);
3055 	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
3056 	if (rhp) {
3057 		call_rcu(rhp, rcu_torture_leak_cb);
3058 		call_rcu(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
3059 	}
3060 	local_irq_enable();
3061 	rcu_read_unlock();
3062 	preempt_enable();
3063 
3064 	/* Wait for them all to get done so we can safely return. */
3065 	rcu_barrier();
3066 	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
3067 	destroy_rcu_head_on_stack(&rh1);
3068 	destroy_rcu_head_on_stack(&rh2);
3069 #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
3070 	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
3071 #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
3072 }
3073 
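/* Occasional synchronous grace-period wait, rate-limited to once per 0x1000 calls to bound overhead. */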
3074 static void rcutorture_sync(void)
3075 {
3076 	static unsigned long n;
3077 
3078 	if (cur_ops->sync && !(++n & 0xfff))
3079 		cur_ops->sync();
3080 }
3081 
3082 static int __init
3083 rcu_torture_init(void)
3084 {
3085 	long i;
3086 	int cpu;
3087 	int firsterr = 0;
3088 	int flags = 0;
3089 	unsigned long gp_seq = 0;
3090 	static struct rcu_torture_ops *torture_ops[] = {
3091 		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
3092 		&busted_srcud_ops, &tasks_ops, &tasks_rude_ops,
3093 		&tasks_tracing_ops, &trivial_ops,
3094 	};
3095 
3096 	if (!torture_init_begin(torture_type, verbose))
3097 		return -EBUSY;
3098 
3099 	/* Process args and tell the world that the torturer is on the job. */
3100 	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
3101 		cur_ops = torture_ops[i];
3102 		if (strcmp(torture_type, cur_ops->name) == 0)
3103 			break;
3104 	}
3105 	if (i == ARRAY_SIZE(torture_ops)) {
3106 		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
3107 			 torture_type);
3108 		pr_alert("rcu-torture types:");
3109 		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
3110 			pr_cont(" %s", torture_ops[i]->name);
3111 		pr_cont("\n");
3112 		firsterr = -EINVAL;
3113 		cur_ops = NULL;
3114 		goto unwind;
3115 	}
3116 	if (cur_ops->fqs == NULL && fqs_duration != 0) {
3117 		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
3118 		fqs_duration = 0;
3119 	}
3120 	if (cur_ops->init)
3121 		cur_ops->init();
3122 
3123 	if (nreaders >= 0) {
3124 		nrealreaders = nreaders;
3125 	} else {
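		/* Negative nreaders (the default) sizes the reader pool from the number of online CPUs. */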
3126 		nrealreaders = num_online_cpus() - 2 - nreaders;
3127 		if (nrealreaders <= 0)
3128 			nrealreaders = 1;
3129 	}
3130 	rcu_torture_print_module_parms(cur_ops, "Start of test");
3131 	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
3132 	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
3133 	start_gp_seq = gp_seq;
3134 	pr_alert("%s:  Start-test grace-period state: g%ld f%#x\n",
3135 		 cur_ops->name, (long)gp_seq, flags);
3136 
3137 	/* Set up the freelist. */
3138 
3139 	INIT_LIST_HEAD(&rcu_torture_freelist);
3140 	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
3141 		rcu_tortures[i].rtort_mbtest = 0;
3142 		list_add_tail(&rcu_tortures[i].rtort_free,
3143 			      &rcu_torture_freelist);
3144 	}
3145 
3146 	/* Initialize the statistics so that each run gets its own numbers. */
3147 
3148 	rcu_torture_current = NULL;
3149 	rcu_torture_current_version = 0;
3150 	atomic_set(&n_rcu_torture_alloc, 0);
3151 	atomic_set(&n_rcu_torture_alloc_fail, 0);
3152 	atomic_set(&n_rcu_torture_free, 0);
3153 	atomic_set(&n_rcu_torture_mberror, 0);
3154 	atomic_set(&n_rcu_torture_mbchk_fail, 0);
3155 	atomic_set(&n_rcu_torture_mbchk_tries, 0);
3156 	atomic_set(&n_rcu_torture_error, 0);
3157 	n_rcu_torture_barrier_error = 0;
3158 	n_rcu_torture_boost_ktrerror = 0;
3159 	n_rcu_torture_boost_rterror = 0;
3160 	n_rcu_torture_boost_failure = 0;
3161 	n_rcu_torture_boosts = 0;
3162 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
3163 		atomic_set(&rcu_torture_wcount[i], 0);
3164 	for_each_possible_cpu(cpu) {
3165 		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
3166 			per_cpu(rcu_torture_count, cpu)[i] = 0;
3167 			per_cpu(rcu_torture_batch, cpu)[i] = 0;
3168 		}
3169 	}
3170 	err_segs_recorded = 0;
3171 	rt_read_nsegs = 0;
3172 
3173 	/* Start up the kthreads. */
3174 
3175 	rcu_torture_write_types();
3176 	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
3177 					  writer_task);
3178 	if (torture_init_error(firsterr))
3179 		goto unwind;
3180 	if (nfakewriters > 0) {
3181 		fakewriter_tasks = kcalloc(nfakewriters,
3182 					   sizeof(fakewriter_tasks[0]),
3183 					   GFP_KERNEL);
3184 		if (fakewriter_tasks == NULL) {
3185 			TOROUT_ERRSTRING("out of memory");
3186 			firsterr = -ENOMEM;
3187 			goto unwind;
3188 		}
3189 	}
3190 	for (i = 0; i < nfakewriters; i++) {
3191 		firsterr = torture_create_kthread(rcu_torture_fakewriter,
3192 						  NULL, fakewriter_tasks[i]);
3193 		if (torture_init_error(firsterr))
3194 			goto unwind;
3195 	}
3196 	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
3197 			       GFP_KERNEL);
3198 	rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk),
3199 					   GFP_KERNEL);
3200 	if (!reader_tasks || !rcu_torture_reader_mbchk) {
3201 		TOROUT_ERRSTRING("out of memory");
3202 		firsterr = -ENOMEM;
3203 		goto unwind;
3204 	}
3205 	for (i = 0; i < nrealreaders; i++) {
3206 		rcu_torture_reader_mbchk[i].rtc_chkrdr = -1;
3207 		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
3208 						  reader_tasks[i]);
3209 		if (torture_init_error(firsterr))
3210 			goto unwind;
3211 	}
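	/*
	 * Optionally create kthreads that repeatedly toggle RCU callback
	 * offloading via rcu_nocb_toggle(), first warning about and
	 * clamping any negative parameter values.
	 */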
3212 	nrealnocbers = nocbs_nthreads;
3213 	if (WARN_ON(nrealnocbers < 0))
3214 		nrealnocbers = 0;
3215 	if (WARN_ON(nocbs_toggle < 0))
3216 		nocbs_toggle = HZ;
3217 	if (nrealnocbers > 0) {
3218 		nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL);
3219 		if (nocb_tasks == NULL) {
3220 			TOROUT_ERRSTRING("out of memory");
3221 			firsterr = -ENOMEM;
3222 			goto unwind;
3223 		}
3224 	} else {
3225 		nocb_tasks = NULL;
3226 	}
3227 	for (i = 0; i < nrealnocbers; i++) {
3228 		firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]);
3229 		if (torture_init_error(firsterr))
3230 			goto unwind;
3231 	}
3232 	if (stat_interval > 0) {
3233 		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
3234 						  stats_task);
3235 		if (torture_init_error(firsterr))
3236 			goto unwind;
3237 	}
3238 	if (test_no_idle_hz && shuffle_interval > 0) {
3239 		firsterr = torture_shuffle_init(shuffle_interval * HZ);
3240 		if (torture_init_error(firsterr))
3241 			goto unwind;
3242 	}
3243 	if (stutter < 0)
3244 		stutter = 0;
3245 	if (stutter) {
3246 		int t;
3247 
3248 		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
3249 		firsterr = torture_stutter_init(stutter * HZ, t);
3250 		if (torture_init_error(firsterr))
3251 			goto unwind;
3252 	}
3253 	if (fqs_duration < 0)
3254 		fqs_duration = 0;
3255 	if (fqs_duration) {
3256 		/* Create the fqs thread */
3257 		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
3258 						  fqs_task);
3259 		if (torture_init_error(firsterr))
3260 			goto unwind;
3261 	}
3262 	if (test_boost_interval < 1)
3263 		test_boost_interval = 1;
3264 	if (test_boost_duration < 2)
3265 		test_boost_duration = 2;
3266 	if (rcu_torture_can_boost()) {
3267 
3268 		boost_starttime = jiffies + test_boost_interval * HZ;
3269 
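		/*
		 * CPUHP_AP_ONLINE_DYN allocates a hotplug state
		 * dynamically, so a non-negative return here is the state
		 * number.  It is saved in rcutor_hp for the matching
		 * cpuhp_remove_state() during cleanup.
		 */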
3270 		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
3271 					     rcutorture_booster_init,
3272 					     rcutorture_booster_cleanup);
3273 		rcutor_hp = firsterr;
3274 		if (torture_init_error(firsterr))
3275 			goto unwind;
3276 
3277 		// Testing RCU priority boosting requires rcutorture do
3278 		// some serious abuse.  Counter this by running ksoftirqd
3279 		// at higher priority.
3280 		if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) {
3281 			for_each_online_cpu(cpu) {
3282 				struct sched_param sp;
3283 				struct task_struct *t;
3284 
3285 				t = per_cpu(ksoftirqd, cpu);
3286 				WARN_ON_ONCE(!t);
3287 				sp.sched_priority = 2;
3288 				sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
3289 			}
3290 		}
3291 	}
3292 	shutdown_jiffies = jiffies + shutdown_secs * HZ;
3293 	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
3294 	if (torture_init_error(firsterr))
3295 		goto unwind;
3296 	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
3297 				      rcutorture_sync);
3298 	if (torture_init_error(firsterr))
3299 		goto unwind;
3300 	firsterr = rcu_torture_stall_init();
3301 	if (torture_init_error(firsterr))
3302 		goto unwind;
3303 	firsterr = rcu_torture_fwd_prog_init();
3304 	if (torture_init_error(firsterr))
3305 		goto unwind;
3306 	firsterr = rcu_torture_barrier_init();
3307 	if (torture_init_error(firsterr))
3308 		goto unwind;
3309 	firsterr = rcu_torture_read_exit_init();
3310 	if (torture_init_error(firsterr))
3311 		goto unwind;
3312 	if (object_debug)
3313 		rcu_test_debug_objects();
3314 	torture_init_end();
3315 	return 0;
3316 
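/*
 * Common error exit: end the initialization phase, tear down anything
 * that was created, and, if a shutdown was requested, power off so
 * that scripted test runs do not hang on a test that never started.
 */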
3317 unwind:
3318 	torture_init_end();
3319 	rcu_torture_cleanup();
3320 	if (shutdown_secs) {
3321 		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
3322 		kernel_power_off();
3323 	}
3324 	return firsterr;
3325 }
3326 
3327 module_init(rcu_torture_init);
3328 module_exit(rcu_torture_cleanup);
3329