// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	  Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.rst
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/rcu_notifier.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>
#include <linux/nmi.h>

#include "rcu.h"

MODULE_DESCRIPTION("Read-Copy Update module-based torture test facility");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");

// Bits for ->extendables field, extendables param, and related definitions.
#define RCUTORTURE_RDR_SHIFT_1	8	// Put SRCU index in upper bits.
#define RCUTORTURE_RDR_MASK_1	(0xff << RCUTORTURE_RDR_SHIFT_1)
#define RCUTORTURE_RDR_SHIFT_2	16	// Put SRCU index in upper bits.
#define RCUTORTURE_RDR_MASK_2	(0xff << RCUTORTURE_RDR_SHIFT_2)
#define RCUTORTURE_RDR_BH	0x01	// Extend readers by disabling bh.
#define RCUTORTURE_RDR_IRQ	0x02	//  ... disabling interrupts.
#define RCUTORTURE_RDR_PREEMPT	0x04	//  ... disabling preemption.
#define RCUTORTURE_RDR_RBH	0x08	//  ... rcu_read_lock_bh().
#define RCUTORTURE_RDR_SCHED	0x10	//  ... rcu_read_lock_sched().
#define RCUTORTURE_RDR_RCU_1	0x20	//  ... entering another RCU reader.
#define RCUTORTURE_RDR_RCU_2	0x40	//  ... entering another RCU reader.
#define RCUTORTURE_RDR_UPDOWN	0x80	//  ... up-read from task, down-read from timer.
					//	Note: Manual start, automatic end.
#define RCUTORTURE_RDR_NBITS	8	// Number of bits defined above.
#define RCUTORTURE_MAX_EXTEND	\
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)  // Intentionally omit RCUTORTURE_RDR_UPDOWN.
#define RCUTORTURE_RDR_ALLBITS	\
	(RCUTORTURE_MAX_EXTEND | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2 | \
	 RCUTORTURE_RDR_MASK_1 | RCUTORTURE_RDR_MASK_2)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	// Maximum reader extensions.
					// Must be power of two minus one.
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
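
/*
 * Illustrative sketch only (not used by the test): how an extendables
 * mask decomposes into the reader-protection bits defined above.  The
 * helper name below is hypothetical and exists purely as documentation.
 */
static inline void rcutorture_sketch_print_extendables(int mask)
{
	pr_info("extendables:%s%s%s%s%s\n",
		(mask & RCUTORTURE_RDR_BH) ? " BH" : "",
		(mask & RCUTORTURE_RDR_IRQ) ? " IRQ" : "",
		(mask & RCUTORTURE_RDR_PREEMPT) ? " PREEMPT" : "",
		(mask & RCUTORTURE_RDR_RBH) ? " RBH" : "",
		(mask & RCUTORTURE_RDR_SCHED) ? " SCHED" : "");
}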

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, true, "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait primitives");
torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives");
torture_param(bool, gp_cond_exp_full, false,
		    "Use conditional/async full-state expedited GP wait primitives");
torture_param(int, gp_cond_wi, 16 * USEC_PER_SEC / HZ,
		   "Wait interval for normal conditional grace periods, us (default 16 jiffies)");
torture_param(int, gp_cond_wi_exp, 128,
		   "Wait interval for expedited conditional grace periods, us (default 128 us)");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives");
torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives");
torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives");
torture_param(int, gp_poll_wi, 16 * USEC_PER_SEC / HZ,
		   "Wait interval for normal polled grace periods, us (default 16 jiffies)");
torture_param(int, gp_poll_wi_exp, 128,
		   "Wait interval for expedited polled grace periods, us (default 128 us)");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing");
torture_param(int, n_up_down, 32, "# of concurrent up/down hrtimer-based RCU readers");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(bool, gpwrap_lag, true, "Enable grace-period wrap lag testing");
torture_param(int, gpwrap_lag_gps, 8, "Value to set for set_gpwrap_lag during an active testing period.");
torture_param(int, gpwrap_lag_cycle_mins, 30, "Total cycle duration for gpwrap lag testing (in minutes)");
torture_param(int, gpwrap_lag_active_mins, 5, "Duration for which gpwrap lag is active within each cycle (in minutes)");
torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, preempt_duration, 0, "Preemption duration (ms), zero to disable");
torture_param(int, preempt_interval, MSEC_PER_SEC, "Interval between preemptions (ms)");
torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, reader_flavor, SRCU_READ_FLAVOR_NORMAL, "Reader flavors to use, one per bit.");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s).");
torture_param(bool, stall_no_softlockup, false, "Avoid softlockup warning during cpu stall.");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_cpu_repeat, 0, "Number of additional stalls after the first one.");
torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds.");
torture_param(int, test_boost_holdoff, 0, "Holdoff time from rcutorture start, seconds.");
torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds.");
torture_param(int, test_nmis, 0, "End-test NMI tests, 0 to disable.");
torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs");
torture_param(int, test_srcu_lockdep, 0, "Test specified SRCU deadlock scenario.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");

static int nrealnocbers;
static int nrealreaders;
static int nrealfakewriters;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *updown_task;
static struct task_struct **nocb_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct **fwd_prog_tasks;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
static struct task_struct *read_exit_task;
static struct task_struct *preempt_task;

#define RCU_TORTURE_PIPE_LEN 10

// Mailbox-like structure to check RCU global memory ordering.
struct rcu_torture_reader_check {
	unsigned long rtc_myloops;
	int rtc_chkrdr;
	unsigned long rtc_chkloops;
	int rtc_ready;
	struct rcu_torture_reader_check *rtc_assigner;
} ____cacheline_internodealigned_in_smp;

// Update-side data structure used to check RCU readers.
struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
	struct rcu_torture_reader_check *rtort_chkp;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_mbchk_fail;
static atomic_t n_rcu_torture_mbchk_tries;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static unsigned long n_read_exits;
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;
static unsigned long start_gp_seq;
static atomic_long_t n_nocb_offload;
static atomic_long_t n_nocb_deoffload;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_GET_FULL	6
#define RTWS_COND_GET_EXP	7
#define RTWS_COND_GET_EXP_FULL	8
#define RTWS_COND_SYNC		9
#define RTWS_COND_SYNC_FULL	10
#define RTWS_COND_SYNC_EXP	11
#define RTWS_COND_SYNC_EXP_FULL	12
#define RTWS_POLL_GET		13
#define RTWS_POLL_GET_FULL	14
#define RTWS_POLL_GET_EXP	15
#define RTWS_POLL_GET_EXP_FULL	16
#define RTWS_POLL_WAIT		17
#define RTWS_POLL_WAIT_FULL	18
#define RTWS_POLL_WAIT_EXP	19
#define RTWS_POLL_WAIT_EXP_FULL	20
#define RTWS_SYNC		21
#define RTWS_STUTTER		22
#define RTWS_STOPPING		23
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_GET_FULL",
	"RTWS_COND_GET_EXP",
	"RTWS_COND_GET_EXP_FULL",
	"RTWS_COND_SYNC",
	"RTWS_COND_SYNC_FULL",
	"RTWS_COND_SYNC_EXP",
	"RTWS_COND_SYNC_EXP_FULL",
	"RTWS_POLL_GET",
	"RTWS_POLL_GET_FULL",
	"RTWS_POLL_GET_EXP",
	"RTWS_POLL_GET_EXP_FULL",
	"RTWS_POLL_WAIT",
	"RTWS_POLL_WAIT_FULL",
	"RTWS_POLL_WAIT_EXP",
	"RTWS_POLL_WAIT_EXP_FULL",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
	int rt_cpu;
	int rt_end_cpu;
	unsigned long long rt_gp_seq;
	unsigned long long rt_gp_seq_end;
	u64 rt_ts;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;
static int rt_read_preempted;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static atomic_t rcu_fwd_cb_nodelay;	/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}
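
/*
 * Usage sketch only, not invoked by the test: a minimal writer-style
 * round trip through the pool above.  The helper name is hypothetical,
 * and the direct call to synchronize_rcu() is an assumption standing in
 * for whichever grace-period primitive the current flavor provides.
 */
static inline void rcu_torture_pool_roundtrip_sketch(void)
{
	struct rcu_torture *p = rcu_torture_alloc();

	if (!p)
		return;		// Pool empty; already counted in n_rcu_torture_alloc_fail.
	p->rtort_mbtest = 1;	// Mark the element live for reader-side checking.
	synchronize_rcu();	// Wait out any pre-existing readers.
	rcu_torture_free(p);
}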

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	int (*readlock_held)(void);   // lockdep.
	int (*readlock_nesting)(void); // actual nesting, if available, -1 if not.
	int (*down_read)(void);
	void (*up_read)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	void (*exp_current)(void);
	unsigned long (*get_gp_state_exp)(void);
	unsigned long (*start_gp_poll_exp)(void);
	void (*start_gp_poll_exp_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_gp_state_exp)(unsigned long oldstate);
	void (*cond_sync_exp)(unsigned long oldstate);
	void (*cond_sync_exp_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*get_comp_state)(void);
	void (*get_comp_state_full)(struct rcu_gp_oldstate *rgosp);
	bool (*same_gp_state)(unsigned long oldstate1, unsigned long oldstate2);
	bool (*same_gp_state_full)(struct rcu_gp_oldstate *rgosp1, struct rcu_gp_oldstate *rgosp2);
	unsigned long (*get_gp_state)(void);
	void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*start_gp_poll)(void);
	void (*start_gp_poll_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_gp_state)(unsigned long oldstate);
	bool (*poll_gp_state_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_need_2gp)(bool poll, bool poll_full);
	void (*cond_sync)(unsigned long oldstate);
	void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp);
	int poll_active;
	int poll_active_full;
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	void (*gp_kthread_dbg)(void);
	bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
	int (*stall_dur)(void);
	void (*get_gp_data)(int *flags, unsigned long *gp_seq);
	void (*gp_slow_register)(atomic_t *rgssp);
	void (*gp_slow_unregister)(atomic_t *rgssp);
	bool (*reader_blocked)(void);
	unsigned long long (*gather_gp_seqs)(void);
	void (*format_gp_seqs)(unsigned long long seqs, char *cp, size_t len);
	void (*set_gpwrap_lag)(unsigned long lag);
	int (*get_gpwrap_count)(int cpu);
	long cbflood_max;
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	int no_pi_lock;
	int debug_objects;
	int start_poll_irqsoff;
	int have_up_down;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;
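
/*
 * Dispatch sketch: how the operations vector above is typically
 * consumed, letting a single reader loop exercise whichever RCU flavor
 * cur_ops currently points at.  This helper is hypothetical and shown
 * only to document the indirection.
 */
static inline void rcu_torture_ops_reader_sketch(void)
{
	int idx = cur_ops->readlock();

	// Reader-side accesses of RCU-protected data would go here.
	cur_ops->readunlock(idx);
}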

/*
 * Definitions for rcu torture testing.
 */

static int torture_readlock_not_held(void)
{
	return rcu_read_lock_bh_held() || rcu_read_lock_sched_held();
}

static int rcu_torture_read_lock(void)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!atomic_read(&rcu_fwd_cb_nodelay) &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if ((preempt_count() & HARDIRQ_MASK) || softirq_count())
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500)))
		torture_preempt_schedule();  /* QS only if preemptible. */
}

static void rcu_torture_read_unlock(int idx)
{
	rcu_read_unlock();
}

static int rcu_torture_readlock_nesting(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RCU))
		return rcu_preempt_depth();
	if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
		return (preempt_count() & PREEMPT_MASK);
	return -1;
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;
	struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);

	if (rtrcp) {
		WRITE_ONCE(rp->rtort_chkp, NULL);
		smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
	ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count);
	if (i + 1 >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}
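
/*
 * Reader-side check sketch: rtort_pipe_count records how many grace
 * periods an element has aged through since it was unpublished, so a
 * reader that still holds a reference when the count reaches or passes
 * RCU_TORTURE_PIPE_LEN has witnessed a too-short grace period.  This
 * helper is hypothetical; the real tallying lives in the reader kthreads.
 */
static inline bool rcu_torture_pipe_overflow_sketch(struct rcu_torture *p)
{
	return READ_ONCE(p->rtort_pipe_count) >= RCU_TORTURE_PIPE_LEN;
}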

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_hurry(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static bool rcu_poll_need_2gp(bool poll, bool poll_full)
{
	return poll;
}

static struct rcu_torture_ops rcu_ops = {
	.ttype			= RCU_FLAVOR,
	.init			= rcu_sync_torture_init,
	.readlock		= rcu_torture_read_lock,
	.read_delay		= rcu_read_delay,
	.readunlock		= rcu_torture_read_unlock,
	.readlock_held		= torture_readlock_not_held,
	.readlock_nesting	= rcu_torture_readlock_nesting,
	.get_gp_seq		= rcu_get_gp_seq,
	.gp_diff		= rcu_seq_diff,
	.deferred_free		= rcu_torture_deferred_free,
	.sync			= synchronize_rcu,
	.exp_sync		= synchronize_rcu_expedited,
	.same_gp_state		= same_state_synchronize_rcu,
	.same_gp_state_full	= same_state_synchronize_rcu_full,
	.get_comp_state		= get_completed_synchronize_rcu,
	.get_comp_state_full	= get_completed_synchronize_rcu_full,
	.get_gp_state		= get_state_synchronize_rcu,
	.get_gp_state_full	= get_state_synchronize_rcu_full,
	.start_gp_poll		= start_poll_synchronize_rcu,
	.start_gp_poll_full	= start_poll_synchronize_rcu_full,
	.poll_gp_state		= poll_state_synchronize_rcu,
	.poll_gp_state_full	= poll_state_synchronize_rcu_full,
	.poll_need_2gp		= rcu_poll_need_2gp,
	.cond_sync		= cond_synchronize_rcu,
	.cond_sync_full		= cond_synchronize_rcu_full,
	.poll_active		= NUM_ACTIVE_RCU_POLL_OLDSTATE,
	.poll_active_full	= NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE,
	.get_gp_state_exp	= get_state_synchronize_rcu,
	.start_gp_poll_exp	= start_poll_synchronize_rcu_expedited,
	.start_gp_poll_exp_full	= start_poll_synchronize_rcu_expedited_full,
	.poll_gp_state_exp	= poll_state_synchronize_rcu,
	.cond_sync_exp		= cond_synchronize_rcu_expedited,
	.cond_sync_exp_full	= cond_synchronize_rcu_expedited_full,
	.call			= call_rcu_hurry,
	.cb_barrier		= rcu_barrier,
	.fqs			= rcu_force_quiescent_state,
	.gp_kthread_dbg		= show_rcu_gp_kthreads,
	.check_boost_failed	= rcu_check_boost_fail,
	.stall_dur		= rcu_jiffies_till_stall_check,
	.get_gp_data		= rcutorture_get_gp_data,
	.gp_slow_register	= rcu_gp_slow_register,
	.gp_slow_unregister	= rcu_gp_slow_unregister,
	.reader_blocked		= IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)
				  ? has_rcu_reader_blocked
				  : NULL,
	.gather_gp_seqs		= rcutorture_gather_gp_seqs,
	.format_gp_seqs		= rcutorture_format_gp_seqs,
	.set_gpwrap_lag		= rcu_set_gpwrap_lag,
	.get_gpwrap_count	= rcu_get_gpwrap_count,
	.irq_capable		= 1,
	.can_boost		= IS_ENABLED(CONFIG_RCU_BOOST),
	.extendables		= RCUTORTURE_MAX_EXTEND,
	.debug_objects		= 1,
	.start_poll_irqsoff	= 1,
	.name			= "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!  The
 * names include "busted", and they really mean it!  The only purpose
 * of these functions is to provide a buggy RCU implementation to make
 * sure that rcutorture correctly emits buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.readlock_held	= torture_readlock_not_held,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.gather_gp_seqs	= rcutorture_gather_gp_seqs,
	.format_gp_seqs	= rcutorture_format_gp_seqs,
	.irq_capable	= 1,
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
DEFINE_STATIC_SRCU_FAST(srcu_ctlf);
DEFINE_STATIC_SRCU_FAST_UPDOWN(srcu_ctlfud);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;
static struct rcu_torture_ops srcud_ops;

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	if (!reader_flavor || (reader_flavor & SRCU_READ_FLAVOR_NORMAL))
		VERBOSE_TOROUT_STRING("srcu_torture_init normal SRCU");
	if (reader_flavor & SRCU_READ_FLAVOR_NMI)
		VERBOSE_TOROUT_STRING("srcu_torture_init NMI-safe SRCU");
	if (reader_flavor & SRCU_READ_FLAVOR_FAST) {
		srcu_ctlp = &srcu_ctlf;
		VERBOSE_TOROUT_STRING("srcu_torture_init fast SRCU");
	}
	if (reader_flavor & SRCU_READ_FLAVOR_FAST_UPDOWN) {
		srcu_ctlp = &srcu_ctlfud;
		VERBOSE_TOROUT_STRING("srcu_torture_init fast-up/down SRCU");
	}
}

static void srcu_get_gp_data(int *flags, unsigned long *gp_seq)
{
	srcutorture_get_gp_data(srcu_ctlp, flags, gp_seq);
}

static int srcu_torture_read_lock(void)
{
	int idx;
	struct srcu_ctr __percpu *scp;
	int ret = 0;

	WARN_ON_ONCE(reader_flavor & ~SRCU_READ_FLAVOR_ALL);

	if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || !(reader_flavor & SRCU_READ_FLAVOR_ALL)) {
		idx = srcu_read_lock(srcu_ctlp);
		WARN_ON_ONCE(idx & ~0x1);
		ret += idx;
	}
	if (reader_flavor & SRCU_READ_FLAVOR_NMI) {
		idx = srcu_read_lock_nmisafe(srcu_ctlp);
		WARN_ON_ONCE(idx & ~0x1);
		ret += idx << 1;
	}
	if (reader_flavor & SRCU_READ_FLAVOR_FAST) {
		scp = srcu_read_lock_fast(srcu_ctlp);
		idx = __srcu_ptr_to_ctr(srcu_ctlp, scp);
		WARN_ON_ONCE(idx & ~0x1);
		ret += idx << 2;
	}
	if (reader_flavor & SRCU_READ_FLAVOR_FAST_UPDOWN) {
		scp = srcu_read_lock_fast_updown(srcu_ctlp);
		idx = __srcu_ptr_to_ctr(srcu_ctlp, scp);
		WARN_ON_ONCE(idx & ~0x1);
		ret += idx << 3;
	}
	return ret;
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx)
{
	WARN_ON_ONCE((reader_flavor && (idx & ~reader_flavor)) || (!reader_flavor && (idx & ~0x1)));
	if (reader_flavor & SRCU_READ_FLAVOR_FAST_UPDOWN)
		srcu_read_unlock_fast_updown(srcu_ctlp,
					     __srcu_ctr_to_ptr(srcu_ctlp, (idx & 0x8) >> 3));
	if (reader_flavor & SRCU_READ_FLAVOR_FAST)
		srcu_read_unlock_fast(srcu_ctlp, __srcu_ctr_to_ptr(srcu_ctlp, (idx & 0x4) >> 2));
	if (reader_flavor & SRCU_READ_FLAVOR_NMI)
		srcu_read_unlock_nmisafe(srcu_ctlp, (idx & 0x2) >> 1);
	if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || !(reader_flavor & SRCU_READ_FLAVOR_ALL))
		srcu_read_unlock(srcu_ctlp, idx & 0x1);
}
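
/*
 * Index-encoding sketch: srcu_torture_read_lock() packs one SRCU array
 * index per enabled reader flavor into distinct bits of its return value
 * (bit 0 normal, bit 1 NMI-safe, bit 2 fast, bit 3 fast-up/down), and
 * srcu_torture_read_unlock() unpacks them above.  This hypothetical
 * helper documents the layout only.
 */
static inline void srcu_torture_idx_decode_sketch(int idx)
{
	pr_info("normal=%d nmi=%d fast=%d fast_updown=%d\n",
		idx & 0x1, (idx & 0x2) >> 1, (idx & 0x4) >> 2, (idx & 0x8) >> 3);
}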

static int torture_srcu_read_lock_held(void)
{
	return srcu_read_lock_held(srcu_ctlp);
}

static bool srcu_torture_have_up_down(void)
{
	int rf = reader_flavor;

	if (!rf)
		rf = SRCU_READ_FLAVOR_NORMAL;
	return !!(cur_ops->have_up_down & rf);
}

static int srcu_torture_down_read(void)
{
	int idx;
	struct srcu_ctr __percpu *scp;

	WARN_ON_ONCE(reader_flavor & ~SRCU_READ_FLAVOR_ALL);
	WARN_ON_ONCE(reader_flavor & (reader_flavor - 1));

	if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || !(reader_flavor & SRCU_READ_FLAVOR_ALL)) {
		idx = srcu_down_read(srcu_ctlp);
		WARN_ON_ONCE(idx & ~0x1);
		return idx;
	}
	if (reader_flavor & SRCU_READ_FLAVOR_FAST_UPDOWN) {
		scp = srcu_down_read_fast(srcu_ctlp);
		idx = __srcu_ptr_to_ctr(srcu_ctlp, scp);
		WARN_ON_ONCE(idx & ~0x1);
		return idx << 3;
	}
	WARN_ON_ONCE(1);
	return 0;
}

static void srcu_torture_up_read(int idx)
{
	WARN_ON_ONCE((reader_flavor && (idx & ~reader_flavor)) || (!reader_flavor && (idx & ~0x1)));
	if (reader_flavor & SRCU_READ_FLAVOR_FAST_UPDOWN)
		srcu_up_read_fast(srcu_ctlp, __srcu_ctr_to_ptr(srcu_ctlp, (idx & 0x8) >> 3));
	else if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) ||
		 !(reader_flavor & SRCU_READ_FLAVOR_ALL))
		srcu_up_read(srcu_ctlp, idx & 0x1);
	else
		WARN_ON_ONCE(1);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	unsigned long flags;
	bool lockit = jiffies & 0x1;

	if (lockit)
		raw_spin_lock_irqsave(&current->pi_lock, flags);
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
	if (lockit)
		raw_spin_unlock_irqrestore(&current->pi_lock, flags);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_get_gp_state(void)
{
	return get_state_synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_start_gp_poll(void)
{
	return start_poll_synchronize_srcu(srcu_ctlp);
}

static bool srcu_torture_poll_gp_state(unsigned long oldstate)
{
	return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static void srcu_torture_expedite_current(void)
{
	srcu_expedite_current(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.down_read	= srcu_torture_down_read,
	.up_read	= srcu_torture_up_read,
	.readlock_held	= torture_srcu_read_lock_held,
	.get_gp_seq	= srcu_torture_completed,
	.gp_diff	= rcu_seq_diff,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.exp_current	= srcu_torture_expedite_current,
	.same_gp_state	= same_state_synchronize_srcu,
	.get_comp_state = get_completed_synchronize_srcu,
	.get_gp_state	= srcu_torture_get_gp_state,
	.start_gp_poll	= srcu_torture_start_gp_poll,
	.poll_gp_state	= srcu_torture_poll_gp_state,
	.poll_active	= NUM_ACTIVE_SRCU_POLL_OLDSTATE,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.get_gp_data	= srcu_get_gp_data,
	.cbflood_max	= 50000,
	.irq_capable	= 1,
	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
	.debug_objects	= 1,
	.have_up_down	= IS_ENABLED(CONFIG_TINY_SRCU)
				? 0 : SRCU_READ_FLAVOR_NORMAL | SRCU_READ_FLAVOR_FAST_UPDOWN,
	.name		= "srcu"
};

static void srcud_torture_init(void)
{
	rcu_sync_torture_init();
	if (!reader_flavor || (reader_flavor & SRCU_READ_FLAVOR_NORMAL)) {
		WARN_ON(init_srcu_struct(&srcu_ctld));
		VERBOSE_TOROUT_STRING("srcud_torture_init normal SRCU");
	} else if (reader_flavor & SRCU_READ_FLAVOR_NMI) {
		WARN_ON(init_srcu_struct(&srcu_ctld));
		VERBOSE_TOROUT_STRING("srcud_torture_init NMI-safe SRCU");
	} else if (reader_flavor & SRCU_READ_FLAVOR_FAST) {
		WARN_ON(init_srcu_struct_fast(&srcu_ctld));
		VERBOSE_TOROUT_STRING("srcud_torture_init fast SRCU");
	} else if (reader_flavor & SRCU_READ_FLAVOR_FAST_UPDOWN) {
		WARN_ON(init_srcu_struct_fast_updown(&srcu_ctld));
		VERBOSE_TOROUT_STRING("srcud_torture_init fast-up/down SRCU");
	} else {
		WARN_ON(init_srcu_struct(&srcu_ctld));
	}
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcud_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.readlock_held	= torture_srcu_read_lock_held,
	.down_read	= srcu_torture_down_read,
	.up_read	= srcu_torture_up_read,
	.get_gp_seq	= srcu_torture_completed,
	.gp_diff	= rcu_seq_diff,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.exp_current	= srcu_torture_expedite_current,
	.same_gp_state	= same_state_synchronize_srcu,
	.get_comp_state = get_completed_synchronize_srcu,
	.get_gp_state	= srcu_torture_get_gp_state,
	.start_gp_poll	= srcu_torture_start_gp_poll,
	.poll_gp_state	= srcu_torture_poll_gp_state,
	.poll_active	= NUM_ACTIVE_SRCU_POLL_OLDSTATE,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.get_gp_data	= srcu_get_gp_data,
	.cbflood_max	= 50000,
	.irq_capable	= 1,
	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
	.debug_objects	= 1,
	.have_up_down	= IS_ENABLED(CONFIG_TINY_SRCU)
				? 0 : SRCU_READ_FLAVOR_NORMAL | SRCU_READ_FLAVOR_FAST_UPDOWN,
	.name		= "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.readlock_held	= torture_srcu_read_lock_held,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted_srcud"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not work well with CPU hotplug or with
 * rcutorture's shuffling.
 */

static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		torture_sched_setaffinity(current->pid, cpumask_of(cpu), true);
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}

static void rcu_sync_torture_init_trivial(void)
{
	rcu_sync_torture_init();
	if (WARN_ONCE(onoff_interval || shuffle_interval, "%s: Non-zero onoff_interval (%d) or shuffle_interval (%d) breaks trivial RCU, resetting to zero", __func__, onoff_interval, shuffle_interval)) {
		onoff_interval = 0;
		shuffle_interval = 0;
	}
}

static int rcu_torture_read_lock_trivial(void)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype		= RCU_TRIVIAL_FLAVOR,
	.init		= rcu_sync_torture_init_trivial,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.readlock_held	= torture_readlock_not_held,
	.get_gp_seq	= rcu_no_completed,
	.sync		= synchronize_rcu_trivial,
	.exp_sync	= synchronize_rcu_trivial,
	.irq_capable	= 1,
	.name		= "trivial"
};

#ifdef CONFIG_TRIVIAL_PREEMPT_RCU

/*
 * Definitions for trivial CONFIG_PREEMPT=y torture testing.  This
 * implementation does not work well with large numbers of tasks or with
 * long-term preemption.  Either or both get you RCU CPU stall warnings.
 */

static void rcu_sync_torture_init_trivial_preempt(void)
{
	rcu_sync_torture_init();
	if (WARN_ONCE(onoff_interval || shuffle_interval, "%s: Non-zero onoff_interval (%d) or shuffle_interval (%d) breaks trivial RCU, resetting to zero", __func__, onoff_interval, shuffle_interval)) {
		onoff_interval = 0;
		shuffle_interval = 0;
	}
}

static int rcu_torture_read_lock_trivial_preempt(void)
{
	struct task_struct *t = current;

	WRITE_ONCE(t->rcu_trivial_preempt_nesting, t->rcu_trivial_preempt_nesting + 1);
	smp_mb();
	return 0;
}

static void rcu_torture_read_unlock_trivial_preempt(int idx)
{
	struct task_struct *t = current;

	smp_store_release(&t->rcu_trivial_preempt_nesting, t->rcu_trivial_preempt_nesting - 1);
}

static struct rcu_torture_ops trivial_preempt_ops = {
	.ttype		= RCU_TRIVIAL_FLAVOR,
	.init		= rcu_sync_torture_init_trivial_preempt,
	.readlock	= rcu_torture_read_lock_trivial_preempt,
	.read_delay	= rcu_read_delay,  // just reuse rcu's version.
	.readunlock	= rcu_torture_read_unlock_trivial_preempt,
	.readlock_held	= torture_readlock_not_held,
	.get_gp_seq	= rcu_no_completed,
	.sync		= synchronize_rcu_trivial_preempt,
	.exp_sync	= synchronize_rcu_trivial_preempt,
	.irq_capable	= 0, // In theory it should be, but let's keep it trivial.
	.name		= "trivial-preempt"
};

#define TRIVIAL_PREEMPT_OPS &trivial_preempt_ops,

#else // #ifdef CONFIG_TRIVIAL_PREEMPT_RCU

#define TRIVIAL_PREEMPT_OPS

#endif // #else // #ifdef CONFIG_TRIVIAL_PREEMPT_RCU

#ifdef CONFIG_TASKS_RCU

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static void synchronize_rcu_mult_test(void)
{
	synchronize_rcu_mult(call_rcu_tasks, call_rcu_hurry);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_mult_test,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.gp_kthread_dbg	= show_rcu_tasks_classic_gp_kthread,
	.get_gp_data	= rcu_tasks_get_gp_data,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks"
};

#define TASKS_OPS &tasks_ops,

#else // #ifdef CONFIG_TASKS_RCU

#define TASKS_OPS

#endif // #else #ifdef CONFIG_TASKS_RCU


#ifdef CONFIG_TASKS_RUDE_RCU

/*
 * Definitions for rude RCU-tasks torture testing.
 */

static struct rcu_torture_ops tasks_rude_ops = {
	.ttype		= RCU_TASKS_RUDE_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.get_gp_seq	= rcu_no_completed,
	.sync		= synchronize_rcu_tasks_rude,
	.exp_sync	= synchronize_rcu_tasks_rude,
	.gp_kthread_dbg	= show_rcu_tasks_rude_gp_kthread,
	.get_gp_data	= rcu_tasks_rude_get_gp_data,
	.cbflood_max	= 50000,
	.irq_capable	= 1,
	.name		= "tasks-rude"
};

#define TASKS_RUDE_OPS &tasks_rude_ops,

#else // #ifdef CONFIG_TASKS_RUDE_RCU

#define TASKS_RUDE_OPS

#endif // #else #ifdef CONFIG_TASKS_RUDE_RCU


#ifdef CONFIG_TASKS_TRACE_RCU

/*
 * Definitions for tracing RCU-tasks torture testing.
 */

static int tasks_tracing_torture_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_tracing_torture_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_tracing_ops = {
	.ttype		= RCU_TASKS_TRACING_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_tracing_torture_read_lock,
	.read_delay	= srcu_read_delay,  /* just reuse srcu's version. */
	.readunlock	= tasks_tracing_torture_read_unlock,
	.readlock_held	= rcu_read_lock_trace_held,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_tracing_torture_deferred_free,
	.sync		= synchronize_rcu_tasks_trace,
	.exp_sync	= synchronize_rcu_tasks_trace,
	.exp_current	= rcu_tasks_trace_expedite_current,
	.call		= call_rcu_tasks_trace,
	.cb_barrier	= rcu_barrier_tasks_trace,
	.cbflood_max	= 50000,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks-tracing"
};

#define TASKS_TRACING_OPS &tasks_tracing_ops,

#else // #ifdef CONFIG_TASKS_TRACE_RCU

#define TASKS_TRACING_OPS

#endif // #else #ifdef CONFIG_TASKS_TRACE_RCU


static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly starting grace periods and waiting
 * for them to complete.  If a given grace period takes too long, we assume
 * that priority inversion has occurred.
 */

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled.  Only possible if rcutorture is built-in; otherwise the
	 * user should manually do this by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start)
{
	int cpu;
	static int dbg_done;
	unsigned long end = jiffies;
	bool gp_done;
	unsigned long j;
	static unsigned long last_persist;
	unsigned long lp;
	unsigned long mininterval = test_boost_duration * HZ - HZ / 2;

	if (end - *start > mininterval) {
		// Recheck after checking time to avoid false positives.
		smp_mb(); // Time check before grace-period check.
		if (cur_ops->poll_gp_state(gp_state))
			return false; // passed, though perhaps just barely
		if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
			// At most one persisted message per boost test.
			j = jiffies;
			lp = READ_ONCE(last_persist);
			if (time_after(j, lp + mininterval) &&
			    cmpxchg(&last_persist, lp, j) == lp) {
				if (cpu < 0)
					pr_info("Boost inversion persisted: QS from all CPUs\n");
				else
					pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
			}
			return false; // passed on a technicality
		}
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;
		if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
			pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
				current->rt_priority, gp_state, end - *start);
			cur_ops->gp_kthread_dbg();
			// Recheck after print to flag grace period ending during splat.
			gp_done = cur_ops->poll_gp_state(gp_state);
			pr_info("Boost inversion: GP %lu %s.\n", gp_state,
				gp_done ? "ended already" : "still pending");

		}

		return true; // failed
	} else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) {
		*start = jiffies;
	}

	return false; // passed
}

static int rcu_torture_boost(void *arg)
{
	unsigned long endtime;
	unsigned long gp_state;
	unsigned long gp_state_time;
	unsigned long oldstarttime;
	unsigned long booststarttime = get_torture_init_jiffies() + test_boost_holdoff * HZ;

	if (test_boost_holdoff <= 0 || time_after(jiffies, booststarttime)) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost started");
	} else {
		VERBOSE_TOROUT_STRING("rcu_torture_boost started holdoff period");
		while (time_before(jiffies, booststarttime)) {
			schedule_timeout_idle(HZ);
			if (kthread_should_stop())
				goto cleanup;
		}
		VERBOSE_TOROUT_STRING("rcu_torture_boost finished holdoff period");
	}

	/* Set real-time priority. */
	sched_set_fifo_low(current);

	/* Each pass through the following loop does one boost-test cycle. */
	do {
		bool failed = false; // Test failed already in this test interval
		bool gp_initiated = false;

		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = READ_ONCE(boost_starttime);
		while (time_before(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			if (stutter_wait("rcu_torture_boost"))
				sched_set_fifo_low(current);
			if (torture_must_stop())
				goto checkwait;
		}

		// Do one boost-test interval.
		endtime = oldstarttime + test_boost_duration * HZ;
		while (time_before(jiffies, endtime)) {
			// Has current GP gone too long?
			if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
				failed = rcu_torture_boost_failed(gp_state, &gp_state_time);
			// If we don't have a grace period in flight, start one.
			if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) {
				gp_state = cur_ops->start_gp_poll();
				gp_initiated = true;
				gp_state_time = jiffies;
			}
			if (stutter_wait("rcu_torture_boost")) {
				sched_set_fifo_low(current);
				// If the grace period already ended,
				// we don't know when that happened, so
				// start over.
				if (cur_ops->poll_gp_state(gp_state))
					gp_initiated = false;
			}
			if (torture_must_stop())
				goto checkwait;
		}

		// In case the grace period extended beyond the end of the loop.
		if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
			rcu_torture_boost_failed(gp_state, &gp_state_time);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				if (oldstarttime == boost_starttime) {
					WRITE_ONCE(boost_starttime,
						   jiffies + test_boost_interval * HZ);
					n_rcu_torture_boosts++;
				}
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(HZ / 20);
		}

		/* Go do the stutter. */
checkwait:	if (stutter_wait("rcu_torture_boost"))
			sched_set_fifo_low(current);
	} while (!torture_must_stop());

cleanup:
	/* Clean up and exit. */
	while (!kthread_should_stop()) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(HZ / 20);
	}
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;
	int oldnice = task_nice(current);

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (time_before(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(HZ / 20);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		if (stutter_wait("rcu_torture_fqs"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

// Used by writers to randomly choose from the available grace-period primitives.
static int synctype[ARRAY_SIZE(rcu_torture_writer_state_names)] = { };
static int nsynctypes;

/*
 * Determine which grace-period primitives are available.
 */
static void rcu_torture_write_types(void)
{
	bool gp_cond1 = gp_cond, gp_cond_exp1 = gp_cond_exp, gp_cond_full1 = gp_cond_full;
	bool gp_cond_exp_full1 = gp_cond_exp_full, gp_exp1 = gp_exp, gp_poll_exp1 = gp_poll_exp;
	bool gp_poll_exp_full1 = gp_poll_exp_full, gp_normal1 = gp_normal, gp_poll1 = gp_poll;
	bool gp_poll_full1 = gp_poll_full, gp_sync1 = gp_sync;

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 &&
	    !gp_cond_exp1 &&
	    !gp_cond_full1 &&
	    !gp_cond_exp_full1 &&
	    !gp_exp1 &&
	    !gp_poll_exp1 &&
	    !gp_poll_exp_full1 &&
	    !gp_normal1 &&
	    !gp_poll1 &&
	    !gp_poll_full1 &&
	    !gp_sync1) {
		gp_cond1 = true;
		gp_cond_exp1 = true;
		gp_cond_full1 = true;
		gp_cond_exp_full1 = true;
		gp_exp1 = true;
		gp_poll_exp1 = true;
		gp_poll_exp_full1 = true;
		gp_normal1 = true;
		gp_poll1 = true;
		gp_poll_full1 = true;
		gp_sync1 = true;
	}
	if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_cond_exp1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp) {
		synctype[nsynctypes++] = RTWS_COND_GET_EXP;
		pr_info("%s: Testing conditional expedited GPs.\n", __func__);
	} else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) {
		pr_alert("%s: gp_cond_exp without primitives.\n", __func__);
	}
	if (gp_cond_full1 && cur_ops->get_gp_state && cur_ops->cond_sync_full) {
		synctype[nsynctypes++] = RTWS_COND_GET_FULL;
		pr_info("%s: Testing conditional full-state GPs.\n", __func__);
	} else if (gp_cond_full && (!cur_ops->get_gp_state || !cur_ops->cond_sync_full)) {
		pr_alert("%s: gp_cond_full without primitives.\n", __func__);
	}
	if (gp_cond_exp_full1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp_full) {
		synctype[nsynctypes++] = RTWS_COND_GET_EXP_FULL;
		pr_info("%s: Testing conditional full-state expedited GPs.\n", __func__);
	} else if (gp_cond_exp_full &&
		   (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp_full)) {
		pr_alert("%s: gp_cond_exp_full without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_poll1 && cur_ops->get_comp_state && cur_ops->same_gp_state &&
	    cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
		synctype[nsynctypes++] = RTWS_POLL_GET;
		pr_info("%s: Testing polling GPs.\n", __func__);
	} else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
		pr_alert("%s: gp_poll without primitives.\n", __func__);
	}
	if (gp_poll_full1 && cur_ops->get_comp_state_full && cur_ops->same_gp_state_full
	    && cur_ops->start_gp_poll_full && cur_ops->poll_gp_state_full) {
		synctype[nsynctypes++] = RTWS_POLL_GET_FULL;
		pr_info("%s: Testing polling full-state GPs.\n", __func__);
	} else if (gp_poll_full && (!cur_ops->start_gp_poll_full || !cur_ops->poll_gp_state_full)) {
		pr_alert("%s: gp_poll_full without primitives.\n", __func__);
	}
	if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) {
		synctype[nsynctypes++] = RTWS_POLL_GET_EXP;
		pr_info("%s: Testing polling expedited GPs.\n", __func__);
	} else if (gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) {
		pr_alert("%s: gp_poll_exp without primitives.\n", __func__);
	}
	if (gp_poll_exp_full1 && cur_ops->start_gp_poll_exp_full && cur_ops->poll_gp_state_full) {
		synctype[nsynctypes++] = RTWS_POLL_GET_EXP_FULL;
		pr_info("%s: Testing polling full-state expedited GPs.\n", __func__);
	} else if (gp_poll_exp_full &&
		   (!cur_ops->start_gp_poll_exp_full || !cur_ops->poll_gp_state_full)) {
		pr_alert("%s: gp_poll_exp_full without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
	pr_alert("%s: Testing %d update types.\n", __func__, nsynctypes);
	pr_info("%s: gp_cond_wi %d gp_cond_wi_exp %d gp_poll_wi %d gp_poll_wi_exp %d\n", __func__, gp_cond_wi, gp_cond_wi_exp, gp_poll_wi, gp_poll_wi_exp);
}
1599 
1600 /*
1601  * Do the specified rcu_torture_writer() synchronous grace period,
1602  * while also testing out the polled APIs.  Note well that the single-CPU
1603  * grace-period optimizations must be accounted for.
1604  */
1605 static void do_rtws_sync(struct torture_random_state *trsp, void (*sync)(void))
1606 {
1607 	unsigned long cookie;
1608 	struct rcu_gp_oldstate cookie_full;
1609 	bool dopoll;
1610 	bool dopoll_full;
1611 	unsigned long r = torture_random(trsp);
1612 
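	// Roughly one time in four for each flavor, also test the polled
	// grace-period APIs across this synchronous grace period.  CPU
	// hotplug is excluded below, presumably so that transitions to and
	// from single-CPU operation cannot confuse the cookie checks.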
1613 	dopoll = cur_ops->get_gp_state && cur_ops->poll_gp_state && !(r & 0x300);
1614 	dopoll_full = cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full && !(r & 0xc00);
1615 	if (dopoll || dopoll_full)
1616 		cpus_read_lock();
1617 	if (dopoll)
1618 		cookie = cur_ops->get_gp_state();
1619 	if (dopoll_full)
1620 		cur_ops->get_gp_state_full(&cookie_full);
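	// Flavors whose just-acquired cookies need two grace periods to
	// expire get an extra sync() here, keeping the checks below honest.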
1621 	if (cur_ops->poll_need_2gp && cur_ops->poll_need_2gp(dopoll, dopoll_full))
1622 		sync();
1623 	sync();
1624 	WARN_ONCE(dopoll && !cur_ops->poll_gp_state(cookie),
1625 		  "%s: Cookie check 3 failed %pS() online %*pbl.",
1626 		  __func__, sync, cpumask_pr_args(cpu_online_mask));
1627 	WARN_ONCE(dopoll_full && !cur_ops->poll_gp_state_full(&cookie_full),
1628 		  "%s: Cookie check 4 failed %pS() online %*pbl",
1629 		  __func__, sync, cpumask_pr_args(cpu_online_mask));
1630 	if (dopoll || dopoll_full)
1631 		cpus_read_unlock();
1632 }
1633 
1634 /*
1635  * RCU torture writer kthread.  Repeatedly substitutes a new structure
1636  * for that pointed to by rcu_torture_current, freeing the old structure
1637  * after a series of grace periods (the "pipeline").
1638  */
1639 static int
1640 rcu_torture_writer(void *arg)
1641 {
1642 	bool booting_still = false;
1643 	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
1644 	unsigned long cookie;
1645 	struct rcu_gp_oldstate cookie_full;
1646 	int expediting = 0;
1647 	unsigned long gp_snap;
1648 	unsigned long gp_snap1;
1649 	struct rcu_gp_oldstate gp_snap_full;
1650 	struct rcu_gp_oldstate gp_snap1_full;
1651 	int i;
1652 	int idx;
1653 	unsigned long j;
1654 	int oldnice = task_nice(current);
1655 	struct rcu_gp_oldstate *rgo = NULL;
1656 	int rgo_size = 0;
1657 	struct rcu_torture *rp;
1658 	struct rcu_torture *old_rp;
1659 	static DEFINE_TORTURE_RANDOM(rand);
1660 	unsigned long stallsdone = jiffies;
1661 	bool stutter_waited;
1662 	unsigned long *ulo = NULL;
1663 	int ulo_size = 0;
1664 
1665 	// If a new stall test is added, this must be adjusted.
1666 	if (stall_cpu_holdoff + stall_gp_kthread + stall_cpu)
1667 		stallsdone += (stall_cpu_holdoff + stall_gp_kthread + stall_cpu + 60) *
1668 			      HZ * (stall_cpu_repeat + 1);
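	// The above is in jiffies: the configured stall seconds plus a
	// minute of slop, multiplied across all stall repetitions.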
1669 	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
1670 	if (!can_expedite)
1671 		pr_alert("%s" TORTURE_FLAG
1672 			 " GP expediting controlled from boot/sysfs for %s.\n",
1673 			 torture_type, cur_ops->name);
1674 	if (WARN_ONCE(nsynctypes == 0,
1675 		      "%s: No update-side primitives.\n", __func__)) {
1676 		/*
1677 		 * No update-side primitives, so don't try updating.
1678 		 * The resulting test won't be testing much, hence the
1679 		 * above WARN_ONCE().
1680 		 */
1681 		rcu_torture_writer_state = RTWS_STOPPING;
1682 		torture_kthread_stopping("rcu_torture_writer");
1683 		return 0;
1684 	}
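	// Allocate arrays of polled grace-period cookies, sized by the
	// number of cookies the current flavor can keep active at once.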
1685 	if (cur_ops->poll_active > 0) {
1686 		ulo = kcalloc(cur_ops->poll_active, sizeof(*ulo), GFP_KERNEL);
1687 		if (!WARN_ON(!ulo))
1688 			ulo_size = cur_ops->poll_active;
1689 	}
1690 	if (cur_ops->poll_active_full > 0) {
1691 		rgo = kzalloc_objs(*rgo, cur_ops->poll_active_full);
1692 		if (!WARN_ON(!rgo))
1693 			rgo_size = cur_ops->poll_active_full;
1694 	}
1695 
1696 	// If the system is still booting, let it finish.
1697 	j = jiffies;
1698 	while (!torture_must_stop() && !rcu_inkernel_boot_has_ended()) {
1699 		booting_still = true;
1700 		schedule_timeout_interruptible(HZ);
1701 	}
1702 	if (booting_still)
1703 		pr_alert("%s" TORTURE_FLAG " Waited %lu jiffies for boot to complete.\n",
1704 			 torture_type, jiffies - j);
1705 
1706 	do {
1707 		rcu_torture_writer_state = RTWS_FIXED_DELAY;
1708 		torture_hrtimeout_us(500, 1000, &rand);
1709 		rp = rcu_torture_alloc();
1710 		if (rp == NULL)
1711 			continue;
1712 		rp->rtort_pipe_count = 0;
1713 		ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count);
1714 		rcu_torture_writer_state = RTWS_DELAY;
1715 		udelay(torture_random(&rand) & 0x3ff);
1716 		rcu_torture_writer_state = RTWS_REPLACE;
1717 		old_rp = rcu_dereference_check(rcu_torture_current,
1718 					       current == writer_task);
1719 		rp->rtort_mbtest = 1;
1720 		rcu_assign_pointer(rcu_torture_current, rp);
1721 		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
1722 		if (old_rp) {
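			// Advance the old structure one stage down the
			// pipeline, clamping the histogram index to the
			// final bucket.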
1723 			i = old_rp->rtort_pipe_count;
1724 			if (i > RCU_TORTURE_PIPE_LEN)
1725 				i = RCU_TORTURE_PIPE_LEN;
1726 			atomic_inc(&rcu_torture_wcount[i]);
1727 			WRITE_ONCE(old_rp->rtort_pipe_count,
1728 				   old_rp->rtort_pipe_count + 1);
1729 			ASSERT_EXCLUSIVE_WRITER(old_rp->rtort_pipe_count);
1730 
1731 			// Make sure readers block polled grace periods.
1732 			if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
1733 				idx = cur_ops->readlock();
1734 				cookie = cur_ops->get_gp_state();
1735 				WARN_ONCE(cur_ops->poll_gp_state(cookie),
1736 					  "%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
1737 					  __func__,
1738 					  rcu_torture_writer_state_getname(),
1739 					  rcu_torture_writer_state,
1740 					  cookie, cur_ops->get_gp_state());
1741 				if (cur_ops->get_comp_state) {
1742 					cookie = cur_ops->get_comp_state();
1743 					WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie));
1744 				}
1745 				cur_ops->readunlock(idx);
1746 			}
1747 			if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) {
1748 				idx = cur_ops->readlock();
1749 				cur_ops->get_gp_state_full(&cookie_full);
1750 				WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
1751 					  "%s: Cookie check 5 failed %s(%d) online %*pbl\n",
1752 					  __func__,
1753 					  rcu_torture_writer_state_getname(),
1754 					  rcu_torture_writer_state,
1755 					  cpumask_pr_args(cpu_online_mask));
1756 				if (cur_ops->get_comp_state_full) {
1757 					cur_ops->get_comp_state_full(&cookie_full);
1758 					WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full));
1759 				}
1760 				cur_ops->readunlock(idx);
1761 			}
1762 			switch (synctype[torture_random(&rand) % nsynctypes]) {
1763 			case RTWS_DEF_FREE:
1764 				rcu_torture_writer_state = RTWS_DEF_FREE;
1765 				cur_ops->deferred_free(old_rp);
1766 				break;
1767 			case RTWS_EXP_SYNC:
1768 				rcu_torture_writer_state = RTWS_EXP_SYNC;
1769 				do_rtws_sync(&rand, cur_ops->exp_sync);
1770 				rcu_torture_pipe_update(old_rp);
1771 				break;
1772 			case RTWS_COND_GET:
1773 				rcu_torture_writer_state = RTWS_COND_GET;
1774 				gp_snap = cur_ops->get_gp_state();
1775 				torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi,
1776 						     1000, &rand);
1777 				rcu_torture_writer_state = RTWS_COND_SYNC;
1778 				cur_ops->cond_sync(gp_snap);
1779 				rcu_torture_pipe_update(old_rp);
1780 				break;
1781 			case RTWS_COND_GET_EXP:
1782 				rcu_torture_writer_state = RTWS_COND_GET_EXP;
1783 				gp_snap = cur_ops->get_gp_state_exp();
1784 				torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi_exp,
1785 						     1000, &rand);
1786 				rcu_torture_writer_state = RTWS_COND_SYNC_EXP;
1787 				cur_ops->cond_sync_exp(gp_snap);
1788 				rcu_torture_pipe_update(old_rp);
1789 				break;
1790 			case RTWS_COND_GET_FULL:
1791 				rcu_torture_writer_state = RTWS_COND_GET_FULL;
1792 				cur_ops->get_gp_state_full(&gp_snap_full);
1793 				torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi,
1794 						     1000, &rand);
1795 				rcu_torture_writer_state = RTWS_COND_SYNC_FULL;
1796 				cur_ops->cond_sync_full(&gp_snap_full);
1797 				rcu_torture_pipe_update(old_rp);
1798 				break;
1799 			case RTWS_COND_GET_EXP_FULL:
1800 				rcu_torture_writer_state = RTWS_COND_GET_EXP_FULL;
1801 				cur_ops->get_gp_state_full(&gp_snap_full);
1802 				torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi_exp,
1803 						     1000, &rand);
1804 				rcu_torture_writer_state = RTWS_COND_SYNC_EXP_FULL;
1805 				cur_ops->cond_sync_exp_full(&gp_snap_full);
1806 				rcu_torture_pipe_update(old_rp);
1807 				break;
1808 			case RTWS_POLL_GET:
1809 				rcu_torture_writer_state = RTWS_POLL_GET;
1810 				for (i = 0; i < ulo_size; i++)
1811 					ulo[i] = cur_ops->get_comp_state();
1812 				gp_snap = cur_ops->start_gp_poll();
1813 				rcu_torture_writer_state = RTWS_POLL_WAIT;
1814 				if (cur_ops->exp_current && !(torture_random(&rand) & 0xff))
1815 					cur_ops->exp_current();
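				// While waiting, refresh any slot whose
				// cookie has expired or caught up with the
				// current state, so that each slot keeps
				// tracking a distinct grace period.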
1816 				while (!cur_ops->poll_gp_state(gp_snap)) {
1817 					gp_snap1 = cur_ops->get_gp_state();
1818 					for (i = 0; i < ulo_size; i++)
1819 						if (cur_ops->poll_gp_state(ulo[i]) ||
1820 						    cur_ops->same_gp_state(ulo[i], gp_snap1)) {
1821 							ulo[i] = gp_snap1;
1822 							break;
1823 						}
1824 					WARN_ON_ONCE(ulo_size > 0 && i >= ulo_size);
1825 					torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi,
1826 							     1000, &rand);
1827 				}
1828 				rcu_torture_pipe_update(old_rp);
1829 				break;
1830 			case RTWS_POLL_GET_FULL:
1831 				rcu_torture_writer_state = RTWS_POLL_GET_FULL;
1832 				for (i = 0; i < rgo_size; i++)
1833 					cur_ops->get_comp_state_full(&rgo[i]);
1834 				cur_ops->start_gp_poll_full(&gp_snap_full);
1835 				rcu_torture_writer_state = RTWS_POLL_WAIT_FULL;
1836 				if (cur_ops->exp_current && !(torture_random(&rand) & 0xff))
1837 					cur_ops->exp_current();
1838 				while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
1839 					cur_ops->get_gp_state_full(&gp_snap1_full);
1840 					for (i = 0; i < rgo_size; i++)
1841 						if (cur_ops->poll_gp_state_full(&rgo[i]) ||
1842 						    cur_ops->same_gp_state_full(&rgo[i],
1843 										&gp_snap1_full)) {
1844 							rgo[i] = gp_snap1_full;
1845 							break;
1846 						}
1847 					WARN_ON_ONCE(rgo_size > 0 && i >= rgo_size);
1848 					torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi,
1849 							     1000, &rand);
1850 				}
1851 				rcu_torture_pipe_update(old_rp);
1852 				break;
1853 			case RTWS_POLL_GET_EXP:
1854 				rcu_torture_writer_state = RTWS_POLL_GET_EXP;
1855 				gp_snap = cur_ops->start_gp_poll_exp();
1856 				rcu_torture_writer_state = RTWS_POLL_WAIT_EXP;
1857 				while (!cur_ops->poll_gp_state_exp(gp_snap))
1858 					torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi_exp,
1859 							     1000, &rand);
1860 				rcu_torture_pipe_update(old_rp);
1861 				break;
1862 			case RTWS_POLL_GET_EXP_FULL:
1863 				rcu_torture_writer_state = RTWS_POLL_GET_EXP_FULL;
1864 				cur_ops->start_gp_poll_exp_full(&gp_snap_full);
1865 				rcu_torture_writer_state = RTWS_POLL_WAIT_EXP_FULL;
1866 				while (!cur_ops->poll_gp_state_full(&gp_snap_full))
1867 					torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi_exp,
1868 							     1000, &rand);
1869 				rcu_torture_pipe_update(old_rp);
1870 				break;
1871 			case RTWS_SYNC:
1872 				rcu_torture_writer_state = RTWS_SYNC;
1873 				do_rtws_sync(&rand, cur_ops->sync);
1874 				rcu_torture_pipe_update(old_rp);
1875 				break;
1876 			default:
1877 				WARN_ON_ONCE(1);
1878 				break;
1879 			}
1880 		}
1881 		WRITE_ONCE(rcu_torture_current_version,
1882 			   rcu_torture_current_version + 1);
1883 		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
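		// One chance in 256 of starting an expediting cycle, but once
		// started, take a step on every pass:  positive values count
		// rcu_expedite_gp() nesting up, and negative values count it
		// back down via rcu_unexpedite_gp().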
1884 		if (can_expedite &&
1885 		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
1886 			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
1887 			if (expediting >= 0)
1888 				rcu_expedite_gp();
1889 			else
1890 				rcu_unexpedite_gp();
1891 			if (++expediting > 3)
1892 				expediting = -expediting;
1893 		} else if (!can_expedite) { /* Disabled during boot, recheck. */
1894 			can_expedite = !rcu_gp_is_expedited() &&
1895 				       !rcu_gp_is_normal();
1896 		}
1897 		rcu_torture_writer_state = RTWS_STUTTER;
1898 		stutter_waited = stutter_wait("rcu_torture_writer");
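		// If the stutter actually waited, each torture structure
		// should have had time to cycle back to the free list,
		// barring callback flooding, slow-GP flavors, stall testing,
		// or impending shutdown.  Complain about any laggards.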
1899 		if (stutter_waited &&
1900 		    !atomic_read(&rcu_fwd_cb_nodelay) &&
1901 		    !cur_ops->slow_gps &&
1902 		    !torture_must_stop() &&
1903 		    time_after(jiffies, stallsdone))
1904 			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
1905 				if (list_empty(&rcu_tortures[i].rtort_free) &&
1906 				    rcu_access_pointer(rcu_torture_current) != &rcu_tortures[i]) {
1907 					tracing_off();
1908 					if (cur_ops->gp_kthread_dbg)
1909 						cur_ops->gp_kthread_dbg();
1910 					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
1911 					rcu_ftrace_dump(DUMP_ALL);
1912 					break;
1913 				}
1914 		if (stutter_waited)
1915 			sched_set_normal(current, oldnice);
1916 	} while (!torture_must_stop());
1917 	rcu_torture_current = NULL;  // Let stats task know that we are done.
1918 	/* Reset expediting back to unexpedited. */
1919 	if (expediting > 0)
1920 		expediting = -expediting;
1921 	while (can_expedite && expediting++ < 0)
1922 		rcu_unexpedite_gp();
1923 	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
1924 	if (!can_expedite)
1925 		pr_alert("%s" TORTURE_FLAG
1926 			 " Dynamic grace-period expediting was disabled.\n",
1927 			 torture_type);
1928 	kfree(ulo);
1929 	kfree(rgo);
1930 	rcu_torture_writer_state = RTWS_STOPPING;
1931 	torture_kthread_stopping("rcu_torture_writer");
1932 	return 0;
1933 }
1934 
1935 /*
1936  * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
1937  * delay between calls.
1938  */
1939 static int
1940 rcu_torture_fakewriter(void *arg)
1941 {
1942 	unsigned long gp_snap;
1943 	struct rcu_gp_oldstate gp_snap_full;
1944 	DEFINE_TORTURE_RANDOM(rand);
1945 
1946 	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
1947 	set_user_nice(current, MAX_NICE);
1948 
1949 	if (WARN_ONCE(nsynctypes == 0,
1950 		      "%s: No update-side primitives.\n", __func__)) {
1951 		/*
1952 		 * No update-side primitives, so don't try updating.
1953 		 * The resulting test won't be testing much, hence the
1954 		 * above WARN_ONCE().
1955 		 */
1956 		torture_kthread_stopping("rcu_torture_fakewriter");
1957 		return 0;
1958 	}
1959 
1960 	do {
1961 		torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand);
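		// About once per eight passes across all fakewriters,
		// exercise the callback-barrier operation instead of a
		// grace-period primitive.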
1962 		if (cur_ops->cb_barrier != NULL &&
1963 		    torture_random(&rand) % (nrealfakewriters * 8) == 0) {
1964 			cur_ops->cb_barrier();
1965 		} else {
1966 			switch (synctype[torture_random(&rand) % nsynctypes]) {
1967 			case RTWS_DEF_FREE:
1968 				break;
1969 			case RTWS_EXP_SYNC:
1970 				cur_ops->exp_sync();
1971 				break;
1972 			case RTWS_COND_GET:
1973 				gp_snap = cur_ops->get_gp_state();
1974 				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1975 				cur_ops->cond_sync(gp_snap);
1976 				break;
1977 			case RTWS_COND_GET_EXP:
1978 				gp_snap = cur_ops->get_gp_state_exp();
1979 				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1980 				cur_ops->cond_sync_exp(gp_snap);
1981 				break;
1982 			case RTWS_COND_GET_FULL:
1983 				cur_ops->get_gp_state_full(&gp_snap_full);
1984 				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1985 				cur_ops->cond_sync_full(&gp_snap_full);
1986 				break;
1987 			case RTWS_COND_GET_EXP_FULL:
1988 				cur_ops->get_gp_state_full(&gp_snap_full);
1989 				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1990 				cur_ops->cond_sync_exp_full(&gp_snap_full);
1991 				break;
1992 			case RTWS_POLL_GET:
1993 				if (cur_ops->start_poll_irqsoff)
1994 					local_irq_disable();
1995 				gp_snap = cur_ops->start_gp_poll();
1996 				if (cur_ops->start_poll_irqsoff)
1997 					local_irq_enable();
1998 				while (!cur_ops->poll_gp_state(gp_snap)) {
1999 					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
2000 								  &rand);
2001 				}
2002 				break;
2003 			case RTWS_POLL_GET_FULL:
2004 				if (cur_ops->start_poll_irqsoff)
2005 					local_irq_disable();
2006 				cur_ops->start_gp_poll_full(&gp_snap_full);
2007 				if (cur_ops->start_poll_irqsoff)
2008 					local_irq_enable();
2009 				while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
2010 					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
2011 								  &rand);
2012 				}
2013 				break;
2014 			case RTWS_POLL_GET_EXP:
2015 				gp_snap = cur_ops->start_gp_poll_exp();
2016 				while (!cur_ops->poll_gp_state_exp(gp_snap)) {
2017 					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
2018 								  &rand);
2019 				}
2020 				break;
2021 			case RTWS_POLL_GET_EXP_FULL:
2022 				cur_ops->start_gp_poll_exp_full(&gp_snap_full);
2023 				while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
2024 					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
2025 								  &rand);
2026 				}
2027 				break;
2028 			case RTWS_SYNC:
2029 				cur_ops->sync();
2030 				break;
2031 			default:
2032 				WARN_ON_ONCE(1);
2033 				break;
2034 			}
2035 		}
2036 		stutter_wait("rcu_torture_fakewriter");
2037 	} while (!torture_must_stop());
2038 
2039 	torture_kthread_stopping("rcu_torture_fakewriter");
2040 	return 0;
2041 }
2042 
2043 static void rcu_torture_timer_cb(struct rcu_head *rhp)
2044 {
2045 	kfree(rhp);
2046 }
2047 
2048 // Set up and carry out testing of RCU's global memory ordering.
2049 static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
2050 					struct torture_random_state *trsp)
2051 {
2052 	unsigned long loops;
2053 	int noc = torture_num_online_cpus();
2054 	int rdrchked;
2055 	int rdrchker;
2056 	struct rcu_torture_reader_check *rtrcp; // Me.
2057 	struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking.
2058 	struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked.
2059 	struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me.
2060 
2061 	if (myid < 0)
2062 		return; // Don't try this from timer handlers.
2063 
2064 	// Increment my counter.
2065 	rtrcp = &rcu_torture_reader_mbchk[myid];
2066 	WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1);
2067 
2068 	// Attempt to assign someone else some checking work.
2069 	rdrchked = torture_random(trsp) % nrealreaders;
2070 	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
2071 	rdrchker = torture_random(trsp) % nrealreaders;
2072 	rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker];
2073 	if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker &&
2074 	    smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below.
2075 	    !READ_ONCE(rtp->rtort_chkp) &&
2076 	    !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below.
2077 		rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops);
2078 		WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0);
2079 		rtrcp->rtc_chkrdr = rdrchked;
2080 		WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends.
2081 		if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) ||
2082 		    cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp))
2083 			(void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out.
2084 	}
2085 
2086 	// If assigned some completed work, do it!
2087 	rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner);
2088 	if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready))
2089 		return; // No work or work not yet ready.
2090 	rdrchked = rtrcp_assigner->rtc_chkrdr;
2091 	if (WARN_ON_ONCE(rdrchked < 0))
2092 		return;
2093 	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
2094 	loops = READ_ONCE(rtrcp_chked->rtc_myloops);
2095 	atomic_inc(&n_rcu_torture_mbchk_tries);
2096 	if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops))
2097 		atomic_inc(&n_rcu_torture_mbchk_fail);
2098 	rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2;
2099 	rtrcp_assigner->rtc_ready = 0;
2100 	smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work.
2101 	smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign.
2102 }
2103 
2104 // Verify the specified RCUTORTURE_RDR* state.
2105 #define ROEC_ARGS "%s %s: Current %#x  To add %#x  To remove %#x  preempt_count() %#x\n", __func__, s, curstate, new, old, preempt_count()
2106 static void rcutorture_one_extend_check(char *s, int curstate, int new, int old)
2107 {
2108 	int mask;
2109 
2110 	if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST_CHK_RDR_STATE) || in_nmi())
2111 		return;
2112 
2113 	WARN_ONCE(!(curstate & RCUTORTURE_RDR_IRQ) && irqs_disabled() && !in_hardirq(), ROEC_ARGS);
2114 	WARN_ONCE((curstate & RCUTORTURE_RDR_IRQ) && !irqs_disabled(), ROEC_ARGS);
2115 
2116 	// If CONFIG_PREEMPT_COUNT=n, further checks are unreliable.
2117 	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
2118 		return;
2119 
2120 	WARN_ONCE((curstate & (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH)) &&
2121 		  !softirq_count(), ROEC_ARGS);
2122 	WARN_ONCE((curstate & (RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED)) &&
2123 		  !(preempt_count() & PREEMPT_MASK), ROEC_ARGS);
2124 	WARN_ONCE(cur_ops->readlock_nesting &&
2125 		  (curstate & (RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2)) &&
2126 		  cur_ops->readlock_nesting() == 0, ROEC_ARGS);
2127 
2128 	// Interrupt handlers have all sorts of stuff disabled, so ignore
2129 	// unintended disabling.
2130 	if (in_serving_softirq() || in_hardirq())
2131 		return;
2132 
2133 	WARN_ONCE(cur_ops->extendables &&
2134 		  !(curstate & (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH)) &&
2135 		  softirq_count(), ROEC_ARGS);
2136 
2137 	/*
2138 	 * Non-preemptible RCU in a preemptible kernel uses preempt_disable()
2139 	 * as rcu_read_lock().
2140 	 */
2141 	mask = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
2142 	if (!IS_ENABLED(CONFIG_PREEMPT_RCU))
2143 		mask |= RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2;
2144 
2145 	WARN_ONCE(cur_ops->extendables && !(curstate & mask) &&
2146 		  (preempt_count() & PREEMPT_MASK), ROEC_ARGS);
2147 
2148 	/*
2149 	 * Non-preemptible RCU in a preemptible kernel uses "preempt_count() &
2150 	 * PREEMPT_MASK" as ->readlock_nesting().
2151 	 */
2152 	mask = RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2;
2153 	if (!IS_ENABLED(CONFIG_PREEMPT_RCU))
2154 		mask |= RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
2155 
2156 	if (IS_ENABLED(CONFIG_PREEMPT_RT) && softirq_count())
2157 		mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
2158 
2159 	WARN_ONCE(cur_ops->readlock_nesting && !(curstate & mask) &&
2160 		  cur_ops->readlock_nesting() > 0, ROEC_ARGS);
2161 }
2162 
2163 /*
2164  * Do one extension of an RCU read-side critical section using the
2165  * current reader state in readstate (set to zero for initial entry
2166  * to extended critical section), set the new state as specified by
2167  * newstate (set to zero for final exit from extended critical section),
2168  * and random-number-generator state in trsp.  If this is neither the
2169  * beginning nor the end of the critical section and if there was actually a
2170  * change, do a ->read_delay().
2171  */
2172 static void rcutorture_one_extend(int *readstate, int newstate, struct torture_random_state *trsp,
2173 				  struct rt_read_seg *rtrsp)
2174 {
2175 	bool first;
2176 	unsigned long flags;
2177 	int idxnew1 = -1;
2178 	int idxnew2 = -1;
2179 	int idxold1 = *readstate;
2180 	int idxold2 = idxold1;
2181 	int statesnew = ~*readstate & newstate;
2182 	int statesold = *readstate & ~newstate;
2183 
2184 	first = idxold1 == 0;
2185 	WARN_ON_ONCE(idxold2 < 0);
2186 	WARN_ON_ONCE(idxold2 & ~(RCUTORTURE_RDR_ALLBITS | RCUTORTURE_RDR_UPDOWN));
2187 	rcutorture_one_extend_check("before change", idxold1, statesnew, statesold);
2188 	rtrsp->rt_readstate = newstate;
2189 
2190 	/* First, put new protection in place to avoid critical-section gap. */
2191 	if (statesnew & RCUTORTURE_RDR_BH)
2192 		local_bh_disable();
2193 	if (statesnew & RCUTORTURE_RDR_RBH)
2194 		rcu_read_lock_bh();
2195 	if (statesnew & RCUTORTURE_RDR_IRQ)
2196 		local_irq_disable();
2197 	if (statesnew & RCUTORTURE_RDR_PREEMPT)
2198 		preempt_disable();
2199 	if (statesnew & RCUTORTURE_RDR_SCHED)
2200 		rcu_read_lock_sched();
2201 	if (statesnew & RCUTORTURE_RDR_RCU_1)
2202 		idxnew1 = (cur_ops->readlock() << RCUTORTURE_RDR_SHIFT_1) & RCUTORTURE_RDR_MASK_1;
2203 	if (statesnew & RCUTORTURE_RDR_RCU_2)
2204 		idxnew2 = (cur_ops->readlock() << RCUTORTURE_RDR_SHIFT_2) & RCUTORTURE_RDR_MASK_2;
2205 
2206 	// Complain unless both the old and the new protection are in place.
2207 	rcutorture_one_extend_check("during change", idxold1 | statesnew, statesnew, statesold);
2208 
2209 	// Sample CPU under both sets of protections to reduce confusion.
2210 	if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)) {
2211 		int cpu = raw_smp_processor_id();
2212 		rtrsp->rt_cpu = cpu;
2213 		if (!first) {
2214 			rtrsp[-1].rt_end_cpu = cpu;
2215 			if (cur_ops->reader_blocked)
2216 				rtrsp[-1].rt_preempted = cur_ops->reader_blocked();
2217 		}
2218 	}
2219 	// Sample grace-period sequence number, as good a place as any.
2220 	if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_GP) && cur_ops->gather_gp_seqs) {
2221 		rtrsp->rt_gp_seq = cur_ops->gather_gp_seqs();
2222 		rtrsp->rt_ts = ktime_get_mono_fast_ns();
2223 		if (!first)
2224 			rtrsp[-1].rt_gp_seq_end = rtrsp->rt_gp_seq;
2225 	}
2226 
2227 	/*
2228 	 * Next, remove old protection, in decreasing order of strength
2229 	 * to avoid unlock paths that aren't safe in the stronger
2230 	 * context.  Namely, BH cannot be enabled while interrupts are disabled.
2231 	 * Additionally, PREEMPT_RT requires that BH be enabled in preemptible
2232 	 * context.
2233 	 */
2234 	if (statesold & RCUTORTURE_RDR_IRQ)
2235 		local_irq_enable();
2236 	if (statesold & RCUTORTURE_RDR_PREEMPT)
2237 		preempt_enable();
2238 	if (statesold & RCUTORTURE_RDR_SCHED)
2239 		rcu_read_unlock_sched();
2240 	if (statesold & RCUTORTURE_RDR_BH)
2241 		local_bh_enable();
2242 	if (statesold & RCUTORTURE_RDR_RBH)
2243 		rcu_read_unlock_bh();
2244 	if (statesold & RCUTORTURE_RDR_RCU_2) {
2245 		cur_ops->readunlock((idxold2 & RCUTORTURE_RDR_MASK_2) >> RCUTORTURE_RDR_SHIFT_2);
2246 		WARN_ON_ONCE(idxnew2 != -1);
2247 		idxold2 = 0;
2248 	}
2249 	if (statesold & RCUTORTURE_RDR_RCU_1) {
2250 		bool lockit;
2251 
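		// Occasionally hold the current task's ->pi_lock across the
		// final readunlock, presumably to exercise RCU's deferred
		// quiescent-state code paths.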
2252 		lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff);
2253 		if (lockit)
2254 			raw_spin_lock_irqsave(&current->pi_lock, flags);
2255 		cur_ops->readunlock((idxold1 & RCUTORTURE_RDR_MASK_1) >> RCUTORTURE_RDR_SHIFT_1);
2256 		WARN_ON_ONCE(idxnew1 != -1);
2257 		idxold1 = 0;
2258 		if (lockit)
2259 			raw_spin_unlock_irqrestore(&current->pi_lock, flags);
2260 	}
2261 	if (statesold & RCUTORTURE_RDR_UPDOWN) {
2262 		cur_ops->up_read((idxold1 & RCUTORTURE_RDR_MASK_1) >> RCUTORTURE_RDR_SHIFT_1);
2263 		WARN_ON_ONCE(idxnew1 != -1);
2264 		idxold1 = 0;
2265 	}
2266 
2267 	/* Delay if neither beginning nor end and there was a change. */
2268 	if ((statesnew || statesold) && *readstate && newstate)
2269 		cur_ops->read_delay(trsp, rtrsp);
2270 
2271 	/* Update the reader state. */
2272 	if (idxnew1 == -1)
2273 		idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1;
2274 	WARN_ON_ONCE(idxnew1 < 0);
2275 	if (idxnew2 == -1)
2276 		idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2;
2277 	WARN_ON_ONCE(idxnew2 < 0);
2278 	*readstate = idxnew1 | idxnew2 | newstate;
2279 	WARN_ON_ONCE(*readstate < 0);
2280 	if (WARN_ON_ONCE(*readstate & ~RCUTORTURE_RDR_ALLBITS))
2281 		pr_info("Unexpected readstate value of %#x\n", *readstate);
2282 	rcutorture_one_extend_check("after change", *readstate, statesnew, statesold);
2283 }
2284 
2285 /* Return the biggest extendables mask given current RCU and boot parameters. */
2286 static int rcutorture_extend_mask_max(void)
2287 {
2288 	int mask;
2289 
2290 	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
2291 	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
2292 	mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2;
2293 	return mask;
2294 }
2295 
2296 /* Return a random protection state mask, but with at least one bit set. */
2297 static int
2298 rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
2299 {
2300 	int mask = rcutorture_extend_mask_max();
2301 	unsigned long randmask1 = torture_random(trsp);
2302 	unsigned long randmask2 = randmask1 >> 3;
2303 	unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
2304 	unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
2305 	unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
2306 
2307 	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1);  // Can't have reader idx bits.
2308 	/* Mostly only one bit (need preemption!), sometimes lots of bits. */
2309 	if (!(randmask1 & 0x7))
2310 		mask = mask & randmask2;
2311 	else
2312 		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
2313 
2314 	// Can't have nested RCU reader without outer RCU reader.
2315 	if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) {
2316 		if (oldmask & RCUTORTURE_RDR_RCU_1)
2317 			mask &= ~RCUTORTURE_RDR_RCU_2;
2318 		else
2319 			mask |= RCUTORTURE_RDR_RCU_1;
2320 	}
2321 
2322 	/*
2323 	 * Can't enable bh w/irq disabled.
2324 	 */
2325 	if (mask & RCUTORTURE_RDR_IRQ)
2326 		mask |= oldmask & bhs;
2327 
2328 	/*
2329 	 * Ideally these sequences would be detected in debug builds
2330 	 * (regardless of RT), but until then don't stop testing
2331 	 * them on non-RT.
2332 	 */
2333 	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
2334 		/* Can't modify BH in atomic context */
2335 		if (oldmask & preempts_irq)
2336 			mask &= ~bhs;
2337 		if ((oldmask | mask) & preempts_irq)
2338 			mask |= oldmask & bhs;
2339 	}
2340 
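	// Never return an empty mask: fall back to a plain RCU reader.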
2341 	return mask ?: RCUTORTURE_RDR_RCU_1;
2342 }
2343 
2344 /*
2345  * Do a randomly selected number of extensions of an existing RCU read-side
2346  * critical section.
2347  */
2348 static struct rt_read_seg *
2349 rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp, struct rt_read_seg *rtrsp)
2350 {
2351 	int i;
2352 	int j;
2353 	int mask = rcutorture_extend_mask_max();
2354 
2355 	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
2356 	if (!((mask - 1) & mask))
2357 		return rtrsp;  /* Current RCU reader not extendable. */
2358 	/* Bias towards larger numbers of loops. */
2359 	i = torture_random(trsp);
2360 	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
2361 	for (j = 0; j < i; j++) {
2362 		mask = rcutorture_extend_mask(*readstate, trsp);
2363 		WARN_ON_ONCE(mask & RCUTORTURE_RDR_UPDOWN);
2364 		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
2365 	}
2366 	return &rtrsp[j];
2367 }
2368 
2369 struct rcu_torture_one_read_state {
2370 	bool checkpolling;
2371 	unsigned long cookie;
2372 	struct rcu_gp_oldstate cookie_full;
2373 	unsigned long started;
2374 	struct rcu_torture *p;
2375 	int readstate;
2376 	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS];
2377 	struct rt_read_seg *rtrsp;
2378 	unsigned long long ts;
2379 };
2380 
2381 static void init_rcu_torture_one_read_state(struct rcu_torture_one_read_state *rtorsp,
2382 					    struct torture_random_state *trsp)
2383 {
2384 	memset(rtorsp, 0, sizeof(*rtorsp));
2385 	rtorsp->checkpolling = !(torture_random(trsp) & 0xfff);
2386 	rtorsp->rtrsp = &rtorsp->rtseg[0];
2387 }
2388 
2389 /*
2390  * Set up the first segment of a series of overlapping read-side
2391  * critical sections.  The caller must have actually initiated the
2392  * outermost read-side critical section.
2393  */
2394 static bool rcu_torture_one_read_start(struct rcu_torture_one_read_state *rtorsp,
2395 				       struct torture_random_state *trsp, long myid)
2396 {
2397 	if (rtorsp->checkpolling) {
2398 		if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
2399 			rtorsp->cookie = cur_ops->get_gp_state();
2400 		if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
2401 			cur_ops->get_gp_state_full(&rtorsp->cookie_full);
2402 	}
2403 	rtorsp->started = cur_ops->get_gp_seq();
2404 	rtorsp->ts = rcu_trace_clock_local();
2405 	rtorsp->p = rcu_dereference_check(rcu_torture_current,
2406 					  !cur_ops->readlock_held || cur_ops->readlock_held() ||
2407 					  (rtorsp->readstate & RCUTORTURE_RDR_UPDOWN));
2408 	if (rtorsp->p == NULL) {
2409 		/* Wait for rcu_torture_writer to get underway */
2410 		rcutorture_one_extend(&rtorsp->readstate, 0, trsp, rtorsp->rtrsp);
2411 		return false;
2412 	}
2413 	if (rtorsp->p->rtort_mbtest == 0)
2414 		atomic_inc(&n_rcu_torture_mberror);
2415 	rcu_torture_reader_do_mbchk(myid, rtorsp->p, trsp);
2416 	return true;
2417 }
2418 
2419 /*
2420  * Complete the last segment of a series of overlapping read-side
2421  * critical sections and check for errors.
2422  */
2423 static void rcu_torture_one_read_end(struct rcu_torture_one_read_state *rtorsp,
2424 				     struct torture_random_state *trsp)
2425 {
2426 	int i;
2427 	unsigned long completed;
2428 	int pipe_count;
2429 	bool preempted = false;
2430 	struct rt_read_seg *rtrsp1;
2431 
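	// Disable preemption so that the pipeline and batch counters are
	// sampled and incremented on a single CPU.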
2432 	preempt_disable();
2433 	pipe_count = READ_ONCE(rtorsp->p->rtort_pipe_count);
2434 	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
2435 		// Should not happen in a correct RCU implementation,
2436 		// but happens quite often for torture_type=busted.
2437 		pipe_count = RCU_TORTURE_PIPE_LEN;
2438 	}
2439 	completed = cur_ops->get_gp_seq();
2440 	if (pipe_count > 1) {
2441 		do_trace_rcu_torture_read(cur_ops->name, &rtorsp->p->rtort_rcu,
2442 					  rtorsp->ts, rtorsp->started, completed);
2443 		rcu_ftrace_dump(DUMP_ALL);
2444 	}
2445 	__this_cpu_inc(rcu_torture_count[pipe_count]);
2446 	completed = rcutorture_seq_diff(completed, rtorsp->started);
2447 	if (completed > RCU_TORTURE_PIPE_LEN) {
2448 		/* Should not happen, but... */
2449 		completed = RCU_TORTURE_PIPE_LEN;
2450 	}
2451 	__this_cpu_inc(rcu_torture_batch[completed]);
2452 	preempt_enable();
2453 	if (rtorsp->checkpolling) {
2454 		if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
2455 			WARN_ONCE(cur_ops->poll_gp_state(rtorsp->cookie),
2456 				  "%s: Cookie check 2 failed %s(%d) %lu->%lu\n",
2457 				  __func__,
2458 				  rcu_torture_writer_state_getname(),
2459 				  rcu_torture_writer_state,
2460 				  rtorsp->cookie, cur_ops->get_gp_state());
2461 		if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
2462 			WARN_ONCE(cur_ops->poll_gp_state_full(&rtorsp->cookie_full),
2463 				  "%s: Cookie check 6 failed %s(%d) online %*pbl\n",
2464 				  __func__,
2465 				  rcu_torture_writer_state_getname(),
2466 				  rcu_torture_writer_state,
2467 				  cpumask_pr_args(cpu_online_mask));
2468 	}
2469 	if (cur_ops->reader_blocked)
2470 		preempted = cur_ops->reader_blocked();
2471 	rcutorture_one_extend(&rtorsp->readstate, 0, trsp, rtorsp->rtrsp);
2472 	WARN_ON_ONCE(rtorsp->readstate);
2473 	// This next splat is expected behavior if leakpointer, especially
2474 	// for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
2475 	WARN_ON_ONCE(leakpointer && READ_ONCE(rtorsp->p->rtort_pipe_count) > 1);
2476 
2477 	/* If error or close call, record the sequence of reader protections. */
2478 	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
2479 		i = 0;
2480 		for (rtrsp1 = &rtorsp->rtseg[0]; rtrsp1 < rtorsp->rtrsp; rtrsp1++)
2481 			err_segs[i++] = *rtrsp1;
2482 		rt_read_nsegs = i;
2483 		rt_read_preempted = preempted;
2484 	}
2485 }
2486 
2487 /*
2488  * Do one read-side critical section, returning false if there was
2489  * no data to read.  Can be invoked both from process context and
2490  * from a timer handler.
2491  */
2492 static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
2493 {
2494 	int newstate;
2495 	struct rcu_torture_one_read_state rtors;
2496 
2497 	WARN_ON_ONCE(!rcu_is_watching());
2498 	init_rcu_torture_one_read_state(&rtors, trsp);
2499 	newstate = rcutorture_extend_mask(rtors.readstate, trsp);
2500 	WARN_ON_ONCE(newstate & RCUTORTURE_RDR_UPDOWN);
2501 	rcutorture_one_extend(&rtors.readstate, newstate, trsp, rtors.rtrsp++);
2502 	if (!rcu_torture_one_read_start(&rtors, trsp, myid))
2503 		return false;
2504 	rtors.rtrsp = rcutorture_loop_extend(&rtors.readstate, trsp, rtors.rtrsp);
2505 	rcu_torture_one_read_end(&rtors, trsp);
2506 	return true;
2507 }
2508 
2509 static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
2510 
2511 /*
2512  * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
2513  * incrementing the corresponding element of the pipeline array.  The
2514  * counter in the element should never be greater than 1, otherwise, the
2515  * RCU implementation is broken.
2516  */
2517 static void rcu_torture_timer(struct timer_list *unused)
2518 {
2519 	WARN_ON_ONCE(!in_serving_softirq());
2520 	WARN_ON_ONCE(in_hardirq());
2521 	WARN_ON_ONCE(in_nmi());
2522 	atomic_long_inc(&n_rcu_torture_timers);
2523 	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1);
2524 
2525 	/* Test call_rcu() invocation from interrupt handler. */
2526 	if (cur_ops->call) {
2527 		struct rcu_head *rhp = kmalloc_obj(*rhp, GFP_NOWAIT);
2528 
2529 		if (rhp)
2530 			cur_ops->call(rhp, rcu_torture_timer_cb);
2531 	}
2532 }
2533 
2534 /*
2535  * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
2536  * incrementing the corresponding element of the pipeline array.  The
2537  * counter in the element should never be greater than 1; otherwise, the
2538  * RCU implementation is broken.
2539  */
2540 static int
2541 rcu_torture_reader(void *arg)
2542 {
2543 	unsigned long lastsleep = jiffies;
2544 	long myid = (long)arg;
2545 	int mynumonline = myid;
2546 	DEFINE_TORTURE_RANDOM(rand);
2547 	struct timer_list t;
2548 
2549 	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
2550 	set_user_nice(current, MAX_NICE);
2551 	if (irqreader && cur_ops->irq_capable)
2552 		timer_setup_on_stack(&t, rcu_torture_timer, 0);
2553 	tick_dep_set_task(current, TICK_DEP_BIT_RCU);  // CPU bound, so need tick.
2554 	do {
2555 		if (irqreader && cur_ops->irq_capable) {
2556 			if (!timer_pending(&t))
2557 				mod_timer(&t, jiffies + 1);
2558 		}
2559 		if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop())
2560 			schedule_timeout_interruptible(HZ);
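		// This kthread is CPU-bound, so briefly sleep at least once
		// every ten jiffies to avoid starving the rest of the system.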
2561 		if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
2562 			torture_hrtimeout_us(500, 1000, &rand);
2563 			lastsleep = jiffies + 10;
2564 		}
2565 		while (!torture_must_stop() &&
2566 		       (torture_num_online_cpus() < mynumonline || !rcu_inkernel_boot_has_ended()))
2567 			schedule_timeout_interruptible(HZ / 5);
2568 		stutter_wait("rcu_torture_reader");
2569 	} while (!torture_must_stop());
2570 	if (irqreader && cur_ops->irq_capable) {
2571 		timer_delete_sync(&t);
2572 		timer_destroy_on_stack(&t);
2573 	}
2574 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2575 	torture_kthread_stopping("rcu_torture_reader");
2576 	return 0;
2577 }
2578 
2579 struct rcu_torture_one_read_state_updown {
2580 	struct hrtimer rtorsu_hrt;
2581 	bool rtorsu_inuse;
2582 	ktime_t rtorsu_kt;
2583 	int rtorsu_cpu;
2584 	unsigned long rtorsu_j;
2585 	unsigned long rtorsu_ndowns;
2586 	unsigned long rtorsu_nups;
2587 	unsigned long rtorsu_nmigrates;
2588 	struct torture_random_state rtorsu_trs;
2589 	struct rcu_torture_one_read_state rtorsu_rtors;
2590 };
2591 
2592 static struct rcu_torture_one_read_state_updown *updownreaders;
2593 static DEFINE_TORTURE_RANDOM(rcu_torture_updown_rand);
2594 static int rcu_torture_updown(void *arg);
2595 
2596 static enum hrtimer_restart rcu_torture_updown_hrt(struct hrtimer *hrtp)
2597 {
2598 	int cpu = raw_smp_processor_id();
2599 	struct rcu_torture_one_read_state_updown *rtorsup;
2600 
2601 	rtorsup = container_of(hrtp, struct rcu_torture_one_read_state_updown, rtorsu_hrt);
2602 	rcu_torture_one_read_end(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs);
2603 	WARN_ONCE(rtorsup->rtorsu_nups >= rtorsup->rtorsu_ndowns, "%s: Up without matching down #%zu.\n", __func__, rtorsup - updownreaders);
2604 	WRITE_ONCE(rtorsup->rtorsu_nups, rtorsup->rtorsu_nups + 1);
2605 	WRITE_ONCE(rtorsup->rtorsu_nmigrates,
2606 		   rtorsup->rtorsu_nmigrates + (cpu != rtorsup->rtorsu_cpu));
2607 	smp_store_release(&rtorsup->rtorsu_inuse, false);
2608 	return HRTIMER_NORESTART;
2609 }
2610 
2611 static int rcu_torture_updown_init(void)
2612 {
2613 	int i;
2614 	struct torture_random_state *rand = &rcu_torture_updown_rand;
2615 	int ret;
2616 
2617 	if (n_up_down < 0)
2618 		return 0;
2619 	if (!srcu_torture_have_up_down()) {
2620 		VERBOSE_TOROUT_STRING("rcu_torture_updown_init: Disabling up/down reader tests due to lack of primitives");
2621 		return 0;
2622 	}
2623 	updownreaders = kzalloc_objs(*updownreaders, n_up_down);
2624 	if (!updownreaders) {
2625 		VERBOSE_TOROUT_STRING("rcu_torture_updown_init: Out of memory, disabling up/down reader tests");
2626 		return -ENOMEM;
2627 	}
2628 	for (i = 0; i < n_up_down; i++) {
2629 		init_rcu_torture_one_read_state(&updownreaders[i].rtorsu_rtors, rand);
2630 		hrtimer_setup(&updownreaders[i].rtorsu_hrt, rcu_torture_updown_hrt, CLOCK_MONOTONIC,
2631 			      HRTIMER_MODE_REL | HRTIMER_MODE_HARD);
2632 		torture_random_init(&updownreaders[i].rtorsu_trs);
2633 		init_rcu_torture_one_read_state(&updownreaders[i].rtorsu_rtors,
2634 						&updownreaders[i].rtorsu_trs);
2635 	}
2636 	ret = torture_create_kthread(rcu_torture_updown, rand, updown_task);
2637 	if (ret) {
2638 		kfree(updownreaders);
2639 		updownreaders = NULL;
2640 	}
2641 	return ret;
2642 }
2643 
2644 static void rcu_torture_updown_cleanup(void)
2645 {
2646 	struct rcu_torture_one_read_state_updown *rtorsup;
2647 
2648 	for (rtorsup = updownreaders; rtorsup < &updownreaders[n_up_down]; rtorsup++) {
2649 		if (!smp_load_acquire(&rtorsup->rtorsu_inuse))
2650 			continue;
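		// If the hrtimer fired, it already did the end-of-read
		// cleanup.  But if we cancelled it first, that cleanup
		// falls to us.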
2651 		if (hrtimer_cancel(&rtorsup->rtorsu_hrt) || WARN_ON_ONCE(rtorsup->rtorsu_inuse)) {
2652 			rcu_torture_one_read_end(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs);
2653 			WARN_ONCE(rtorsup->rtorsu_nups >= rtorsup->rtorsu_ndowns, "%s: Up without matching down #%zu.\n", __func__, rtorsup - updownreaders);
2654 			WRITE_ONCE(rtorsup->rtorsu_nups, rtorsup->rtorsu_nups + 1);
2655 			smp_store_release(&rtorsup->rtorsu_inuse, false);
2656 		}
2657 
2658 	}
2659 	kfree(updownreaders);
2660 	updownreaders = NULL;
2661 }
2662 
2663 // Do one reader for rcu_torture_updown().
2664 static void rcu_torture_updown_one(struct rcu_torture_one_read_state_updown *rtorsup)
2665 {
2666 	int idx;
2667 	int rawidx;
2668 	ktime_t t;
2669 
2670 	init_rcu_torture_one_read_state(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs);
2671 	rawidx = cur_ops->down_read();
2672 	WRITE_ONCE(rtorsup->rtorsu_ndowns, rtorsup->rtorsu_ndowns + 1);
2673 	idx = (rawidx << RCUTORTURE_RDR_SHIFT_1) & RCUTORTURE_RDR_MASK_1;
2674 	rtorsup->rtorsu_rtors.readstate = idx | RCUTORTURE_RDR_UPDOWN;
2675 	rtorsup->rtorsu_rtors.rtrsp++;
2676 	rtorsup->rtorsu_cpu = raw_smp_processor_id();
2677 	if (!rcu_torture_one_read_start(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs, -1)) {
2678 		WARN_ONCE(rtorsup->rtorsu_nups >= rtorsup->rtorsu_ndowns, "%s: Up without matching down #%zu.\n", __func__, rtorsup - updownreaders);
2679 		WRITE_ONCE(rtorsup->rtorsu_nups, rtorsup->rtorsu_nups + 1);
2680 		schedule_timeout_idle(HZ);
2681 		return;
2682 	}
2683 	smp_store_release(&rtorsup->rtorsu_inuse, true);
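	// Roughly one time in a hundred, stretch the hrtimer delay from
	// at most about a millisecond to a full 200 milliseconds.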
2684 	t = torture_random(&rtorsup->rtorsu_trs) & 0xfffff; // At most about a million ns.
2685 	if (t < 10 * 1000)
2686 		t = 200 * 1000 * 1000;
2687 	hrtimer_start(&rtorsup->rtorsu_hrt, t, HRTIMER_MODE_REL | HRTIMER_MODE_HARD);
2688 	smp_mb(); // Sample jiffies after posting hrtimer.
2689 	rtorsup->rtorsu_j = jiffies;  // Not used by hrtimer handler.
2690 	rtorsup->rtorsu_kt = t;
2691 }
2692 
2693 /*
2694  * RCU torture up/down reader kthread, starting RCU readers in kthread
2695  * context and ending them in hrtimer handlers.  Otherwise similar to
2696  * rcu_torture_reader().
2697  */
2698 static int
2699 rcu_torture_updown(void *arg)
2700 {
2701 	unsigned long j;
2702 	struct rcu_torture_one_read_state_updown *rtorsup;
2703 
2704 	VERBOSE_TOROUT_STRING("rcu_torture_updown task started");
2705 	do {
2706 		for (rtorsup = updownreaders; rtorsup < &updownreaders[n_up_down]; rtorsup++) {
2707 			if (torture_must_stop())
2708 				break;
2709 			j = smp_load_acquire(&jiffies); // Time before ->rtorsu_inuse.
2710 			if (smp_load_acquire(&rtorsup->rtorsu_inuse)) {
2711 				WARN_ONCE(time_after(j, rtorsup->rtorsu_j + 1 + HZ * 10),
2712 					  "hrtimer queued at jiffies %lu for %lld ns took %lu jiffies\n", rtorsup->rtorsu_j, rtorsup->rtorsu_kt, j - rtorsup->rtorsu_j);
2713 				continue;
2714 			}
2715 			rcu_torture_updown_one(rtorsup);
2716 		}
2717 		torture_hrtimeout_ms(1, 1000, &rcu_torture_updown_rand);
2718 		stutter_wait("rcu_torture_updown");
2719 	} while (!torture_must_stop());
2720 	rcu_torture_updown_cleanup();
2721 	torture_kthread_stopping("rcu_torture_updown");
2722 	return 0;
2723 }
2724 
2725 /*
2726  * Randomly toggle CPUs' callback-offload state.  This uses hrtimers to
2727  * increase race probabilities and fuzzes the interval between toggling.
2728  */
2729 static int rcu_nocb_toggle(void *arg)
2730 {
2731 	int cpu;
2732 	int maxcpu = -1;
2733 	int oldnice = task_nice(current);
2734 	long r;
2735 	DEFINE_TORTURE_RANDOM(rand);
2736 	ktime_t toggle_delay;
2737 	unsigned long toggle_fuzz;
2738 	ktime_t toggle_interval = ms_to_ktime(nocbs_toggle);
2739 
2740 	VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started");
2741 	while (!rcu_inkernel_boot_has_ended())
2742 		schedule_timeout_interruptible(HZ / 10);
2743 	for_each_possible_cpu(cpu)
2744 		maxcpu = cpu;
2745 	WARN_ON(maxcpu < 0);
2746 	if (toggle_interval > ULONG_MAX)
2747 		toggle_fuzz = ULONG_MAX >> 3;
2748 	else
2749 		toggle_fuzz = toggle_interval >> 3;
2750 	if (toggle_fuzz <= 0)
2751 		toggle_fuzz = NSEC_PER_USEC;
2752 	do {
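		// The low-order random bit chooses between offloading and
		// de-offloading, and the remaining bits choose the CPU.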
2753 		r = torture_random(&rand);
2754 		cpu = (r >> 1) % (maxcpu + 1);
2755 		if (r & 0x1) {
2756 			rcu_nocb_cpu_offload(cpu);
2757 			atomic_long_inc(&n_nocb_offload);
2758 		} else {
2759 			rcu_nocb_cpu_deoffload(cpu);
2760 			atomic_long_inc(&n_nocb_deoffload);
2761 		}
2762 		toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval;
2763 		set_current_state(TASK_INTERRUPTIBLE);
2764 		schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL);
2765 		if (stutter_wait("rcu_nocb_toggle"))
2766 			sched_set_normal(current, oldnice);
2767 	} while (!torture_must_stop());
2768 	torture_kthread_stopping("rcu_nocb_toggle");
2769 	return 0;
2770 }
2771 
2772 /*
2773  * Print torture statistics.  Caller must ensure that there is only
2774  * one call to this function at a given time!!!  This is normally
2775  * accomplished by relying on the module system to only have one copy
2776  * of the module loaded, and then by giving the rcu_torture_stats
2777  * kthread full control (or the init/cleanup functions when rcu_torture_stats
2778  * thread is not running).
2779  */
2780 static void
2781 rcu_torture_stats_print(void)
2782 {
2783 	int cpu;
2784 	int i;
2785 	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
2786 	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
2787 	long n_gpwraps = 0;
2788 	unsigned long ndowns = 0;
2789 	unsigned long nunexpired = 0;
2790 	unsigned long nmigrates = 0;
2791 	unsigned long nups = 0;
2792 	struct rcu_torture *rtcp;
2793 	static unsigned long rtcv_snap = ULONG_MAX;
2794 	static bool splatted;
2795 	struct task_struct *wtp;
2796 
2797 	for_each_possible_cpu(cpu) {
2798 		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
2799 			pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
2800 			batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
2801 		}
2802 		if (cur_ops->get_gpwrap_count)
2803 			n_gpwraps += cur_ops->get_gpwrap_count(cpu);
2804 	}
2805 	if (updownreaders) {
2806 		for (i = 0; i < n_up_down; i++) {
2807 			ndowns += READ_ONCE(updownreaders[i].rtorsu_ndowns);
2808 			nups += READ_ONCE(updownreaders[i].rtorsu_nups);
2809 			nunexpired += READ_ONCE(updownreaders[i].rtorsu_inuse);
2810 			nmigrates += READ_ONCE(updownreaders[i].rtorsu_nmigrates);
2811 		}
2812 	}
2813 	for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) {
2814 		if (pipesummary[i] != 0)
2815 			break;
2816 	} // The value of variable "i" is used later, so don't clobber it!
2817 
2818 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2819 	rtcp = rcu_access_pointer(rcu_torture_current);
2820 	pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
2821 		rtcp,
2822 		rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
2823 		rcu_torture_current_version,
2824 		list_empty(&rcu_torture_freelist),
2825 		atomic_read(&n_rcu_torture_alloc),
2826 		atomic_read(&n_rcu_torture_alloc_fail),
2827 		atomic_read(&n_rcu_torture_free));
2828 	pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld ",
2829 		atomic_read(&n_rcu_torture_mberror),
2830 		atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries),
2831 		n_rcu_torture_barrier_error,
2832 		n_rcu_torture_boost_ktrerror);
2833 	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
2834 		n_rcu_torture_boost_failure,
2835 		n_rcu_torture_boosts,
2836 		atomic_long_read(&n_rcu_torture_timers));
2837 	if (updownreaders)
2838 		pr_cont("ndowns: %lu nups: %lu nhrt: %lu nmigrates: %lu ", ndowns, nups, nunexpired,  nmigrates);
2839 	torture_onoff_stats();
2840 	pr_cont("barrier: %ld/%ld:%ld ",
2841 		data_race(n_barrier_successes),
2842 		data_race(n_barrier_attempts),
2843 		data_race(n_rcu_torture_barrier_error));
2844 	pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic.
2845 	pr_cont("nocb-toggles: %ld:%ld ",
2846 		atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload));
2847 	pr_cont("gpwraps: %ld\n", n_gpwraps);
2848 
2849 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2850 	if (atomic_read(&n_rcu_torture_mberror) ||
2851 	    atomic_read(&n_rcu_torture_mbchk_fail) ||
2852 	    n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
2853 	    n_rcu_torture_boost_failure || i > 1) {
2854 		pr_cont("%s", "!!! ");
2855 		atomic_inc(&n_rcu_torture_error);
2856 		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
2857 		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail));
2858 		WARN_ON_ONCE(n_rcu_torture_barrier_error);  // rcu_barrier()
2859 		WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
2860 		WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?)
2861 		WARN_ON_ONCE(i > 1); // Too-short grace period
2862 	}
2863 	pr_cont("Reader Pipe: ");
2864 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
2865 		pr_cont(" %ld", pipesummary[i]);
2866 	pr_cont("\n");
2867 
2868 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2869 	pr_cont("Reader Batch: ");
2870 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
2871 		pr_cont(" %ld", batchsummary[i]);
2872 	pr_cont("\n");
2873 
2874 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2875 	pr_cont("Free-Block Circulation: ");
2876 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
2877 		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
2878 	}
2879 	pr_cont("\n");
2880 
2881 	if (cur_ops->stats)
2882 		cur_ops->stats();
2883 	if (rtcv_snap == rcu_torture_current_version &&
2884 	    rcu_access_pointer(rcu_torture_current) &&
2885 	    !rcu_stall_is_suppressed() &&
2886 	    rcu_inkernel_boot_has_ended()) {
2887 		int __maybe_unused flags = 0;
2888 		unsigned long __maybe_unused gp_seq = 0;
2889 
2890 		if (cur_ops->get_gp_data)
2891 			cur_ops->get_gp_data(&flags, &gp_seq);
2892 		wtp = READ_ONCE(writer_task);
2893 		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n",
2894 			 rcu_torture_writer_state_getname(),
2895 			 rcu_torture_writer_state, gp_seq, flags,
2896 			 wtp == NULL ? ~0U : wtp->__state,
2897 			 wtp == NULL ? -1 : (int)task_cpu(wtp));
2898 		if (!splatted && wtp) {
2899 			sched_show_task(wtp);
2900 			splatted = true;
2901 		}
2902 		if (cur_ops->gp_kthread_dbg)
2903 			cur_ops->gp_kthread_dbg();
2904 		rcu_ftrace_dump(DUMP_ALL);
2905 	}
2906 	rtcv_snap = rcu_torture_current_version;
2907 }
2908 
2909 /*
2910  * Periodically prints torture statistics, if periodic statistics printing
2911  * was specified via the stat_interval module parameter.
2912  */
2913 static int
2914 rcu_torture_stats(void *arg)
2915 {
2916 	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
2917 	do {
2918 		schedule_timeout_interruptible(stat_interval * HZ);
2919 		rcu_torture_stats_print();
2920 		torture_shutdown_absorb("rcu_torture_stats");
2921 	} while (!torture_must_stop());
2922 	torture_kthread_stopping("rcu_torture_stats");
2923 	return 0;
2924 }
2925 
2926 /* Test mem_dump_obj() and friends.  */
2927 static void rcu_torture_mem_dump_obj(void)
2928 {
2929 	struct rcu_head *rhp;
2930 	struct kmem_cache *kcp;
2931 	static int z;
2932 
2933 	kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL);
2934 	if (WARN_ON_ONCE(!kcp))
2935 		return;
2936 	rhp = kmem_cache_alloc(kcp, GFP_KERNEL);
2937 	if (WARN_ON_ONCE(!rhp)) {
2938 		kmem_cache_destroy(kcp);
2939 		return;
2940 	}
2941 	pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z);
2942 	pr_alert("mem_dump_obj(ZERO_SIZE_PTR):");
2943 	mem_dump_obj(ZERO_SIZE_PTR);
2944 	pr_alert("mem_dump_obj(NULL):");
2945 	mem_dump_obj(NULL);
2946 	pr_alert("mem_dump_obj(%px):", &rhp);
2947 	mem_dump_obj(&rhp);
2948 	pr_alert("mem_dump_obj(%px):", rhp);
2949 	mem_dump_obj(rhp);
2950 	pr_alert("mem_dump_obj(%px):", &rhp->func);
2951 	mem_dump_obj(&rhp->func);
2952 	pr_alert("mem_dump_obj(%px):", &z);
2953 	mem_dump_obj(&z);
2954 	kmem_cache_free(kcp, rhp);
2955 	kmem_cache_destroy(kcp);
2956 	rhp = kmalloc_obj(*rhp);
2957 	if (WARN_ON_ONCE(!rhp))
2958 		return;
2959 	pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
2960 	pr_alert("mem_dump_obj(kmalloc %px):", rhp);
2961 	mem_dump_obj(rhp);
2962 	pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func);
2963 	mem_dump_obj(&rhp->func);
2964 	kfree(rhp);
2965 	rhp = vmalloc(4096);
2966 	if (WARN_ON_ONCE(!rhp))
2967 		return;
2968 	pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
2969 	pr_alert("mem_dump_obj(vmalloc %px):", rhp);
2970 	mem_dump_obj(rhp);
2971 	pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func);
2972 	mem_dump_obj(&rhp->func);
2973 	vfree(rhp);
2974 }
2975 
2976 static void
2977 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
2978 {
2979 	pr_alert("%s" TORTURE_FLAG
2980 		 "--- %s: nreaders=%d nfakewriters=%d "
2981 		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
2982 		 "shuffle_interval=%d stutter=%d irqreader=%d "
2983 		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
2984 		 "test_boost=%d/%d test_boost_interval=%d "
2985 		 "test_boost_duration=%d test_boost_holdoff=%d shutdown_secs=%d "
2986 		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
2987 		 "stall_cpu_block=%d stall_cpu_repeat=%d "
2988 		 "n_barrier_cbs=%d "
2989 		 "onoff_interval=%d onoff_holdoff=%d "
2990 		 "read_exit_delay=%d read_exit_burst=%d "
2991 		 "reader_flavor=%x "
2992 		 "nocbs_nthreads=%d nocbs_toggle=%d "
2993 		 "test_nmis=%d "
2994 		 "preempt_duration=%d preempt_interval=%d n_up_down=%d\n",
2995 		 torture_type, tag, nrealreaders, nrealfakewriters,
2996 		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
2997 		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
2998 		 test_boost, cur_ops->can_boost,
2999 		 test_boost_interval, test_boost_duration, test_boost_holdoff, shutdown_secs,
3000 		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
3001 		 stall_cpu_block, stall_cpu_repeat,
3002 		 n_barrier_cbs,
3003 		 onoff_interval, onoff_holdoff,
3004 		 read_exit_delay, read_exit_burst,
3005 		 reader_flavor,
3006 		 nocbs_nthreads, nocbs_toggle,
3007 		 test_nmis,
3008 		 preempt_duration, preempt_interval, n_up_down);
3009 }
3010 
3011 static int rcutorture_booster_cleanup(unsigned int cpu)
3012 {
3013 	struct task_struct *t;
3014 
3015 	if (boost_tasks[cpu] == NULL)
3016 		return 0;
3017 	mutex_lock(&boost_mutex);
3018 	t = boost_tasks[cpu];
3019 	boost_tasks[cpu] = NULL;
3020 	rcu_torture_enable_rt_throttle();
3021 	mutex_unlock(&boost_mutex);
3022 
3023 	/* This must be outside of the mutex, otherwise deadlock! */
3024 	torture_stop_kthread(rcu_torture_boost, t);
3025 	return 0;
3026 }
3027 
3028 static int rcutorture_booster_init(unsigned int cpu)
3029 {
3030 	int retval;
3031 
3032 	if (boost_tasks[cpu] != NULL)
3033 		return 0;  /* Already created, nothing more to do. */
3034 
3035 	// Testing RCU priority boosting requires that rcutorture do
3036 	// some serious abuse.  Counter this by running ksoftirqd
3037 	// at higher priority.
3038 	if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) {
3039 		struct sched_param sp;
3040 		struct task_struct *t;
3041 
3042 		t = per_cpu(ksoftirqd, cpu);
3043 		WARN_ON_ONCE(!t);
3044 		sp.sched_priority = 2;
3045 		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
3046 #ifdef CONFIG_IRQ_FORCED_THREADING
3047 		if (force_irqthreads()) {
3048 			t = per_cpu(ktimerd, cpu);
3049 			WARN_ON_ONCE(!t);
3050 			sp.sched_priority = 2;
3051 			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
3052 		}
3053 #endif
3054 	}
3055 
3056 	/* Don't allow time recalculation while creating a new task. */
3057 	mutex_lock(&boost_mutex);
3058 	rcu_torture_disable_rt_throttle();
3059 	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
3060 	boost_tasks[cpu] = kthread_run_on_cpu(rcu_torture_boost, NULL,
3061 					      cpu, "rcu_torture_boost_%u");
3062 	if (IS_ERR(boost_tasks[cpu])) {
3063 		retval = PTR_ERR(boost_tasks[cpu]);
3064 		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
3065 		n_rcu_torture_boost_ktrerror++;
3066 		boost_tasks[cpu] = NULL;
3067 		mutex_unlock(&boost_mutex);
3068 		return retval;
3069 	}
3070 	mutex_unlock(&boost_mutex);
3071 	return 0;
3072 }
3073 
3074 static int rcu_torture_stall_nf(struct notifier_block *nb, unsigned long v, void *ptr)
3075 {
3076 	pr_info("%s: v=%lu, duration=%lu.\n", __func__, v, (unsigned long)ptr);
3077 	return NOTIFY_OK;
3078 }
3079 
3080 static struct notifier_block rcu_torture_stall_block = {
3081 	.notifier_call = rcu_torture_stall_nf,
3082 };
3083 
3084 /*
3085  * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
3086  * induces a CPU stall for the time specified by stall_cpu.  If a new
3087  * stall test is added, stallsdone in rcu_torture_writer() must be adjusted.
3088  */
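/*
 * Illustrative invocation (example values): booting with
 * rcutorture.stall_cpu_holdoff=60 rcutorture.stall_cpu=30 waits one
 * minute after the test starts, then spins within an RCU reader for 30
 * seconds, comfortably exceeding the usual 21-second default of
 * CONFIG_RCU_CPU_STALL_TIMEOUT and thus provoking stall warnings.
 */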
3089 static void rcu_torture_stall_one(int rep, int irqsoff)
3090 {
3091 	int idx;
3092 	unsigned long stop_at;
3093 
3094 	if (stall_cpu_holdoff > 0) {
3095 		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
3096 		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
3097 		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
3098 	}
3099 	if (!kthread_should_stop() && stall_gp_kthread > 0) {
3100 		VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall");
3101 		rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
3102 		for (idx = 0; idx < stall_gp_kthread + 2; idx++) {
3103 			if (kthread_should_stop())
3104 				break;
3105 			schedule_timeout_uninterruptible(HZ);
3106 		}
3107 	}
3108 	if (!kthread_should_stop() && stall_cpu > 0) {
3109 		VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall");
3110 		stop_at = ktime_get_seconds() + stall_cpu;
3111 		/* RCU CPU stall is expected behavior in the following code. */
3112 		idx = cur_ops->readlock();
3113 		if (irqsoff)
3114 			local_irq_disable();
3115 		else if (!stall_cpu_block)
3116 			preempt_disable();
3117 		pr_alert("%s start stall episode %d on CPU %d.\n",
3118 			  __func__, rep + 1, raw_smp_processor_id());
3119 		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), stop_at) &&
3120 		       !kthread_should_stop())
3121 			if (stall_cpu_block) {
3122 #ifdef CONFIG_PREEMPTION
3123 				preempt_schedule();
3124 #else
3125 				schedule_timeout_uninterruptible(HZ);
3126 #endif
3127 			} else if (stall_no_softlockup) {
3128 				touch_softlockup_watchdog();
3129 			}
3130 		if (irqsoff)
3131 			local_irq_enable();
3132 		else if (!stall_cpu_block)
3133 			preempt_enable();
3134 		cur_ops->readunlock(idx);
3135 	}
3136 }
3137 
3138 /*
3139  * CPU-stall kthread.  Invokes rcu_torture_stall_one() once, and then as many
3140  * additional times as specified by the stall_cpu_repeat module parameter.
3141  * Note that stall_cpu_irqsoff is ignored on the second and subsequent
3142  * stall.
3143  */
3144 static int rcu_torture_stall(void *args)
3145 {
3146 	int i;
3147 	int repeat = stall_cpu_repeat;
3148 	int ret;
3149 
3150 	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
3151 	if (repeat < 0) {
3152 		repeat = 0;
3153 		WARN_ON_ONCE(IS_BUILTIN(CONFIG_RCU_TORTURE_TEST));
3154 	}
3155 	if (rcu_cpu_stall_notifiers) {
3156 		ret = rcu_stall_chain_notifier_register(&rcu_torture_stall_block);
3157 		if (ret)
3158 			pr_info("%s: rcu_stall_chain_notifier_register() returned %d, %sexpected.\n",
3159 				__func__, ret, !IS_ENABLED(CONFIG_RCU_STALL_COMMON) ? "un" : "");
3160 	}
3161 	for (i = 0; i <= repeat; i++) {
3162 		if (kthread_should_stop())
3163 			break;
3164 		rcu_torture_stall_one(i, i == 0 ? stall_cpu_irqsoff : 0);
3165 	}
3166 	pr_alert("%s end.\n", __func__);
3167 	if (rcu_cpu_stall_notifiers && !ret) {
3168 		ret = rcu_stall_chain_notifier_unregister(&rcu_torture_stall_block);
3169 		if (ret)
3170 			pr_info("%s: rcu_stall_chain_notifier_unregister() returned %d.\n", __func__, ret);
3171 	}
3172 	torture_shutdown_absorb("rcu_torture_stall");
3173 	while (!kthread_should_stop())
3174 		schedule_timeout_interruptible(10 * HZ);
3175 	return 0;
3176 }
3177 
3178 /* Spawn CPU-stall kthread, if stall_cpu specified. */
3179 static int __init rcu_torture_stall_init(void)
3180 {
3181 	if (stall_cpu <= 0 && stall_gp_kthread <= 0)
3182 		return 0;
3183 	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
3184 }
3185 
3186 /* State structure for forward-progress self-propagating RCU callback. */
3187 struct fwd_cb_state {
3188 	struct rcu_head rh;
3189 	int stop;
3190 };
3191 
3192 /*
3193  * Forward-progress self-propagating RCU callback function.  Because
3194  * callbacks run from softirq, this function is an implicit RCU read-side
3195  * critical section.
3196  */
3197 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
3198 {
3199 	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
3200 
3201 	if (READ_ONCE(fcsp->stop)) {
3202 		WRITE_ONCE(fcsp->stop, 2);
3203 		return;
3204 	}
3205 	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
3206 }
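/*
 * The ->stop field thus forms a small handshake: the test writes 1 to
 * request that the callback stop re-posting itself, and the callback
 * writes 2 to acknowledge, which rcu_torture_fwd_prog_nr() later checks
 * via WARN_ON(READ_ONCE(fcs.stop) != 2).
 */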
3207 
3208 /* State for continuous-flood RCU callbacks. */
3209 struct rcu_fwd_cb {
3210 	struct rcu_head rh;
3211 	struct rcu_fwd_cb *rfc_next;
3212 	struct rcu_fwd *rfc_rfp;
3213 	int rfc_gps;
3214 };
3215 
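/*
 * A callback is "laundered" each time it is invoked and then requeued,
 * and ->rfc_gps counts those trips through the grace-period machinery.
 * A minimal sketch of the lifecycle (hypothetical "ops" flavor pointer
 * for illustration only):
 *
 *	rfcp->rfc_gps = 0;
 *	ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);	// Initial post.
 *	// After each grace period, rcu_torture_fwd_cb_cr() increments
 *	// ->rfc_gps and requeues the element, and the main loop in
 *	// rcu_torture_fwd_prog_cr() may then re-post it.
 */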
3216 #define MAX_FWD_CB_JIFFIES	(8 * HZ) /* Maximum CB test duration. */
3217 #define MIN_FWD_CB_LAUNDERS	3	/* This many CB invocations to count. */
3218 #define MIN_FWD_CBS_LAUNDERED	100	/* Number of counted CBs. */
3219 #define FWD_CBS_HIST_DIV	10	/* Histogram buckets/second. */
3220 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
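/*
 * Worked example: N_LAUNDERS_HIST = 2 * (8 * HZ) / (HZ / 10) = 160
 * buckets independent of HZ, each bucket covering a tenth of a second,
 * for a 16-second span -- twice MAX_FWD_CB_JIFFIES, leaving slack for
 * overrun.
 */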
3221 
3222 struct rcu_launder_hist {
3223 	long n_launders;
3224 	unsigned long launder_gp_seq;
3225 };
3226 
3227 struct rcu_fwd {
3228 	spinlock_t rcu_fwd_lock;
3229 	struct rcu_fwd_cb *rcu_fwd_cb_head;
3230 	struct rcu_fwd_cb **rcu_fwd_cb_tail;
3231 	long n_launders_cb;
3232 	unsigned long rcu_fwd_startat;
3233 	struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
3234 	unsigned long rcu_launder_gp_seq_start;
3235 	int rcu_fwd_id;
3236 };
3237 
3238 static DEFINE_MUTEX(rcu_fwd_mutex);
3239 static struct rcu_fwd *rcu_fwds;
3240 static unsigned long rcu_fwd_seq;
3241 static atomic_long_t rcu_fwd_max_cbs;
3242 static bool rcu_fwd_emergency_stop;
3243 
3244 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
3245 {
3246 	unsigned long gps;
3247 	unsigned long gps_old;
3248 	int i;
3249 	int j;
3250 
3251 	for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
3252 		if (rfp->n_launders_hist[i].n_launders > 0)
3253 			break;
3254 	pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):",
3255 		 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat);
3256 	gps_old = rfp->rcu_launder_gp_seq_start;
3257 	for (j = 0; j <= i; j++) {
3258 		gps = rfp->n_launders_hist[j].launder_gp_seq;
3259 		pr_cont(" %ds/%d: %ld:%ld",
3260 			j + 1, FWD_CBS_HIST_DIV,
3261 			rfp->n_launders_hist[j].n_launders,
3262 			rcutorture_seq_diff(gps, gps_old));
3263 		gps_old = gps;
3264 	}
3265 	pr_cont("\n");
3266 }
3267 
3268 /* Callback function for continuous-flood RCU callbacks. */
3269 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
3270 {
3271 	unsigned long flags;
3272 	int i;
3273 	struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
3274 	struct rcu_fwd_cb **rfcpp;
3275 	struct rcu_fwd *rfp = rfcp->rfc_rfp;
3276 
3277 	rfcp->rfc_next = NULL;
3278 	rfcp->rfc_gps++;
3279 	spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
3280 	rfcpp = rfp->rcu_fwd_cb_tail;
3281 	rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
3282 	smp_store_release(rfcpp, rfcp);
3283 	WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
3284 	i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
3285 	if (i >= ARRAY_SIZE(rfp->n_launders_hist))
3286 		i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
3287 	rfp->n_launders_hist[i].n_launders++;
3288 	rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
3289 	spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
3290 }
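/*
 * Note the publication protocol above: ->rfc_next is cleared and the new
 * tail recorded before smp_store_release() makes the element reachable,
 * so rcu_torture_fwd_prog_cr()'s READ_ONCE() traversal never observes a
 * partially initialized element.
 */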
3291 
3292 // Give the scheduler a chance, even on nohz_full CPUs.
3293 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
3294 {
3295 	if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
3296 		// Real call_rcu() floods hit userspace, so emulate that.
3297 		if (need_resched() || (iter & 0xfff))
3298 			schedule();
3299 		return;
3300 	}
3301 	// No userspace emulation: CB invocation throttles call_rcu()
3302 	cond_resched();
3303 }
3304 
3305 /*
3306  * Free all callbacks on the rcu_fwd_cb_head list, either because the
3307  * test is over or because we hit an OOM event.
3308  */
3309 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
3310 {
3311 	unsigned long flags;
3312 	unsigned long freed = 0;
3313 	struct rcu_fwd_cb *rfcp;
3314 
3315 	for (;;) {
3316 		spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
3317 		rfcp = rfp->rcu_fwd_cb_head;
3318 		if (!rfcp) {
3319 			spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
3320 			break;
3321 		}
3322 		rfp->rcu_fwd_cb_head = rfcp->rfc_next;
3323 		if (!rfp->rcu_fwd_cb_head)
3324 			rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
3325 		spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
3326 		kfree(rfcp);
3327 		freed++;
3328 		rcu_torture_fwd_prog_cond_resched(freed);
3329 		if (tick_nohz_full_enabled()) {
3330 			local_irq_save(flags);
3331 			rcu_momentary_eqs();
3332 			local_irq_restore(flags);
3333 		}
3334 	}
3335 	return freed;
3336 }
3337 
3338 /* Carry out need_resched()/cond_resched() forward-progress testing. */
3339 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
3340 				    int *tested, int *tested_tries)
3341 {
3342 	unsigned long cver;
3343 	unsigned long dur;
3344 	struct fwd_cb_state fcs;
3345 	unsigned long gps;
3346 	int idx;
3347 	int sd;
3348 	int sd4;
3349 	bool selfpropcb = false;
3350 	unsigned long stopat;
3351 	static DEFINE_TORTURE_RANDOM(trs);
3352 
3353 	pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
3354 	if (!cur_ops->sync)
3355 		return; // Cannot do need_resched() forward progress testing without ->sync.
3356 	if (cur_ops->call && cur_ops->cb_barrier) {
3357 		init_rcu_head_on_stack(&fcs.rh);
3358 		selfpropcb = true;
3359 	}
3360 
3361 	/* Tight loop containing cond_resched(). */
3362 	atomic_inc(&rcu_fwd_cb_nodelay);
3363 	cur_ops->sync(); /* Later readers see above write. */
3364 	if (selfpropcb) {
3365 		WRITE_ONCE(fcs.stop, 0);
3366 		cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
3367 	}
3368 	cver = READ_ONCE(rcu_torture_current_version);
3369 	gps = cur_ops->get_gp_seq();
3370 	sd = cur_ops->stall_dur() + 1;
3371 	sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
3372 	dur = sd4 + torture_random(&trs) % (sd - sd4);
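	/*
	 * With the default fwd_progress_div of 4, sd4 is about sd/4 and
	 * dur is uniformly distributed in [sd4, sd), so the loop below
	 * never runs past cur_ops->stall_dur() and thus should not
	 * itself trigger stall warnings.
	 */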
3373 	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
3374 	stopat = rfp->rcu_fwd_startat + dur;
3375 	while (time_before(jiffies, stopat) &&
3376 	       !shutdown_time_arrived() &&
3377 	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
3378 		idx = cur_ops->readlock();
3379 		udelay(10);
3380 		cur_ops->readunlock(idx);
3381 		if (!fwd_progress_need_resched || need_resched())
3382 			cond_resched();
3383 	}
3384 	(*tested_tries)++;
3385 	if (!time_before(jiffies, stopat) &&
3386 	    !shutdown_time_arrived() &&
3387 	    !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
3388 		(*tested)++;
3389 		cver = READ_ONCE(rcu_torture_current_version) - cver;
3390 		gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
3391 		WARN_ON(!cver && gps < 2);
3392 		pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__,
3393 			 rfp->rcu_fwd_id, dur, cver, gps);
3394 	}
3395 	if (selfpropcb) {
3396 		WRITE_ONCE(fcs.stop, 1);
3397 		cur_ops->sync(); /* Wait for running CB to complete. */
3398 		pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
3399 		cur_ops->cb_barrier(); /* Wait for queued callbacks. */
3400 	}
3401 
3402 	if (selfpropcb) {
3403 		WARN_ON(READ_ONCE(fcs.stop) != 2);
3404 		destroy_rcu_head_on_stack(&fcs.rh);
3405 	}
3406 	schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
3407 	atomic_dec(&rcu_fwd_cb_nodelay);
3408 }
3409 
3410 /* Carry out call_rcu() forward-progress testing. */
3411 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
3412 {
3413 	unsigned long cver;
3414 	unsigned long flags;
3415 	unsigned long gps;
3416 	int i;
3417 	long n_launders;
3418 	long n_launders_cb_snap;
3419 	long n_launders_sa;
3420 	long n_max_cbs;
3421 	long n_max_gps;
3422 	struct rcu_fwd_cb *rfcp;
3423 	struct rcu_fwd_cb *rfcpn;
3424 	unsigned long stopat;
3425 	unsigned long stoppedat;
3426 
3427 	pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
3428 	if (READ_ONCE(rcu_fwd_emergency_stop))
3429 		return; /* Get out of the way quickly, no GP wait! */
3430 	if (!cur_ops->call)
3431 		return; /* Can't do call_rcu() fwd prog without ->call. */
3432 
3433 	/* Loop continuously posting RCU callbacks. */
3434 	atomic_inc(&rcu_fwd_cb_nodelay);
3435 	cur_ops->sync(); /* Later readers see above write. */
3436 	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
3437 	stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
3438 	n_launders = 0;
3439 	rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread runs.
3440 	n_launders_sa = 0;
3441 	n_max_cbs = 0;
3442 	n_max_gps = 0;
3443 	for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
3444 		rfp->n_launders_hist[i].n_launders = 0;
3445 	cver = READ_ONCE(rcu_torture_current_version);
3446 	gps = cur_ops->get_gp_seq();
3447 	rfp->rcu_launder_gp_seq_start = gps;
3448 	tick_dep_set_task(current, TICK_DEP_BIT_RCU);  // CPU bound, so need tick.
3449 	while (time_before(jiffies, stopat) &&
3450 	       !shutdown_time_arrived() &&
3451 	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
3452 		rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
3453 		rfcpn = NULL;
3454 		if (rfcp)
3455 			rfcpn = READ_ONCE(rfcp->rfc_next);
3456 		if (rfcpn) {
3457 			if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
3458 			    ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
3459 				break;
3460 			rfp->rcu_fwd_cb_head = rfcpn;
3461 			n_launders++;
3462 			n_launders_sa++;
3463 		} else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) {
3464 			rfcp = kmalloc_obj(*rfcp);
3465 			if (WARN_ON_ONCE(!rfcp)) {
3466 				schedule_timeout_interruptible(1);
3467 				continue;
3468 			}
3469 			n_max_cbs++;
3470 			n_launders_sa = 0;
3471 			rfcp->rfc_gps = 0;
3472 			rfcp->rfc_rfp = rfp;
3473 		} else {
3474 			rfcp = NULL;
3475 		}
3476 		if (rfcp)
3477 			cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
3478 		rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
3479 		if (tick_nohz_full_enabled()) {
3480 			local_irq_save(flags);
3481 			rcu_momentary_eqs();
3482 			local_irq_restore(flags);
3483 		}
3484 	}
3485 	stoppedat = jiffies;
3486 	n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
3487 	cver = READ_ONCE(rcu_torture_current_version) - cver;
3488 	gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
3489 	pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
3490 	cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
3491 	(void)rcu_torture_fwd_prog_cbfree(rfp);
3492 
3493 	if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
3494 	    !shutdown_time_arrived()) {
3495 		if (WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED) && cur_ops->gp_kthread_dbg)
3496 			cur_ops->gp_kthread_dbg();
3497 		pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld #online %u\n",
3498 			 __func__,
3499 			 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
3500 			 n_launders + n_max_cbs - n_launders_cb_snap,
3501 			 n_launders, n_launders_sa,
3502 			 n_max_gps, n_max_cbs, cver, gps, num_online_cpus());
3503 		atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs);
3504 		mutex_lock(&rcu_fwd_mutex); // Serialize histograms.
3505 		rcu_torture_fwd_cb_hist(rfp);
3506 		mutex_unlock(&rcu_fwd_mutex);
3507 	}
3508 	schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
3509 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
3510 	atomic_dec(&rcu_fwd_cb_nodelay);
3511 }
3512 
3513 
3514 /*
3515  * OOM notifier, which only prints diagnostic information for the
3516  * current forward-progress test.
3517  */
3518 static int rcutorture_oom_notify(struct notifier_block *self,
3519 				 unsigned long notused, void *nfreed)
3520 {
3521 	int i;
3522 	long ncbs;
3523 	struct rcu_fwd *rfp;
3524 
3525 	mutex_lock(&rcu_fwd_mutex);
3526 	rfp = rcu_fwds;
3527 	if (!rfp) {
3528 		mutex_unlock(&rcu_fwd_mutex);
3529 		return NOTIFY_OK;
3530 	}
3531 	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
3532 	     __func__);
3533 	for (i = 0; i < fwd_progress; i++) {
3534 		rcu_torture_fwd_cb_hist(&rfp[i]);
3535 		rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2);
3536 	}
3537 	WRITE_ONCE(rcu_fwd_emergency_stop, true);
3538 	smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
3539 	ncbs = 0;
3540 	for (i = 0; i < fwd_progress; i++)
3541 		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
3542 	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
3543 	cur_ops->cb_barrier();
3544 	ncbs = 0;
3545 	for (i = 0; i < fwd_progress; i++)
3546 		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
3547 	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
3548 	cur_ops->cb_barrier();
3549 	ncbs = 0;
3550 	for (i = 0; i < fwd_progress; i++)
3551 		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
3552 	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
3553 	smp_mb(); /* Frees before return to avoid redoing OOM. */
3554 	(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
3555 	pr_info("%s returning after OOM processing.\n", __func__);
3556 	mutex_unlock(&rcu_fwd_mutex);
3557 	return NOTIFY_OK;
3558 }
3559 
3560 static struct notifier_block rcutorture_oom_nb = {
3561 	.notifier_call = rcutorture_oom_notify
3562 };
3563 
3564 /* Carry out grace-period forward-progress testing. */
3565 static int rcu_torture_fwd_prog(void *args)
3566 {
3567 	bool firsttime = true;
3568 	long max_cbs;
3569 	int oldnice = task_nice(current);
3570 	unsigned long oldseq = READ_ONCE(rcu_fwd_seq);
3571 	struct rcu_fwd *rfp = args;
3572 	int tested = 0;
3573 	int tested_tries = 0;
3574 
3575 	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
3576 	while (!rcu_inkernel_boot_has_ended())
3577 		schedule_timeout_interruptible(HZ / 10);
3578 	rcu_bind_current_to_nocb();
3579 	if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
3580 		set_user_nice(current, MAX_NICE);
3581 	do {
3582 		if (!rfp->rcu_fwd_id) {
3583 			schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
3584 			WRITE_ONCE(rcu_fwd_emergency_stop, false);
3585 			if (!firsttime) {
3586 				max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0);
3587 				pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs);
3588 			}
3589 			firsttime = false;
3590 			WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1);
3591 		} else {
3592 			while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop())
3593 				schedule_timeout_interruptible(HZ / 20);
3594 			oldseq = READ_ONCE(rcu_fwd_seq);
3595 		}
3596 		pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
3597 		if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id)
3598 			rcu_torture_fwd_prog_cr(rfp);
3599 		if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) &&
3600 		    (!IS_ENABLED(CONFIG_TINY_RCU) ||
3601 		     (rcu_inkernel_boot_has_ended() &&
3602 		      torture_num_online_cpus() > rfp->rcu_fwd_id)))
3603 			rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
3604 
3605 		/* Avoid slow periods, better to test when busy. */
3606 		if (stutter_wait("rcu_torture_fwd_prog"))
3607 			sched_set_normal(current, oldnice);
3608 	} while (!torture_must_stop());
3609 	/* Short runs might not contain a valid forward-progress attempt. */
3610 	if (!rfp->rcu_fwd_id) {
3611 		WARN_ON(!tested && tested_tries >= 5);
3612 		pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
3613 	}
3614 	torture_kthread_stopping("rcu_torture_fwd_prog");
3615 	return 0;
3616 }
3617 
3618 /* If forward-progress checking is requested and feasible, spawn the thread. */
3619 static int __init rcu_torture_fwd_prog_init(void)
3620 {
3621 	int i;
3622 	int ret = 0;
3623 	struct rcu_fwd *rfp;
3624 
3625 	if (!fwd_progress)
3626 		return 0; /* Not requested, so don't do it. */
3627 	if (fwd_progress >= nr_cpu_ids) {
3628 		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.");
3629 		fwd_progress = nr_cpu_ids;
3630 	} else if (fwd_progress < 0) {
3631 		fwd_progress = nr_cpu_ids;
3632 	}
3633 	if ((!cur_ops->sync && !cur_ops->call) ||
3634 	    (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) ||
3635 	    cur_ops == &rcu_busted_ops) {
3636 		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
3637 		fwd_progress = 0;
3638 		return 0;
3639 	}
3640 	if (stall_cpu > 0 || (preempt_duration > 0 && IS_ENABLED(CONFIG_RCU_NOCB_CPU))) {
3641 		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall and/or preemption testing");
3642 		fwd_progress = 0;
3643 		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
3644 			return -EINVAL; /* In module, can fail back to user. */
3645 		WARN_ON(1); /* Make sure rcutorture scripting notices conflict. */
3646 		return 0;
3647 	}
3648 	if (fwd_progress_holdoff <= 0)
3649 		fwd_progress_holdoff = 1;
3650 	if (fwd_progress_div <= 0)
3651 		fwd_progress_div = 4;
3652 	rfp = kzalloc_objs(*rfp, fwd_progress);
3653 	fwd_prog_tasks = kzalloc_objs(*fwd_prog_tasks, fwd_progress);
3654 	if (!rfp || !fwd_prog_tasks) {
3655 		kfree(rfp);
3656 		kfree(fwd_prog_tasks);
3657 		fwd_prog_tasks = NULL;
3658 		fwd_progress = 0;
3659 		return -ENOMEM;
3660 	}
3661 	for (i = 0; i < fwd_progress; i++) {
3662 		spin_lock_init(&rfp[i].rcu_fwd_lock);
3663 		rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head;
3664 		rfp[i].rcu_fwd_id = i;
3665 	}
3666 	mutex_lock(&rcu_fwd_mutex);
3667 	rcu_fwds = rfp;
3668 	mutex_unlock(&rcu_fwd_mutex);
3669 	register_oom_notifier(&rcutorture_oom_nb);
3670 	for (i = 0; i < fwd_progress; i++) {
3671 		ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]);
3672 		if (ret) {
3673 			fwd_progress = i;
3674 			return ret;
3675 		}
3676 	}
3677 	return 0;
3678 }
3679 
3680 static void rcu_torture_fwd_prog_cleanup(void)
3681 {
3682 	int i;
3683 	struct rcu_fwd *rfp;
3684 
3685 	if (!rcu_fwds || !fwd_prog_tasks)
3686 		return;
3687 	for (i = 0; i < fwd_progress; i++)
3688 		torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]);
3689 	unregister_oom_notifier(&rcutorture_oom_nb);
3690 	mutex_lock(&rcu_fwd_mutex);
3691 	rfp = rcu_fwds;
3692 	rcu_fwds = NULL;
3693 	mutex_unlock(&rcu_fwd_mutex);
3694 	kfree(rfp);
3695 	kfree(fwd_prog_tasks);
3696 	fwd_prog_tasks = NULL;
3697 }
3698 
3699 /* Callback function for RCU barrier testing. */
3700 static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
3701 {
3702 	atomic_inc(&barrier_cbs_invoked);
3703 }
3704 
3705 /* IPI handler to get callback posted on desired CPU, if online. */
3706 static int rcu_torture_barrier1cb(void *rcu_void)
3707 {
3708 	struct rcu_head *rhp = rcu_void;
3709 
3710 	cur_ops->call(rhp, rcu_torture_barrier_cbf);
3711 	return 0;
3712 }
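/*
 * smp_call_on_cpu() runs the function above on the target CPU, so the
 * callback lands on that CPU's callback list when the CPU is online;
 * on failure, the caller below falls back to posting from whatever CPU
 * it happens to be running on.
 */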
3713 
3714 /* kthread function to register callbacks used to test RCU barriers. */
3715 static int rcu_torture_barrier_cbs(void *arg)
3716 {
3717 	long myid = (long)arg;
3718 	bool lastphase = false;
3719 	bool newphase;
3720 	struct rcu_head rcu;
3721 
3722 	init_rcu_head_on_stack(&rcu);
3723 	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
3724 	set_user_nice(current, MAX_NICE);
3725 	do {
3726 		wait_event(barrier_cbs_wq[myid],
3727 			   (newphase =
3728 			    smp_load_acquire(&barrier_phase)) != lastphase ||
3729 			   torture_must_stop());
3730 		lastphase = newphase;
3731 		if (torture_must_stop())
3732 			break;
3733 		/*
3734 		 * The above smp_load_acquire() ensures barrier_phase load
3735 		 * is ordered before the following ->call().
3736 		 */
3737 		if (smp_call_on_cpu(myid, rcu_torture_barrier1cb, &rcu, 1))
3738 			cur_ops->call(&rcu, rcu_torture_barrier_cbf);
3739 
3740 		if (atomic_dec_and_test(&barrier_cbs_count))
3741 			wake_up(&barrier_wq);
3742 	} while (!torture_must_stop());
3743 	if (cur_ops->cb_barrier != NULL)
3744 		cur_ops->cb_barrier();
3745 	destroy_rcu_head_on_stack(&rcu);
3746 	torture_kthread_stopping("rcu_torture_barrier_cbs");
3747 	return 0;
3748 }
3749 
3750 /* kthread function to drive and coordinate RCU barrier testing. */
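/*
 * One barrier round in outline:
 *	1. Reset barrier_cbs_invoked and set barrier_cbs_count to
 *	   n_barrier_cbs.
 *	2. Flip barrier_phase with release semantics and wake all of the
 *	   rcu_torture_barrier_cbs() kthreads, each of which posts one
 *	   callback and decrements barrier_cbs_count.
 *	3. Once barrier_cbs_count hits zero, invoke cur_ops->cb_barrier()
 *	   and verify that all n_barrier_cbs callbacks ran -- anything
 *	   less indicates a broken barrier implementation.
 */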
3751 static int rcu_torture_barrier(void *arg)
3752 {
3753 	int i;
3754 
3755 	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
3756 	do {
3757 		atomic_set(&barrier_cbs_invoked, 0);
3758 		atomic_set(&barrier_cbs_count, n_barrier_cbs);
3759 		/* Ensure barrier_phase ordered after prior assignments. */
3760 		smp_store_release(&barrier_phase, !barrier_phase);
3761 		for (i = 0; i < n_barrier_cbs; i++)
3762 			wake_up(&barrier_cbs_wq[i]);
3763 		wait_event(barrier_wq,
3764 			   atomic_read(&barrier_cbs_count) == 0 ||
3765 			   torture_must_stop());
3766 		if (torture_must_stop())
3767 			break;
3768 		n_barrier_attempts++;
3769 		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
3770 		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
3771 			n_rcu_torture_barrier_error++;
3772 			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
3773 			       atomic_read(&barrier_cbs_invoked),
3774 			       n_barrier_cbs);
3775 			WARN_ON(1);
3776 			// Wait manually for the remaining callbacks
3777 			i = 0;
3778 			do {
3779 				if (WARN_ON(i++ > HZ))
3780 					i = INT_MIN;
3781 				schedule_timeout_interruptible(1);
3782 				cur_ops->cb_barrier();
3783 			} while (atomic_read(&barrier_cbs_invoked) !=
3784 				 n_barrier_cbs &&
3785 				 !torture_must_stop());
3786 			smp_mb(); // Can't trust ordering if broken.
3787 			if (!torture_must_stop())
3788 				pr_err("Recovered: barrier_cbs_invoked = %d\n",
3789 				       atomic_read(&barrier_cbs_invoked));
3790 		} else {
3791 			n_barrier_successes++;
3792 		}
3793 		schedule_timeout_interruptible(HZ / 10);
3794 	} while (!torture_must_stop());
3795 	torture_kthread_stopping("rcu_torture_barrier");
3796 	return 0;
3797 }
3798 
3799 /* Initialize RCU barrier testing. */
3800 static int rcu_torture_barrier_init(void)
3801 {
3802 	int i;
3803 	int ret;
3804 
3805 	if (n_barrier_cbs <= 0)
3806 		return 0;
3807 	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
3808 		pr_alert("%s" TORTURE_FLAG
3809 			 " Call or barrier ops missing for %s,\n",
3810 			 torture_type, cur_ops->name);
3811 		pr_alert("%s" TORTURE_FLAG
3812 			 " RCU barrier testing omitted from run.\n",
3813 			 torture_type);
3814 		return 0;
3815 	}
3816 	atomic_set(&barrier_cbs_count, 0);
3817 	atomic_set(&barrier_cbs_invoked, 0);
3818 	barrier_cbs_tasks =
3819 		kzalloc_objs(barrier_cbs_tasks[0], n_barrier_cbs);
3820 	barrier_cbs_wq =
3821 		kzalloc_objs(barrier_cbs_wq[0], n_barrier_cbs);
3822 	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
3823 		return -ENOMEM;
3824 	for (i = 0; i < n_barrier_cbs; i++) {
3825 		init_waitqueue_head(&barrier_cbs_wq[i]);
3826 		ret = torture_create_kthread(rcu_torture_barrier_cbs,
3827 					     (void *)(long)i,
3828 					     barrier_cbs_tasks[i]);
3829 		if (ret)
3830 			return ret;
3831 	}
3832 	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
3833 }
3834 
3835 /* Clean up after RCU barrier testing. */
3836 static void rcu_torture_barrier_cleanup(void)
3837 {
3838 	int i;
3839 
3840 	torture_stop_kthread(rcu_torture_barrier, barrier_task);
3841 	if (barrier_cbs_tasks != NULL) {
3842 		for (i = 0; i < n_barrier_cbs; i++)
3843 			torture_stop_kthread(rcu_torture_barrier_cbs,
3844 					     barrier_cbs_tasks[i]);
3845 		kfree(barrier_cbs_tasks);
3846 		barrier_cbs_tasks = NULL;
3847 	}
3848 	if (barrier_cbs_wq != NULL) {
3849 		kfree(barrier_cbs_wq);
3850 		barrier_cbs_wq = NULL;
3851 	}
3852 }
3853 
3854 static bool rcu_torture_can_boost(void)
3855 {
3856 	static int boost_warn_once;
3857 	int prio;
3858 
3859 	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
3860 		return false;
3861 	if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)
3862 		return false;
3863 
3864 	prio = rcu_get_gp_kthreads_prio();
3865 	if (!prio)
3866 		return false;
3867 
3868 	if (prio < 2) {
3869 		if (boost_warn_once == 1)
3870 			return false;
3871 
3872 		pr_alert("%s: WARN: RCU kthread priority too low to test boosting.  Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
3873 		boost_warn_once = 1;
3874 		return false;
3875 	}
3876 
3877 	return true;
3878 }
3879 
3880 static bool read_exit_child_stop;
3881 static bool read_exit_child_stopped;
3882 static wait_queue_head_t read_exit_wq;
3883 
3884 // Child kthread which just does an rcutorture reader and exits.
3885 static int rcu_torture_read_exit_child(void *trsp_in)
3886 {
3887 	struct torture_random_state *trsp = trsp_in;
3888 
3889 	set_user_nice(current, MAX_NICE);
3890 	// Minimize time between reading and exiting.
3891 	while (!kthread_should_stop())
3892 		schedule_timeout_uninterruptible(HZ / 20);
3893 	(void)rcu_torture_one_read(trsp, -1);
3894 	return 0;
3895 }
3896 
3897 // Parent kthread which creates and destroys read-exit child kthreads.
3898 static int rcu_torture_read_exit(void *unused)
3899 {
3900 	bool errexit = false;
3901 	int i;
3902 	struct task_struct *tsp;
3903 	DEFINE_TORTURE_RANDOM(trs);
3904 
3905 	// Allocate and initialize.
3906 	set_user_nice(current, MAX_NICE);
3907 	VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test");
3908 
3909 	// Each pass through this loop does one read-exit episode.
3910 	do {
3911 		VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode");
3912 		for (i = 0; i < read_exit_burst; i++) {
3913 			if (READ_ONCE(read_exit_child_stop))
3914 				break;
3915 			stutter_wait("rcu_torture_read_exit");
3916 			// Spawn child.
3917 			tsp = kthread_run(rcu_torture_read_exit_child,
3918 					  &trs, "%s", "rcu_torture_read_exit_child");
3919 			if (IS_ERR(tsp)) {
3920 				TOROUT_ERRSTRING("out of memory");
3921 				errexit = true;
3922 				break;
3923 			}
3924 			cond_resched();
3925 			kthread_stop(tsp);
3926 			n_read_exits++;
3927 		}
3928 		VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode");
3929 		rcu_barrier(); // Wait for task_struct free, avoid OOM.
3930 		i = 0;
3931 		for (; !errexit && !READ_ONCE(read_exit_child_stop) && i < read_exit_delay; i++)
3932 			schedule_timeout_uninterruptible(HZ);
3933 	} while (!errexit && !READ_ONCE(read_exit_child_stop));
3934 
3935 	// Clean up and exit.
3936 	smp_store_release(&read_exit_child_stopped, true); // After reaping.
3937 	smp_mb(); // Store before wakeup.
3938 	wake_up(&read_exit_wq);
3939 	while (!torture_must_stop())
3940 		schedule_timeout_uninterruptible(HZ / 20);
3941 	torture_kthread_stopping("rcu_torture_read_exit");
3942 	return 0;
3943 }
3944 
3945 static int rcu_torture_read_exit_init(void)
3946 {
3947 	if (read_exit_burst <= 0)
3948 		return 0;
3949 	init_waitqueue_head(&read_exit_wq);
3950 	read_exit_child_stop = false;
3951 	read_exit_child_stopped = false;
3952 	return torture_create_kthread(rcu_torture_read_exit, NULL,
3953 				      read_exit_task);
3954 }
3955 
3956 static void rcu_torture_read_exit_cleanup(void)
3957 {
3958 	if (!read_exit_task)
3959 		return;
3960 	WRITE_ONCE(read_exit_child_stop, true);
3961 	smp_mb(); // Above write before wait.
3962 	wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
3963 	torture_stop_kthread(rcu_torture_read_exit, read_exit_task);
3964 }
3965 
3966 static void rcutorture_test_nmis(int n)
3967 {
3968 #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
3969 	int cpu;
3970 	int dumpcpu;
3971 	int i;
3972 
3973 	for (i = 0; i < n; i++) {
3974 		preempt_disable();
3975 		cpu = smp_processor_id();
3976 		dumpcpu = cpu + 1;
3977 		if (dumpcpu >= nr_cpu_ids)
3978 			dumpcpu = 0;
3979 		pr_alert("%s: CPU %d invoking dump_cpu_task(%d)\n", __func__, cpu, dumpcpu);
3980 		dump_cpu_task(dumpcpu);
3981 		preempt_enable();
3982 		schedule_timeout_uninterruptible(15 * HZ);
3983 	}
3984 #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
3985 	WARN_ONCE(n, "Non-zero rcutorture.test_nmis=%d permitted only when rcutorture is built in.\n", test_nmis);
3986 #endif // #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
3987 }
3988 
3989 // Randomly preempt online CPUs.
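// Each pass sleeps roughly preempt_interval milliseconds, picks the
// next online CPU (wrapping via cpumask_next(-1, ...)), migrates there,
// busy-waits at SCHED_FIFO for preempt_duration milliseconds, and then
// drops back to SCHED_NORMAL.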
3990 static int rcu_torture_preempt(void *unused)
3991 {
3992 	int cpu = -1;
3993 	DEFINE_TORTURE_RANDOM(rand);
3994 
3995 	schedule_timeout_idle(stall_cpu_holdoff);
3996 	do {
3997 		// Wait for preempt_interval ms with up to 100us fuzz.
3998 		torture_hrtimeout_ms(preempt_interval, 100, &rand);
3999 		// Select online CPU.
4000 		cpu = cpumask_next(cpu, cpu_online_mask);
4001 		if (cpu >= nr_cpu_ids)
4002 			cpu = cpumask_next(-1, cpu_online_mask);
4003 		WARN_ON_ONCE(cpu >= nr_cpu_ids);
4004 		// Move to that CPU; if that fails, retry later.
4005 		if (torture_sched_setaffinity(current->pid, cpumask_of(cpu), false))
4006 			continue;
4007 		// Preempt at high-ish priority, then reset to normal.
4008 		sched_set_fifo(current);
4009 		torture_sched_setaffinity(current->pid, cpu_present_mask, true);
4010 		mdelay(preempt_duration);
4011 		sched_set_normal(current, 0);
4012 		stutter_wait("rcu_torture_preempt");
4013 	} while (!torture_must_stop());
4014 	torture_kthread_stopping("rcu_torture_preempt");
4015 	return 0;
4016 }
4017 
4018 static enum cpuhp_state rcutor_hp;
4019 
4020 static struct hrtimer gpwrap_lag_timer;
4021 static bool gpwrap_lag_active;
4022 
4023 /* Timer handler for toggling the RCU grace-period sequence-wrap (gpwrap) test lag value. */
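/*
 * Timing example (illustrative values): gpwrap_lag_cycle_mins=30 with
 * gpwrap_lag_active_mins=5 yields a 30-minute cycle in which the lag
 * value gpwrap_lag_gps is applied for 5 minutes and then cleared for
 * the remaining 25.
 */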
4024 static enum hrtimer_restart rcu_gpwrap_lag_timer(struct hrtimer *timer)
4025 {
4026 	ktime_t next_delay;
4027 
4028 	if (gpwrap_lag_active) {
4029 		pr_alert("rcu-torture: Disabling gpwrap lag (value=0)\n");
4030 		cur_ops->set_gpwrap_lag(0);
4031 		gpwrap_lag_active = false;
4032 		next_delay = ktime_set((gpwrap_lag_cycle_mins - gpwrap_lag_active_mins) * 60, 0);
4033 	} else {
4034 		pr_alert("rcu-torture: Enabling gpwrap lag (value=%d)\n", gpwrap_lag_gps);
4035 		cur_ops->set_gpwrap_lag(gpwrap_lag_gps);
4036 		gpwrap_lag_active = true;
4037 		next_delay = ktime_set(gpwrap_lag_active_mins * 60, 0);
4038 	}
4039 
4040 	if (torture_must_stop_irq())
4041 		return HRTIMER_NORESTART;
4042 
4043 	hrtimer_forward_now(timer, next_delay);
4044 	return HRTIMER_RESTART;
4045 }
4046 
4047 static int rcu_gpwrap_lag_init(void)
4048 {
4049 	if (!gpwrap_lag)
4050 		return 0;
4051 
4052 	if (gpwrap_lag_cycle_mins <= 0 || gpwrap_lag_active_mins <= 0) {
4053 		pr_alert("rcu-torture: lag timing parameters must be positive\n");
4054 		return -EINVAL;
4055 	}
4056 
4057 	hrtimer_setup(&gpwrap_lag_timer, rcu_gpwrap_lag_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4058 	gpwrap_lag_active = false;
4059 	hrtimer_start(&gpwrap_lag_timer,
4060 		      ktime_set((gpwrap_lag_cycle_mins - gpwrap_lag_active_mins) * 60, 0), HRTIMER_MODE_REL);
4061 
4062 	return 0;
4063 }
4064 
4065 static void rcu_gpwrap_lag_cleanup(void)
4066 {
4067 	hrtimer_cancel(&gpwrap_lag_timer);
4068 	cur_ops->set_gpwrap_lag(0);
4069 	gpwrap_lag_active = false;
4070 }

4071 static void
4072 rcu_torture_cleanup(void)
4073 {
4074 	int firsttime;
4075 	int flags = 0;
4076 	unsigned long gp_seq = 0;
4077 	int i;
4078 	int j;
4079 
4080 	if (torture_cleanup_begin()) {
4081 		if (cur_ops->cb_barrier != NULL) {
4082 			pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
4083 			cur_ops->cb_barrier();
4084 		}
4085 		if (cur_ops->gp_slow_unregister)
4086 			cur_ops->gp_slow_unregister(NULL);
4087 		return;
4088 	}
4089 	if (!cur_ops) {
4090 		torture_cleanup_end();
4091 		return;
4092 	}
4093 
4094 	rcutorture_test_nmis(test_nmis);
4095 
4096 	if (cur_ops->gp_kthread_dbg)
4097 		cur_ops->gp_kthread_dbg();
4098 	torture_stop_kthread(rcu_torture_preempt, preempt_task);
4099 	rcu_torture_read_exit_cleanup();
4100 	rcu_torture_barrier_cleanup();
4101 	rcu_torture_fwd_prog_cleanup();
4102 	torture_stop_kthread(rcu_torture_stall, stall_task);
4103 	torture_stop_kthread(rcu_torture_writer, writer_task);
4104 
4105 	if (nocb_tasks) {
4106 		for (i = 0; i < nrealnocbers; i++)
4107 			torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]);
4108 		kfree(nocb_tasks);
4109 		nocb_tasks = NULL;
4110 	}
4111 
4112 	if (updown_task) {
4113 		torture_stop_kthread(rcu_torture_updown, updown_task);
4114 		updown_task = NULL;
4115 	}
4116 	if (reader_tasks) {
4117 		for (i = 0; i < nrealreaders; i++)
4118 			torture_stop_kthread(rcu_torture_reader,
4119 					     reader_tasks[i]);
4120 		kfree(reader_tasks);
4121 		reader_tasks = NULL;
4122 	}
4123 	kfree(rcu_torture_reader_mbchk);
4124 	rcu_torture_reader_mbchk = NULL;
4125 
4126 	if (fakewriter_tasks) {
4127 		for (i = 0; i < nrealfakewriters; i++)
4128 			torture_stop_kthread(rcu_torture_fakewriter,
4129 					     fakewriter_tasks[i]);
4130 		kfree(fakewriter_tasks);
4131 		fakewriter_tasks = NULL;
4132 	}
4133 
4134 	if (cur_ops->get_gp_data)
4135 		cur_ops->get_gp_data(&flags, &gp_seq);
4136 	pr_alert("%s:  End-test grace-period state: g%ld f%#x total-gps=%ld\n",
4137 		 cur_ops->name, (long)gp_seq, flags,
4138 		 rcutorture_seq_diff(gp_seq, start_gp_seq));
4139 	torture_stop_kthread(rcu_torture_stats, stats_task);
4140 	torture_stop_kthread(rcu_torture_fqs, fqs_task);
4141 	if (rcu_torture_can_boost() && rcutor_hp >= 0)
4142 		cpuhp_remove_state(rcutor_hp);
4143 
4144 	/*
4145 	 * Wait for all RCU callbacks to fire, then do torture-type-specific
4146 	 * cleanup operations.
4147 	 */
4148 	if (cur_ops->cb_barrier != NULL) {
4149 		pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
4150 		cur_ops->cb_barrier();
4151 	}
4152 	if (cur_ops->cleanup != NULL)
4153 		cur_ops->cleanup();
4154 
4155 	rcu_torture_mem_dump_obj();
4156 
4157 	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */
4158 
4159 	if (err_segs_recorded) {
4160 		pr_alert("Failure/close-call rcutorture reader segments:\n");
4161 		if (rt_read_nsegs == 0)
4162 			pr_alert("\t: No segments recorded!!!\n");
4163 		firsttime = 1;
4164 		for (i = 0; i < rt_read_nsegs; i++) {
4165 			if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_GP))
4166 				pr_alert("\t%lluus ", div64_u64(err_segs[i].rt_ts, 1000ULL));
4167 			else
4168 				pr_alert("\t");
4169 			pr_cont("%d: %#4x", i, err_segs[i].rt_readstate);
4170 			if (err_segs[i].rt_delay_jiffies != 0) {
4171 				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
4172 					err_segs[i].rt_delay_jiffies);
4173 				firsttime = 0;
4174 			}
4175 			if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)) {
4176 				pr_cont(" CPU %2d", err_segs[i].rt_cpu);
4177 				if (err_segs[i].rt_cpu != err_segs[i].rt_end_cpu)
4178 					pr_cont("->%-2d", err_segs[i].rt_end_cpu);
4179 				else
4180 					pr_cont(" ...");
4181 			}
4182 			if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_GP) &&
4183 			    cur_ops->gather_gp_seqs && cur_ops->format_gp_seqs) {
4184 				char buf1[20+1];
4185 				char buf2[20+1];
4186 				char sepchar = '-';
4187 
4188 				cur_ops->format_gp_seqs(err_segs[i].rt_gp_seq,
4189 							buf1, ARRAY_SIZE(buf1));
4190 				cur_ops->format_gp_seqs(err_segs[i].rt_gp_seq_end,
4191 							buf2, ARRAY_SIZE(buf2));
4192 				if (err_segs[i].rt_gp_seq == err_segs[i].rt_gp_seq_end) {
4193 					if (buf2[0]) {
4194 						for (j = 0; buf2[j]; j++)
4195 							buf2[j] = '.';
4196 						if (j)
4197 							buf2[j - 1] = ' ';
4198 					}
4199 					sepchar = ' ';
4200 				}
4201 				pr_cont(" %s%c%s", buf1, sepchar, buf2);
4202 			}
4203 			if (err_segs[i].rt_delay_ms != 0) {
4204 				pr_cont(" %s%ldms", firsttime ? "" : "+",
4205 					err_segs[i].rt_delay_ms);
4206 				firsttime = 0;
4207 			}
4208 			if (err_segs[i].rt_delay_us != 0) {
4209 				pr_cont(" %s%ldus", firsttime ? "" : "+",
4210 					err_segs[i].rt_delay_us);
4211 				firsttime = 0;
4212 			}
4213 			pr_cont("%s", err_segs[i].rt_preempted ? " preempted" : "");
4214 			if (err_segs[i].rt_readstate & RCUTORTURE_RDR_BH)
4215 				pr_cont(" BH");
4216 			if (err_segs[i].rt_readstate & RCUTORTURE_RDR_IRQ)
4217 				pr_cont(" IRQ");
4218 			if (err_segs[i].rt_readstate & RCUTORTURE_RDR_PREEMPT)
4219 				pr_cont(" PREEMPT");
4220 			if (err_segs[i].rt_readstate & RCUTORTURE_RDR_RBH)
4221 				pr_cont(" RBH");
4222 			if (err_segs[i].rt_readstate & RCUTORTURE_RDR_SCHED)
4223 				pr_cont(" SCHED");
4224 			if (err_segs[i].rt_readstate & RCUTORTURE_RDR_RCU_1)
4225 				pr_cont(" RCU_1");
4226 			if (err_segs[i].rt_readstate & RCUTORTURE_RDR_RCU_2)
4227 				pr_cont(" RCU_2");
4228 			pr_cont("\n");
4229 
4230 		}
4231 		if (rt_read_preempted)
4232 			pr_alert("\tReader was preempted.\n");
4233 	}
4234 	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
4235 		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
4236 	else if (torture_onoff_failures())
4237 		rcu_torture_print_module_parms(cur_ops,
4238 					       "End of test: RCU_HOTPLUG");
4239 	else
4240 		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
4241 	torture_cleanup_end();
4242 	if (cur_ops->gp_slow_unregister)
4243 		cur_ops->gp_slow_unregister(NULL);
4244 
4245 	if (gpwrap_lag && cur_ops->set_gpwrap_lag)
4246 		rcu_gpwrap_lag_cleanup();
4247 }
4248 
4249 static void rcu_torture_leak_cb(struct rcu_head *rhp)
4250 {
4251 }
4252 
4253 static void rcu_torture_err_cb(struct rcu_head *rhp)
4254 {
4255 	/*
4256 	 * This -might- happen due to race conditions, but is unlikely.
4257 	 * The scenario that leads to this happening is that the
4258 	 * first of the pair of duplicate callbacks is queued,
4259 	 * someone else starts a grace period that includes that
4260 	 * callback, then the second of the pair must wait for the
4261 	 * next grace period.  Unlikely, but can happen.  If it
4262 	 * does happen, the debug-objects subsystem won't have splatted.
4263 	 */
4264 	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
4265 }
4266 
4267 /*
4268  * Verify that double-free causes debug-objects to complain, but only
4269  * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
4270  * cannot be carried out.
4271  */
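/*
 * Holding the flavor's read lock below stalls grace-period completion,
 * so both calls on rh2 are queued during the same grace period, which
 * is exactly the situation debug-objects is expected to flag.
 */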
4272 static void rcu_test_debug_objects(void)
4273 {
4274 	struct rcu_head rh1;
4275 	struct rcu_head rh2;
4276 	int idx;
4277 
4278 	if (!IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD)) {
4279 		pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_%s()\n",
4280 					KBUILD_MODNAME, cur_ops->name);
4281 		return;
4282 	}
4283 
4284 	if (WARN_ON_ONCE(cur_ops->debug_objects &&
4285 			(!cur_ops->call || !cur_ops->cb_barrier)))
4286 		return;
4287 
4288 	struct rcu_head *rhp = kmalloc_obj(*rhp);
4289 
4290 	init_rcu_head_on_stack(&rh1);
4291 	init_rcu_head_on_stack(&rh2);
4292 	pr_alert("%s: WARN: Duplicate call_%s() test starting.\n", KBUILD_MODNAME, cur_ops->name);
4293 
4294 	/* Try to queue the rh2 pair of callbacks for the same grace period. */
4295 	idx = cur_ops->readlock(); /* Make it impossible to finish a grace period. */
4296 	cur_ops->call(&rh1, rcu_torture_leak_cb); /* Start grace period. */
4297 	cur_ops->call(&rh2, rcu_torture_leak_cb);
4298 	cur_ops->call(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
4299 	if (rhp) {
4300 		cur_ops->call(rhp, rcu_torture_leak_cb);
4301 		cur_ops->call(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
4302 	}
4303 	cur_ops->readunlock(idx);
4304 
4305 	/* Wait for them all to get done so we can safely return. */
4306 	cur_ops->cb_barrier();
4307 	pr_alert("%s: WARN: Duplicate call_%s() test complete.\n", KBUILD_MODNAME, cur_ops->name);
4308 	destroy_rcu_head_on_stack(&rh1);
4309 	destroy_rcu_head_on_stack(&rh2);
4310 	kfree(rhp);
4311 }
4312 
4313 static void rcutorture_sync(void)
4314 {
4315 	static unsigned long n;
4316 
4317 	if (cur_ops->sync && !(++n & 0xfff))
4318 		cur_ops->sync();
4319 }
4320 
4321 static DEFINE_MUTEX(mut0);
4322 static DEFINE_MUTEX(mut1);
4323 static DEFINE_MUTEX(mut2);
4324 static DEFINE_MUTEX(mut3);
4325 static DEFINE_MUTEX(mut4);
4326 static DEFINE_MUTEX(mut5);
4327 static DEFINE_MUTEX(mut6);
4328 static DEFINE_MUTEX(mut7);
4329 static DEFINE_MUTEX(mut8);
4330 static DEFINE_MUTEX(mut9);
4331 
4332 static DECLARE_RWSEM(rwsem0);
4333 static DECLARE_RWSEM(rwsem1);
4334 static DECLARE_RWSEM(rwsem2);
4335 static DECLARE_RWSEM(rwsem3);
4336 static DECLARE_RWSEM(rwsem4);
4337 static DECLARE_RWSEM(rwsem5);
4338 static DECLARE_RWSEM(rwsem6);
4339 static DECLARE_RWSEM(rwsem7);
4340 static DECLARE_RWSEM(rwsem8);
4341 static DECLARE_RWSEM(rwsem9);
4342 
4343 DEFINE_STATIC_SRCU(srcu0);
4344 DEFINE_STATIC_SRCU(srcu1);
4345 DEFINE_STATIC_SRCU(srcu2);
4346 DEFINE_STATIC_SRCU(srcu3);
4347 DEFINE_STATIC_SRCU(srcu4);
4348 DEFINE_STATIC_SRCU(srcu5);
4349 DEFINE_STATIC_SRCU(srcu6);
4350 DEFINE_STATIC_SRCU(srcu7);
4351 DEFINE_STATIC_SRCU(srcu8);
4352 DEFINE_STATIC_SRCU(srcu9);
4353 
4354 static int srcu_lockdep_next(const char *f, const char *fl, const char *fs, const char *fu, int i,
4355 			     int cyclelen, int deadlock)
4356 {
4357 	int j = i + 1;
4358 
4359 	if (j >= cyclelen)
4360 		j = deadlock ? 0 : -1;
4361 	if (j >= 0)
4362 		pr_info("%s: %s(%d), %s(%d), %s(%d)\n", f, fl, i, fs, j, fu, i);
4363 	else
4364 		pr_info("%s: %s(%d), %s(%d)\n", f, fl, i, fu, i);
4365 	return j;
4366 }
4367 
4368 // Test lockdep on SRCU-based deadlock scenarios.
4369 static void rcu_torture_init_srcu_lockdep(void)
4370 {
4371 	int cyclelen;
4372 	int deadlock;
4373 	bool err = false;
4374 	int i;
4375 	int j;
4376 	int idx;
4377 	struct mutex *muts[] = { &mut0, &mut1, &mut2, &mut3, &mut4,
4378 				 &mut5, &mut6, &mut7, &mut8, &mut9 };
4379 	struct rw_semaphore *rwsems[] = { &rwsem0, &rwsem1, &rwsem2, &rwsem3, &rwsem4,
4380 					  &rwsem5, &rwsem6, &rwsem7, &rwsem8, &rwsem9 };
4381 	struct srcu_struct *srcus[] = { &srcu0, &srcu1, &srcu2, &srcu3, &srcu4,
4382 					&srcu5, &srcu6, &srcu7, &srcu8, &srcu9 };
4383 	int testtype;
4384 
4385 	if (!test_srcu_lockdep)
4386 		return;
4387 
4388 	deadlock = test_srcu_lockdep / 1000;
4389 	testtype = (test_srcu_lockdep / 10) % 100;
4390 	cyclelen = test_srcu_lockdep % 10;
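	/*
	 * Decoding example: test_srcu_lockdep=1002 gives deadlock=1
	 * (1002 / 1000), testtype=0 ((1002 / 10) % 100), and cyclelen=2
	 * (1002 % 10), that is, an SRCU-only two-way deadlock cycle.
	 */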
4391 	WARN_ON_ONCE(ARRAY_SIZE(muts) != ARRAY_SIZE(srcus));
4392 	if (WARN_ONCE(deadlock != !!deadlock,
4393 		      "%s: test_srcu_lockdep=%d and deadlock digit %d must be zero or one.\n",
4394 		      __func__, test_srcu_lockdep, deadlock))
4395 		err = true;
4396 	if (WARN_ONCE(cyclelen <= 0,
4397 		      "%s: test_srcu_lockdep=%d and cycle-length digit %d must be greater than zero.\n",
4398 		      __func__, test_srcu_lockdep, cyclelen))
4399 		err = true;
4400 	if (err)
4401 		goto err_out;
4402 
4403 	if (testtype == 0) {
4404 		pr_info("%s: test_srcu_lockdep = %05d: SRCU %d-way %sdeadlock.\n",
4405 			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
4406 		if (deadlock && cyclelen == 1)
4407 			pr_info("%s: Expect hang.\n", __func__);
4408 		for (i = 0; i < cyclelen; i++) {
4409 			j = srcu_lockdep_next(__func__, "srcu_read_lock", "synchronize_srcu",
4410 					      "srcu_read_unlock", i, cyclelen, deadlock);
4411 			idx = srcu_read_lock(srcus[i]);
4412 			if (j >= 0)
4413 				synchronize_srcu(srcus[j]);
4414 			srcu_read_unlock(srcus[i], idx);
4415 		}
4416 		return;
4417 	}
4418 
4419 	if (testtype == 1) {
4420 		pr_info("%s: test_srcu_lockdep = %05d: SRCU/mutex %d-way %sdeadlock.\n",
4421 			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
4422 		for (i = 0; i < cyclelen; i++) {
4423 			pr_info("%s: srcu_read_lock(%d), mutex_lock(%d), mutex_unlock(%d), srcu_read_unlock(%d)\n",
4424 				__func__, i, i, i, i);
4425 			idx = srcu_read_lock(srcus[i]);
4426 			mutex_lock(muts[i]);
4427 			mutex_unlock(muts[i]);
4428 			srcu_read_unlock(srcus[i], idx);
4429 
4430 			j = srcu_lockdep_next(__func__, "mutex_lock", "synchronize_srcu",
4431 					      "mutex_unlock", i, cyclelen, deadlock);
4432 			mutex_lock(muts[i]);
4433 			if (j >= 0)
4434 				synchronize_srcu(srcus[j]);
4435 			mutex_unlock(muts[i]);
4436 		}
4437 		return;
4438 	}
4439 
4440 	if (testtype == 2) {
4441 		pr_info("%s: test_srcu_lockdep = %05d: SRCU/rwsem %d-way %sdeadlock.\n",
4442 			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
4443 		for (i = 0; i < cyclelen; i++) {
4444 			pr_info("%s: srcu_read_lock(%d), down_read(%d), up_read(%d), srcu_read_unlock(%d)\n",
4445 				__func__, i, i, i, i);
4446 			idx = srcu_read_lock(srcus[i]);
4447 			down_read(rwsems[i]);
4448 			up_read(rwsems[i]);
4449 			srcu_read_unlock(srcus[i], idx);
4450 
4451 			j = srcu_lockdep_next(__func__, "down_write", "synchronize_srcu",
4452 					      "up_write", i, cyclelen, deadlock);
4453 			down_write(rwsems[i]);
4454 			if (j >= 0)
4455 				synchronize_srcu(srcus[j]);
4456 			up_write(rwsems[i]);
4457 		}
4458 		return;
4459 	}
4460 
4461 #ifdef CONFIG_TASKS_TRACE_RCU
4462 	if (testtype == 3) {
4463 		pr_info("%s: test_srcu_lockdep = %05d: SRCU and Tasks Trace RCU %d-way %sdeadlock.\n",
4464 			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
4465 		if (deadlock && cyclelen == 1)
4466 			pr_info("%s: Expect hang.\n", __func__);
4467 		for (i = 0; i < cyclelen; i++) {
4468 			char *fl = i == 0 ? "rcu_read_lock_trace" : "srcu_read_lock";
4469 			char *fs = i == cyclelen - 1 ? "synchronize_rcu_tasks_trace"
4470 						     : "synchronize_srcu";
4471 			char *fu = i == 0 ? "rcu_read_unlock_trace" : "srcu_read_unlock";
4472 
4473 			j = srcu_lockdep_next(__func__, fl, fs, fu, i, cyclelen, deadlock);
4474 			if (i == 0)
4475 				rcu_read_lock_trace();
4476 			else
4477 				idx = srcu_read_lock(srcus[i]);
4478 			if (j >= 0) {
4479 				if (i == cyclelen - 1)
4480 					synchronize_rcu_tasks_trace();
4481 				else
4482 					synchronize_srcu(srcus[j]);
4483 			}
4484 			if (i == 0)
4485 				rcu_read_unlock_trace();
4486 			else
4487 				srcu_read_unlock(srcus[i], idx);
4488 		}
4489 		return;
4490 	}
4491 #endif // #ifdef CONFIG_TASKS_TRACE_RCU
4492 
4493 err_out:
4494 	pr_info("%s: test_srcu_lockdep = %05d does nothing.\n", __func__, test_srcu_lockdep);
4495 	pr_info("%s: test_srcu_lockdep = DNNL.\n", __func__);
4496 	pr_info("%s: D: Deadlock if nonzero.\n", __func__);
4497 	pr_info("%s: NN: Test number, 0=SRCU, 1=SRCU/mutex, 2=SRCU/rwsem, 3=SRCU/Tasks Trace RCU.\n", __func__);
4498 	pr_info("%s: L: Cycle length.\n", __func__);
4499 	if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU))
4500 		pr_info("%s: NN=3 disallowed because kernel is built with CONFIG_TASKS_TRACE_RCU=n\n", __func__);
4501 }
4502 
4503 static int __init
4504 rcu_torture_init(void)
4505 {
4506 	long i;
4507 	int cpu;
4508 	int firsterr = 0;
4509 	int flags = 0;
4510 	unsigned long gp_seq = 0;
4511 	static struct rcu_torture_ops *torture_ops[] = {
4512 		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops,
4513 		TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
4514 		&trivial_ops, TRIVIAL_PREEMPT_OPS
4515 	};
4516 
4517 	if (!torture_init_begin(torture_type, verbose))
4518 		return -EBUSY;
4519 
4520 	/* Process args and tell the world that the torturer is on the job. */
4521 	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
4522 		cur_ops = torture_ops[i];
4523 		if (strcmp(torture_type, cur_ops->name) == 0)
4524 			break;
4525 	}
4526 	if (i == ARRAY_SIZE(torture_ops)) {
4527 		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
4528 			 torture_type);
4529 		pr_alert("rcu-torture types:");
4530 		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
4531 			pr_cont(" %s", torture_ops[i]->name);
4532 		pr_cont("\n");
4533 		firsterr = -EINVAL;
4534 		cur_ops = NULL;
4535 		goto unwind;
4536 	}
4537 	if (cur_ops->fqs == NULL && fqs_duration != 0) {
4538 		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
4539 		fqs_duration = 0;
4540 	}
4541 	if (nocbs_nthreads != 0 && (cur_ops != &rcu_ops ||
4542 				    !IS_ENABLED(CONFIG_RCU_NOCB_CPU))) {
4543 		pr_alert("rcu-torture types: %s and CONFIG_RCU_NOCB_CPU=%d, nocb toggle disabled.\n",
4544 			 cur_ops->name, IS_ENABLED(CONFIG_RCU_NOCB_CPU));
4545 		nocbs_nthreads = 0;
4546 	}
4547 	if (cur_ops->init)
4548 		cur_ops->init();
4549 
4550 	rcu_torture_init_srcu_lockdep();
4551 
	if (nfakewriters >= 0) {
		nrealfakewriters = nfakewriters;
	} else {
		nrealfakewriters = num_online_cpus() - 2 - nfakewriters;
		if (nrealfakewriters <= 0)
			nrealfakewriters = 1;
	}

	if (nreaders >= 0) {
		nrealreaders = nreaders;
	} else {
		nrealreaders = num_online_cpus() - 2 - nreaders;
		if (nrealreaders <= 0)
			nrealreaders = 1;
	}
	rcu_torture_print_module_parms(cur_ops, "Start of test");
	if (cur_ops->get_gp_data)
		cur_ops->get_gp_data(&flags, &gp_seq);
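	/*
	 * Snapshot the starting grace-period sequence number so that the
	 * end-of-test reporting can show how many grace periods elapsed
	 * while the test ran.
	 */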
	start_gp_seq = gp_seq;
	pr_alert("%s:  Start-test grace-period state: g%ld f%#x\n",
		 cur_ops->name, (long)gp_seq, flags);

	/* Set up the freelist. */

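	/*
	 * Each element starts with ->rtort_mbtest == 0, meaning "not yet
	 * published".  The writer is expected to set it nonzero when the
	 * element goes live, so a reader observing zero through a live
	 * pointer indicates a memory-ordering bug.
	 */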
	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_mbchk_fail, 0);
	atomic_set(&n_rcu_torture_mbchk_tries, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}
	err_segs_recorded = 0;
	rt_read_nsegs = 0;

	/* Start up the kthreads. */

	rcu_torture_write_types();
	if (nrealfakewriters > 0) {
		fakewriter_tasks = kzalloc_objs(fakewriter_tasks[0],
						nrealfakewriters);
		if (fakewriter_tasks == NULL) {
			TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	for (i = 0; i < nrealfakewriters; i++) {
		firsterr = torture_create_kthread(rcu_torture_fakewriter,
						  NULL, fakewriter_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	reader_tasks = kzalloc_objs(reader_tasks[0], nrealreaders);
	rcu_torture_reader_mbchk = kzalloc_objs(*rcu_torture_reader_mbchk,
						nrealreaders);
	if (!reader_tasks || !rcu_torture_reader_mbchk) {
		TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		rcu_torture_reader_mbchk[i].rtc_chkrdr = -1;
		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
						  reader_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
					  writer_task);
	if (torture_init_error(firsterr))
		goto unwind;

	firsterr = rcu_torture_updown_init();
	if (torture_init_error(firsterr))
		goto unwind;
	nrealnocbers = nocbs_nthreads;
	if (WARN_ON(nrealnocbers < 0))
		nrealnocbers = 1;
	if (WARN_ON(nocbs_toggle < 0))
		nocbs_toggle = HZ;
	if (nrealnocbers > 0) {
		nocb_tasks = kzalloc_objs(nocb_tasks[0], nrealnocbers);
		if (nocb_tasks == NULL) {
			TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	} else {
		nocb_tasks = NULL;
	}
	for (i = 0; i < nrealnocbers; i++) {
		firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
						  stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (test_no_idle_hz && shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (torture_init_error(firsterr))
			goto unwind;
	}
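	/*
	 * Set up stuttering, under which the test periodically pauses.
	 * The second argument to torture_stutter_init() is taken from the
	 * flavor's stall_dur(), where provided, and otherwise defaults to
	 * the stutter interval itself.
	 */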
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		int t;

		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
		firsterr = torture_stutter_init(stutter * HZ, t);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_holdoff < 0)
		fqs_holdoff = 0;
	if (fqs_duration && fqs_holdoff) {
		/* Create the fqs thread */
		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
						  fqs_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
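	/*
	 * When boosting is available, register a dynamic CPU-hotplug state.
	 * cpuhp_setup_state() with CPUHP_AP_ONLINE_DYN returns the allocated
	 * state number (>= 0) on success or a negative errno on failure, so
	 * rcutor_hp serves both as the handle for later removal and as the
	 * error indication checked just below.
	 */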
	if (rcu_torture_can_boost()) {

		boost_starttime = jiffies + test_boost_interval * HZ;

		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
					     rcutorture_booster_init,
					     rcutorture_booster_cleanup);
		rcutor_hp = firsterr;
		if (torture_init_error(firsterr))
			goto unwind;
	}
	shutdown_jiffies = jiffies + shutdown_secs * HZ;
	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
				      rcutorture_sync);
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_stall_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_fwd_prog_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_barrier_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_read_exit_init();
	if (torture_init_error(firsterr))
		goto unwind;
	if (preempt_duration > 0) {
		firsterr = torture_create_kthread(rcu_torture_preempt, NULL, preempt_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (object_debug)
		rcu_test_debug_objects();

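	/*
	 * Where supported, register rcu_fwd_cb_nodelay, which (judging by
	 * its name) lets the forward-progress tests suppress the flavor's
	 * deliberate grace-period slowdowns during callback flooding.  The
	 * WARN_ON_ONCE() insists that a registration hook always comes
	 * paired with an unregistration hook.
	 */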
	if (cur_ops->gp_slow_register && !WARN_ON_ONCE(!cur_ops->gp_slow_unregister))
		cur_ops->gp_slow_register(&rcu_fwd_cb_nodelay);

	if (gpwrap_lag && cur_ops->set_gpwrap_lag) {
		firsterr = rcu_gpwrap_lag_init();
		if (torture_init_error(firsterr))
			goto unwind;
	}

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_torture_cleanup();
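	/*
	 * If a shutdown was requested, this is presumably an automated run,
	 * so power off now rather than leaving the failed-init kernel to
	 * wait out the clock; the WARN_ON() flags the build configuration
	 * this path does not expect.
	 */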
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);
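/*
 * Example invocation (hypothetical parameter values):
 *
 *	modprobe rcutorture torture_type=srcu nreaders=-1 stat_interval=15 \
 *		shutdown_secs=600
 *
 * This selects srcu_ops, creates num_online_cpus() - 1 reader kthreads,
 * starts the periodic statistics kthread, and schedules an orderly
 * shutdown after ten minutes.
 */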