xref: /linux/kernel/rcu/rcutorture.c (revision 2363088eba2ecccfb643725e4864af73c4226a04)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Read-Copy Update module-based torture test facility
4  *
5  * Copyright (C) IBM Corporation, 2005, 2006
6  *
7  * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
8  *	  Josh Triplett <josh@joshtriplett.org>
9  *
10  * See also:  Documentation/RCU/torture.rst
11  */
12 
13 #define pr_fmt(fmt) fmt
14 
15 #include <linux/types.h>
16 #include <linux/kernel.h>
17 #include <linux/init.h>
18 #include <linux/module.h>
19 #include <linux/kthread.h>
20 #include <linux/err.h>
21 #include <linux/spinlock.h>
22 #include <linux/smp.h>
23 #include <linux/rcupdate_wait.h>
24 #include <linux/interrupt.h>
25 #include <linux/sched/signal.h>
26 #include <uapi/linux/sched/types.h>
27 #include <linux/atomic.h>
28 #include <linux/bitops.h>
29 #include <linux/completion.h>
30 #include <linux/moduleparam.h>
31 #include <linux/percpu.h>
32 #include <linux/notifier.h>
33 #include <linux/reboot.h>
34 #include <linux/freezer.h>
35 #include <linux/cpu.h>
36 #include <linux/delay.h>
37 #include <linux/stat.h>
38 #include <linux/srcu.h>
39 #include <linux/slab.h>
40 #include <linux/trace_clock.h>
41 #include <asm/byteorder.h>
42 #include <linux/torture.h>
43 #include <linux/vmalloc.h>
44 #include <linux/sched/debug.h>
45 #include <linux/sched/sysctl.h>
46 #include <linux/oom.h>
47 #include <linux/tick.h>
48 #include <linux/rcupdate_trace.h>
49 #include <linux/nmi.h>
50 
51 #include "rcu.h"
52 
53 MODULE_LICENSE("GPL");
54 MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
55 
56 /* Bits for ->extendables field, extendables param, and related definitions. */
57 #define RCUTORTURE_RDR_SHIFT_1	 8	/* Put SRCU index in upper bits. */
58 #define RCUTORTURE_RDR_MASK_1	 (1 << RCUTORTURE_RDR_SHIFT_1)
59 #define RCUTORTURE_RDR_SHIFT_2	 9	/* Put SRCU index in upper bits. */
60 #define RCUTORTURE_RDR_MASK_2	 (1 << RCUTORTURE_RDR_SHIFT_2)
61 #define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
62 #define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
63 #define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
64 #define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
65 #define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
66 #define RCUTORTURE_RDR_RCU_1	 0x20	/*  ... entering another RCU reader. */
67 #define RCUTORTURE_RDR_RCU_2	 0x40	/*  ... entering another RCU reader. */
68 #define RCUTORTURE_RDR_NBITS	 7	/* Number of bits defined above. */
69 #define RCUTORTURE_MAX_EXTEND	 \
70 	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
71 	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
72 #define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
73 					/* Must be power of two minus one. */
74 #define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
75 
76 torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
77 	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
78 torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable");
79 torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
80 torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
81 torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)");
82 torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
83 torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
84 torture_param(bool, fwd_progress_need_resched, 1, "Hide cond_resched() behind need_resched()");
85 torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
86 torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait primitives");
87 torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives");
88 torture_param(bool, gp_cond_exp_full, false,
89 		    "Use conditional/async full-stateexpedited GP wait primitives");
90 torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
91 torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
92 torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
93 torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives");
94 torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives");
95 torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives");
96 torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
97 torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
98 torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
99 torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing");
100 torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
101 torture_param(int, nreaders, -1, "Number of RCU reader threads");
102 torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
103 torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
104 torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
105 torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
106 torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
107 torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
108 torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
109 torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
110 torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
111 torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
112 torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s).");
113 torture_param(bool, stall_no_softlockup, false, "Avoid softlockup warning during cpu stall.");
114 torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
115 torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
116 torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
117 torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
118 torture_param(int, stutter, 5, "Number of seconds to run/halt test");
119 torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
120 torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds.");
121 torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds.");
122 torture_param(int, test_nmis, 0, "End-test NMI tests, 0 to disable.");
123 torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs");
124 torture_param(int, test_srcu_lockdep, 0, "Test specified SRCU deadlock scenario.");
125 torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
126 
127 static char *torture_type = "rcu";
128 module_param(torture_type, charp, 0444);
129 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
130 
131 static int nrealnocbers;
132 static int nrealreaders;
133 static struct task_struct *writer_task;
134 static struct task_struct **fakewriter_tasks;
135 static struct task_struct **reader_tasks;
136 static struct task_struct **nocb_tasks;
137 static struct task_struct *stats_task;
138 static struct task_struct *fqs_task;
139 static struct task_struct *boost_tasks[NR_CPUS];
140 static struct task_struct *stall_task;
141 static struct task_struct **fwd_prog_tasks;
142 static struct task_struct **barrier_cbs_tasks;
143 static struct task_struct *barrier_task;
144 static struct task_struct *read_exit_task;
145 
146 #define RCU_TORTURE_PIPE_LEN 10
147 
148 // Mailbox-like structure to check RCU global memory ordering.
149 struct rcu_torture_reader_check {
150 	unsigned long rtc_myloops;
151 	int rtc_chkrdr;
152 	unsigned long rtc_chkloops;
153 	int rtc_ready;
154 	struct rcu_torture_reader_check *rtc_assigner;
155 } ____cacheline_internodealigned_in_smp;
156 
157 // Update-side data structure used to check RCU readers.
158 struct rcu_torture {
159 	struct rcu_head rtort_rcu;
160 	int rtort_pipe_count;
161 	struct list_head rtort_free;
162 	int rtort_mbtest;
163 	struct rcu_torture_reader_check *rtort_chkp;
164 };
165 
166 static LIST_HEAD(rcu_torture_freelist);
167 static struct rcu_torture __rcu *rcu_torture_current;
168 static unsigned long rcu_torture_current_version;
169 static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
170 static DEFINE_SPINLOCK(rcu_torture_lock);
171 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
172 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
173 static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
174 static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
175 static atomic_t n_rcu_torture_alloc;
176 static atomic_t n_rcu_torture_alloc_fail;
177 static atomic_t n_rcu_torture_free;
178 static atomic_t n_rcu_torture_mberror;
179 static atomic_t n_rcu_torture_mbchk_fail;
180 static atomic_t n_rcu_torture_mbchk_tries;
181 static atomic_t n_rcu_torture_error;
182 static long n_rcu_torture_barrier_error;
183 static long n_rcu_torture_boost_ktrerror;
184 static long n_rcu_torture_boost_failure;
185 static long n_rcu_torture_boosts;
186 static atomic_long_t n_rcu_torture_timers;
187 static long n_barrier_attempts;
188 static long n_barrier_successes; /* did rcu_barrier test succeed? */
189 static unsigned long n_read_exits;
190 static struct list_head rcu_torture_removed;
191 static unsigned long shutdown_jiffies;
192 static unsigned long start_gp_seq;
193 static atomic_long_t n_nocb_offload;
194 static atomic_long_t n_nocb_deoffload;
195 
196 static int rcu_torture_writer_state;
197 #define RTWS_FIXED_DELAY	0
198 #define RTWS_DELAY		1
199 #define RTWS_REPLACE		2
200 #define RTWS_DEF_FREE		3
201 #define RTWS_EXP_SYNC		4
202 #define RTWS_COND_GET		5
203 #define RTWS_COND_GET_FULL	6
204 #define RTWS_COND_GET_EXP	7
205 #define RTWS_COND_GET_EXP_FULL	8
206 #define RTWS_COND_SYNC		9
207 #define RTWS_COND_SYNC_FULL	10
208 #define RTWS_COND_SYNC_EXP	11
209 #define RTWS_COND_SYNC_EXP_FULL	12
210 #define RTWS_POLL_GET		13
211 #define RTWS_POLL_GET_FULL	14
212 #define RTWS_POLL_GET_EXP	15
213 #define RTWS_POLL_GET_EXP_FULL	16
214 #define RTWS_POLL_WAIT		17
215 #define RTWS_POLL_WAIT_FULL	18
216 #define RTWS_POLL_WAIT_EXP	19
217 #define RTWS_POLL_WAIT_EXP_FULL	20
218 #define RTWS_SYNC		21
219 #define RTWS_STUTTER		22
220 #define RTWS_STOPPING		23
221 static const char * const rcu_torture_writer_state_names[] = {
222 	"RTWS_FIXED_DELAY",
223 	"RTWS_DELAY",
224 	"RTWS_REPLACE",
225 	"RTWS_DEF_FREE",
226 	"RTWS_EXP_SYNC",
227 	"RTWS_COND_GET",
228 	"RTWS_COND_GET_FULL",
229 	"RTWS_COND_GET_EXP",
230 	"RTWS_COND_GET_EXP_FULL",
231 	"RTWS_COND_SYNC",
232 	"RTWS_COND_SYNC_FULL",
233 	"RTWS_COND_SYNC_EXP",
234 	"RTWS_COND_SYNC_EXP_FULL",
235 	"RTWS_POLL_GET",
236 	"RTWS_POLL_GET_FULL",
237 	"RTWS_POLL_GET_EXP",
238 	"RTWS_POLL_GET_EXP_FULL",
239 	"RTWS_POLL_WAIT",
240 	"RTWS_POLL_WAIT_FULL",
241 	"RTWS_POLL_WAIT_EXP",
242 	"RTWS_POLL_WAIT_EXP_FULL",
243 	"RTWS_SYNC",
244 	"RTWS_STUTTER",
245 	"RTWS_STOPPING",
246 };
247 
248 /* Record reader segment types and duration for first failing read. */
249 struct rt_read_seg {
250 	int rt_readstate;
251 	unsigned long rt_delay_jiffies;
252 	unsigned long rt_delay_ms;
253 	unsigned long rt_delay_us;
254 	bool rt_preempted;
255 };
256 static int err_segs_recorded;
257 static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
258 static int rt_read_nsegs;
259 
260 static const char *rcu_torture_writer_state_getname(void)
261 {
262 	unsigned int i = READ_ONCE(rcu_torture_writer_state);
263 
264 	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
265 		return "???";
266 	return rcu_torture_writer_state_names[i];
267 }
268 
269 #ifdef CONFIG_RCU_TRACE
270 static u64 notrace rcu_trace_clock_local(void)
271 {
272 	u64 ts = trace_clock_local();
273 
274 	(void)do_div(ts, NSEC_PER_USEC);
275 	return ts;
276 }
277 #else /* #ifdef CONFIG_RCU_TRACE */
278 static u64 notrace rcu_trace_clock_local(void)
279 {
280 	return 0ULL;
281 }
282 #endif /* #else #ifdef CONFIG_RCU_TRACE */
283 
284 /*
285  * Stop aggressive CPU-hog tests a bit before the end of the test in order
286  * to avoid interfering with test shutdown.
287  */
288 static bool shutdown_time_arrived(void)
289 {
290 	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
291 }
292 
293 static unsigned long boost_starttime;	/* jiffies of next boost test start. */
294 static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
295 					/*  and boost task create/destroy. */
296 static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
297 static bool barrier_phase;		/* Test phase. */
298 static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
299 static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
300 static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
301 
302 static atomic_t rcu_fwd_cb_nodelay;	/* Short rcu_torture_delay() delays. */
303 
304 /*
305  * Allocate an element from the rcu_tortures pool.
306  */
307 static struct rcu_torture *
308 rcu_torture_alloc(void)
309 {
310 	struct list_head *p;
311 
312 	spin_lock_bh(&rcu_torture_lock);
313 	if (list_empty(&rcu_torture_freelist)) {
314 		atomic_inc(&n_rcu_torture_alloc_fail);
315 		spin_unlock_bh(&rcu_torture_lock);
316 		return NULL;
317 	}
318 	atomic_inc(&n_rcu_torture_alloc);
319 	p = rcu_torture_freelist.next;
320 	list_del_init(p);
321 	spin_unlock_bh(&rcu_torture_lock);
322 	return container_of(p, struct rcu_torture, rtort_free);
323 }
324 
325 /*
326  * Free an element to the rcu_tortures pool.
327  */
328 static void
329 rcu_torture_free(struct rcu_torture *p)
330 {
331 	atomic_inc(&n_rcu_torture_free);
332 	spin_lock_bh(&rcu_torture_lock);
333 	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
334 	spin_unlock_bh(&rcu_torture_lock);
335 }
336 
337 /*
338  * Operations vector for selecting different types of tests.
339  */
340 
341 struct rcu_torture_ops {
342 	int ttype;
343 	void (*init)(void);
344 	void (*cleanup)(void);
345 	int (*readlock)(void);
346 	void (*read_delay)(struct torture_random_state *rrsp,
347 			   struct rt_read_seg *rtrsp);
348 	void (*readunlock)(int idx);
349 	int (*readlock_held)(void);
350 	unsigned long (*get_gp_seq)(void);
351 	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
352 	void (*deferred_free)(struct rcu_torture *p);
353 	void (*sync)(void);
354 	void (*exp_sync)(void);
355 	unsigned long (*get_gp_state_exp)(void);
356 	unsigned long (*start_gp_poll_exp)(void);
357 	void (*start_gp_poll_exp_full)(struct rcu_gp_oldstate *rgosp);
358 	bool (*poll_gp_state_exp)(unsigned long oldstate);
359 	void (*cond_sync_exp)(unsigned long oldstate);
360 	void (*cond_sync_exp_full)(struct rcu_gp_oldstate *rgosp);
361 	unsigned long (*get_comp_state)(void);
362 	void (*get_comp_state_full)(struct rcu_gp_oldstate *rgosp);
363 	bool (*same_gp_state)(unsigned long oldstate1, unsigned long oldstate2);
364 	bool (*same_gp_state_full)(struct rcu_gp_oldstate *rgosp1, struct rcu_gp_oldstate *rgosp2);
365 	unsigned long (*get_gp_state)(void);
366 	void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp);
367 	unsigned long (*get_gp_completed)(void);
368 	void (*get_gp_completed_full)(struct rcu_gp_oldstate *rgosp);
369 	unsigned long (*start_gp_poll)(void);
370 	void (*start_gp_poll_full)(struct rcu_gp_oldstate *rgosp);
371 	bool (*poll_gp_state)(unsigned long oldstate);
372 	bool (*poll_gp_state_full)(struct rcu_gp_oldstate *rgosp);
373 	bool (*poll_need_2gp)(bool poll, bool poll_full);
374 	void (*cond_sync)(unsigned long oldstate);
375 	void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp);
376 	call_rcu_func_t call;
377 	void (*cb_barrier)(void);
378 	void (*fqs)(void);
379 	void (*stats)(void);
380 	void (*gp_kthread_dbg)(void);
381 	bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
382 	int (*stall_dur)(void);
383 	long cbflood_max;
384 	int irq_capable;
385 	int can_boost;
386 	int extendables;
387 	int slow_gps;
388 	int no_pi_lock;
389 	const char *name;
390 };
391 
392 static struct rcu_torture_ops *cur_ops;
393 
394 /*
395  * Definitions for rcu torture testing.
396  */
397 
398 static int torture_readlock_not_held(void)
399 {
400 	return rcu_read_lock_bh_held() || rcu_read_lock_sched_held();
401 }
402 
403 static int rcu_torture_read_lock(void)
404 {
405 	rcu_read_lock();
406 	return 0;
407 }
408 
409 static void
410 rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
411 {
412 	unsigned long started;
413 	unsigned long completed;
414 	const unsigned long shortdelay_us = 200;
415 	unsigned long longdelay_ms = 300;
416 	unsigned long long ts;
417 
418 	/* We want a short delay sometimes to make a reader delay the grace
419 	 * period, and we want a long delay occasionally to trigger
420 	 * force_quiescent_state. */
421 
422 	if (!atomic_read(&rcu_fwd_cb_nodelay) &&
423 	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
424 		started = cur_ops->get_gp_seq();
425 		ts = rcu_trace_clock_local();
426 		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
427 			longdelay_ms = 5; /* Avoid triggering BH limits. */
428 		mdelay(longdelay_ms);
429 		rtrsp->rt_delay_ms = longdelay_ms;
430 		completed = cur_ops->get_gp_seq();
431 		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
432 					  started, completed);
433 	}
434 	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
435 		udelay(shortdelay_us);
436 		rtrsp->rt_delay_us = shortdelay_us;
437 	}
438 	if (!preempt_count() &&
439 	    !(torture_random(rrsp) % (nrealreaders * 500))) {
440 		torture_preempt_schedule();  /* QS only if preemptible. */
441 		rtrsp->rt_preempted = true;
442 	}
443 }
444 
445 static void rcu_torture_read_unlock(int idx)
446 {
447 	rcu_read_unlock();
448 }
449 
450 /*
451  * Update callback in the pipe.  This should be invoked after a grace period.
452  */
453 static bool
454 rcu_torture_pipe_update_one(struct rcu_torture *rp)
455 {
456 	int i;
457 	struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);
458 
459 	if (rtrcp) {
460 		WRITE_ONCE(rp->rtort_chkp, NULL);
461 		smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
462 	}
463 	i = READ_ONCE(rp->rtort_pipe_count);
464 	if (i > RCU_TORTURE_PIPE_LEN)
465 		i = RCU_TORTURE_PIPE_LEN;
466 	atomic_inc(&rcu_torture_wcount[i]);
467 	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
468 	if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
469 		rp->rtort_mbtest = 0;
470 		return true;
471 	}
472 	return false;
473 }
474 
475 /*
476  * Update all callbacks in the pipe.  Suitable for synchronous grace-period
477  * primitives.
478  */
479 static void
480 rcu_torture_pipe_update(struct rcu_torture *old_rp)
481 {
482 	struct rcu_torture *rp;
483 	struct rcu_torture *rp1;
484 
485 	if (old_rp)
486 		list_add(&old_rp->rtort_free, &rcu_torture_removed);
487 	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
488 		if (rcu_torture_pipe_update_one(rp)) {
489 			list_del(&rp->rtort_free);
490 			rcu_torture_free(rp);
491 		}
492 	}
493 }
494 
495 static void
496 rcu_torture_cb(struct rcu_head *p)
497 {
498 	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
499 
500 	if (torture_must_stop_irq()) {
501 		/* Test is ending, just drop callbacks on the floor. */
502 		/* The next initialization will pick up the pieces. */
503 		return;
504 	}
505 	if (rcu_torture_pipe_update_one(rp))
506 		rcu_torture_free(rp);
507 	else
508 		cur_ops->deferred_free(rp);
509 }
510 
511 static unsigned long rcu_no_completed(void)
512 {
513 	return 0;
514 }
515 
516 static void rcu_torture_deferred_free(struct rcu_torture *p)
517 {
518 	call_rcu_hurry(&p->rtort_rcu, rcu_torture_cb);
519 }
520 
521 static void rcu_sync_torture_init(void)
522 {
523 	INIT_LIST_HEAD(&rcu_torture_removed);
524 }
525 
526 static bool rcu_poll_need_2gp(bool poll, bool poll_full)
527 {
528 	return poll;
529 }
530 
531 static struct rcu_torture_ops rcu_ops = {
532 	.ttype			= RCU_FLAVOR,
533 	.init			= rcu_sync_torture_init,
534 	.readlock		= rcu_torture_read_lock,
535 	.read_delay		= rcu_read_delay,
536 	.readunlock		= rcu_torture_read_unlock,
537 	.readlock_held		= torture_readlock_not_held,
538 	.get_gp_seq		= rcu_get_gp_seq,
539 	.gp_diff		= rcu_seq_diff,
540 	.deferred_free		= rcu_torture_deferred_free,
541 	.sync			= synchronize_rcu,
542 	.exp_sync		= synchronize_rcu_expedited,
543 	.same_gp_state		= same_state_synchronize_rcu,
544 	.same_gp_state_full	= same_state_synchronize_rcu_full,
545 	.get_comp_state		= get_completed_synchronize_rcu,
546 	.get_comp_state_full	= get_completed_synchronize_rcu_full,
547 	.get_gp_state		= get_state_synchronize_rcu,
548 	.get_gp_state_full	= get_state_synchronize_rcu_full,
549 	.get_gp_completed	= get_completed_synchronize_rcu,
550 	.get_gp_completed_full	= get_completed_synchronize_rcu_full,
551 	.start_gp_poll		= start_poll_synchronize_rcu,
552 	.start_gp_poll_full	= start_poll_synchronize_rcu_full,
553 	.poll_gp_state		= poll_state_synchronize_rcu,
554 	.poll_gp_state_full	= poll_state_synchronize_rcu_full,
555 	.poll_need_2gp		= rcu_poll_need_2gp,
556 	.cond_sync		= cond_synchronize_rcu,
557 	.cond_sync_full		= cond_synchronize_rcu_full,
558 	.get_gp_state_exp	= get_state_synchronize_rcu,
559 	.start_gp_poll_exp	= start_poll_synchronize_rcu_expedited,
560 	.start_gp_poll_exp_full	= start_poll_synchronize_rcu_expedited_full,
561 	.poll_gp_state_exp	= poll_state_synchronize_rcu,
562 	.cond_sync_exp		= cond_synchronize_rcu_expedited,
563 	.call			= call_rcu_hurry,
564 	.cb_barrier		= rcu_barrier,
565 	.fqs			= rcu_force_quiescent_state,
566 	.stats			= NULL,
567 	.gp_kthread_dbg		= show_rcu_gp_kthreads,
568 	.check_boost_failed	= rcu_check_boost_fail,
569 	.stall_dur		= rcu_jiffies_till_stall_check,
570 	.irq_capable		= 1,
571 	.can_boost		= IS_ENABLED(CONFIG_RCU_BOOST),
572 	.extendables		= RCUTORTURE_MAX_EXTEND,
573 	.name			= "rcu"
574 };
575 
576 /*
577  * Don't even think about trying any of these in real life!!!
578  * The names includes "busted", and they really means it!
579  * The only purpose of these functions is to provide a buggy RCU
580  * implementation to make sure that rcutorture correctly emits
581  * buggy-RCU error messages.
582  */
583 static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
584 {
585 	/* This is a deliberate bug for testing purposes only! */
586 	rcu_torture_cb(&p->rtort_rcu);
587 }
588 
589 static void synchronize_rcu_busted(void)
590 {
591 	/* This is a deliberate bug for testing purposes only! */
592 }
593 
594 static void
595 call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
596 {
597 	/* This is a deliberate bug for testing purposes only! */
598 	func(head);
599 }
600 
601 static struct rcu_torture_ops rcu_busted_ops = {
602 	.ttype		= INVALID_RCU_FLAVOR,
603 	.init		= rcu_sync_torture_init,
604 	.readlock	= rcu_torture_read_lock,
605 	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
606 	.readunlock	= rcu_torture_read_unlock,
607 	.readlock_held	= torture_readlock_not_held,
608 	.get_gp_seq	= rcu_no_completed,
609 	.deferred_free	= rcu_busted_torture_deferred_free,
610 	.sync		= synchronize_rcu_busted,
611 	.exp_sync	= synchronize_rcu_busted,
612 	.call		= call_rcu_busted,
613 	.cb_barrier	= NULL,
614 	.fqs		= NULL,
615 	.stats		= NULL,
616 	.irq_capable	= 1,
617 	.name		= "busted"
618 };
619 
620 /*
621  * Definitions for srcu torture testing.
622  */
623 
624 DEFINE_STATIC_SRCU(srcu_ctl);
625 static struct srcu_struct srcu_ctld;
626 static struct srcu_struct *srcu_ctlp = &srcu_ctl;
627 static struct rcu_torture_ops srcud_ops;
628 
629 static int srcu_torture_read_lock(void)
630 {
631 	if (cur_ops == &srcud_ops)
632 		return srcu_read_lock_nmisafe(srcu_ctlp);
633 	else
634 		return srcu_read_lock(srcu_ctlp);
635 }
636 
637 static void
638 srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
639 {
640 	long delay;
641 	const long uspertick = 1000000 / HZ;
642 	const long longdelay = 10;
643 
644 	/* We want there to be long-running readers, but not all the time. */
645 
646 	delay = torture_random(rrsp) %
647 		(nrealreaders * 2 * longdelay * uspertick);
648 	if (!delay && in_task()) {
649 		schedule_timeout_interruptible(longdelay);
650 		rtrsp->rt_delay_jiffies = longdelay;
651 	} else {
652 		rcu_read_delay(rrsp, rtrsp);
653 	}
654 }
655 
656 static void srcu_torture_read_unlock(int idx)
657 {
658 	if (cur_ops == &srcud_ops)
659 		srcu_read_unlock_nmisafe(srcu_ctlp, idx);
660 	else
661 		srcu_read_unlock(srcu_ctlp, idx);
662 }
663 
664 static int torture_srcu_read_lock_held(void)
665 {
666 	return srcu_read_lock_held(srcu_ctlp);
667 }
668 
669 static unsigned long srcu_torture_completed(void)
670 {
671 	return srcu_batches_completed(srcu_ctlp);
672 }
673 
674 static void srcu_torture_deferred_free(struct rcu_torture *rp)
675 {
676 	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
677 }
678 
679 static void srcu_torture_synchronize(void)
680 {
681 	synchronize_srcu(srcu_ctlp);
682 }
683 
684 static unsigned long srcu_torture_get_gp_state(void)
685 {
686 	return get_state_synchronize_srcu(srcu_ctlp);
687 }
688 
689 static unsigned long srcu_torture_start_gp_poll(void)
690 {
691 	return start_poll_synchronize_srcu(srcu_ctlp);
692 }
693 
694 static bool srcu_torture_poll_gp_state(unsigned long oldstate)
695 {
696 	return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
697 }
698 
699 static void srcu_torture_call(struct rcu_head *head,
700 			      rcu_callback_t func)
701 {
702 	call_srcu(srcu_ctlp, head, func);
703 }
704 
705 static void srcu_torture_barrier(void)
706 {
707 	srcu_barrier(srcu_ctlp);
708 }
709 
710 static void srcu_torture_stats(void)
711 {
712 	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
713 }
714 
715 static void srcu_torture_synchronize_expedited(void)
716 {
717 	synchronize_srcu_expedited(srcu_ctlp);
718 }
719 
720 static struct rcu_torture_ops srcu_ops = {
721 	.ttype		= SRCU_FLAVOR,
722 	.init		= rcu_sync_torture_init,
723 	.readlock	= srcu_torture_read_lock,
724 	.read_delay	= srcu_read_delay,
725 	.readunlock	= srcu_torture_read_unlock,
726 	.readlock_held	= torture_srcu_read_lock_held,
727 	.get_gp_seq	= srcu_torture_completed,
728 	.deferred_free	= srcu_torture_deferred_free,
729 	.sync		= srcu_torture_synchronize,
730 	.exp_sync	= srcu_torture_synchronize_expedited,
731 	.get_gp_state	= srcu_torture_get_gp_state,
732 	.start_gp_poll	= srcu_torture_start_gp_poll,
733 	.poll_gp_state	= srcu_torture_poll_gp_state,
734 	.call		= srcu_torture_call,
735 	.cb_barrier	= srcu_torture_barrier,
736 	.stats		= srcu_torture_stats,
737 	.cbflood_max	= 50000,
738 	.irq_capable	= 1,
739 	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
740 	.name		= "srcu"
741 };
742 
743 static void srcu_torture_init(void)
744 {
745 	rcu_sync_torture_init();
746 	WARN_ON(init_srcu_struct(&srcu_ctld));
747 	srcu_ctlp = &srcu_ctld;
748 }
749 
750 static void srcu_torture_cleanup(void)
751 {
752 	cleanup_srcu_struct(&srcu_ctld);
753 	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
754 }
755 
756 /* As above, but dynamically allocated. */
757 static struct rcu_torture_ops srcud_ops = {
758 	.ttype		= SRCU_FLAVOR,
759 	.init		= srcu_torture_init,
760 	.cleanup	= srcu_torture_cleanup,
761 	.readlock	= srcu_torture_read_lock,
762 	.read_delay	= srcu_read_delay,
763 	.readunlock	= srcu_torture_read_unlock,
764 	.readlock_held	= torture_srcu_read_lock_held,
765 	.get_gp_seq	= srcu_torture_completed,
766 	.deferred_free	= srcu_torture_deferred_free,
767 	.sync		= srcu_torture_synchronize,
768 	.exp_sync	= srcu_torture_synchronize_expedited,
769 	.get_gp_state	= srcu_torture_get_gp_state,
770 	.start_gp_poll	= srcu_torture_start_gp_poll,
771 	.poll_gp_state	= srcu_torture_poll_gp_state,
772 	.call		= srcu_torture_call,
773 	.cb_barrier	= srcu_torture_barrier,
774 	.stats		= srcu_torture_stats,
775 	.cbflood_max	= 50000,
776 	.irq_capable	= 1,
777 	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
778 	.name		= "srcud"
779 };
780 
781 /* As above, but broken due to inappropriate reader extension. */
782 static struct rcu_torture_ops busted_srcud_ops = {
783 	.ttype		= SRCU_FLAVOR,
784 	.init		= srcu_torture_init,
785 	.cleanup	= srcu_torture_cleanup,
786 	.readlock	= srcu_torture_read_lock,
787 	.read_delay	= rcu_read_delay,
788 	.readunlock	= srcu_torture_read_unlock,
789 	.readlock_held	= torture_srcu_read_lock_held,
790 	.get_gp_seq	= srcu_torture_completed,
791 	.deferred_free	= srcu_torture_deferred_free,
792 	.sync		= srcu_torture_synchronize,
793 	.exp_sync	= srcu_torture_synchronize_expedited,
794 	.call		= srcu_torture_call,
795 	.cb_barrier	= srcu_torture_barrier,
796 	.stats		= srcu_torture_stats,
797 	.irq_capable	= 1,
798 	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
799 	.extendables	= RCUTORTURE_MAX_EXTEND,
800 	.name		= "busted_srcud"
801 };
802 
803 /*
804  * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
805  * This implementation does not necessarily work well with CPU hotplug.
806  */
807 
808 static void synchronize_rcu_trivial(void)
809 {
810 	int cpu;
811 
812 	for_each_online_cpu(cpu) {
813 		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
814 		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
815 	}
816 }
817 
818 static int rcu_torture_read_lock_trivial(void)
819 {
820 	preempt_disable();
821 	return 0;
822 }
823 
824 static void rcu_torture_read_unlock_trivial(int idx)
825 {
826 	preempt_enable();
827 }
828 
829 static struct rcu_torture_ops trivial_ops = {
830 	.ttype		= RCU_TRIVIAL_FLAVOR,
831 	.init		= rcu_sync_torture_init,
832 	.readlock	= rcu_torture_read_lock_trivial,
833 	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
834 	.readunlock	= rcu_torture_read_unlock_trivial,
835 	.readlock_held	= torture_readlock_not_held,
836 	.get_gp_seq	= rcu_no_completed,
837 	.sync		= synchronize_rcu_trivial,
838 	.exp_sync	= synchronize_rcu_trivial,
839 	.fqs		= NULL,
840 	.stats		= NULL,
841 	.irq_capable	= 1,
842 	.name		= "trivial"
843 };
844 
845 #ifdef CONFIG_TASKS_RCU
846 
847 /*
848  * Definitions for RCU-tasks torture testing.
849  */
850 
851 static int tasks_torture_read_lock(void)
852 {
853 	return 0;
854 }
855 
856 static void tasks_torture_read_unlock(int idx)
857 {
858 }
859 
860 static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
861 {
862 	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
863 }
864 
865 static void synchronize_rcu_mult_test(void)
866 {
867 	synchronize_rcu_mult(call_rcu_tasks, call_rcu_hurry);
868 }
869 
870 static struct rcu_torture_ops tasks_ops = {
871 	.ttype		= RCU_TASKS_FLAVOR,
872 	.init		= rcu_sync_torture_init,
873 	.readlock	= tasks_torture_read_lock,
874 	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
875 	.readunlock	= tasks_torture_read_unlock,
876 	.get_gp_seq	= rcu_no_completed,
877 	.deferred_free	= rcu_tasks_torture_deferred_free,
878 	.sync		= synchronize_rcu_tasks,
879 	.exp_sync	= synchronize_rcu_mult_test,
880 	.call		= call_rcu_tasks,
881 	.cb_barrier	= rcu_barrier_tasks,
882 	.gp_kthread_dbg	= show_rcu_tasks_classic_gp_kthread,
883 	.fqs		= NULL,
884 	.stats		= NULL,
885 	.irq_capable	= 1,
886 	.slow_gps	= 1,
887 	.name		= "tasks"
888 };
889 
890 #define TASKS_OPS &tasks_ops,
891 
892 #else // #ifdef CONFIG_TASKS_RCU
893 
894 #define TASKS_OPS
895 
896 #endif // #else #ifdef CONFIG_TASKS_RCU
897 
898 
899 #ifdef CONFIG_TASKS_RUDE_RCU
900 
901 /*
902  * Definitions for rude RCU-tasks torture testing.
903  */
904 
905 static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
906 {
907 	call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
908 }
909 
910 static struct rcu_torture_ops tasks_rude_ops = {
911 	.ttype		= RCU_TASKS_RUDE_FLAVOR,
912 	.init		= rcu_sync_torture_init,
913 	.readlock	= rcu_torture_read_lock_trivial,
914 	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
915 	.readunlock	= rcu_torture_read_unlock_trivial,
916 	.get_gp_seq	= rcu_no_completed,
917 	.deferred_free	= rcu_tasks_rude_torture_deferred_free,
918 	.sync		= synchronize_rcu_tasks_rude,
919 	.exp_sync	= synchronize_rcu_tasks_rude,
920 	.call		= call_rcu_tasks_rude,
921 	.cb_barrier	= rcu_barrier_tasks_rude,
922 	.gp_kthread_dbg	= show_rcu_tasks_rude_gp_kthread,
923 	.cbflood_max	= 50000,
924 	.fqs		= NULL,
925 	.stats		= NULL,
926 	.irq_capable	= 1,
927 	.name		= "tasks-rude"
928 };
929 
930 #define TASKS_RUDE_OPS &tasks_rude_ops,
931 
932 #else // #ifdef CONFIG_TASKS_RUDE_RCU
933 
934 #define TASKS_RUDE_OPS
935 
936 #endif // #else #ifdef CONFIG_TASKS_RUDE_RCU
937 
938 
939 #ifdef CONFIG_TASKS_TRACE_RCU
940 
941 /*
942  * Definitions for tracing RCU-tasks torture testing.
943  */
944 
945 static int tasks_tracing_torture_read_lock(void)
946 {
947 	rcu_read_lock_trace();
948 	return 0;
949 }
950 
951 static void tasks_tracing_torture_read_unlock(int idx)
952 {
953 	rcu_read_unlock_trace();
954 }
955 
956 static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
957 {
958 	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
959 }
960 
961 static struct rcu_torture_ops tasks_tracing_ops = {
962 	.ttype		= RCU_TASKS_TRACING_FLAVOR,
963 	.init		= rcu_sync_torture_init,
964 	.readlock	= tasks_tracing_torture_read_lock,
965 	.read_delay	= srcu_read_delay,  /* just reuse srcu's version. */
966 	.readunlock	= tasks_tracing_torture_read_unlock,
967 	.readlock_held	= rcu_read_lock_trace_held,
968 	.get_gp_seq	= rcu_no_completed,
969 	.deferred_free	= rcu_tasks_tracing_torture_deferred_free,
970 	.sync		= synchronize_rcu_tasks_trace,
971 	.exp_sync	= synchronize_rcu_tasks_trace,
972 	.call		= call_rcu_tasks_trace,
973 	.cb_barrier	= rcu_barrier_tasks_trace,
974 	.gp_kthread_dbg	= show_rcu_tasks_trace_gp_kthread,
975 	.cbflood_max	= 50000,
976 	.fqs		= NULL,
977 	.stats		= NULL,
978 	.irq_capable	= 1,
979 	.slow_gps	= 1,
980 	.name		= "tasks-tracing"
981 };
982 
983 #define TASKS_TRACING_OPS &tasks_tracing_ops,
984 
985 #else // #ifdef CONFIG_TASKS_TRACE_RCU
986 
987 #define TASKS_TRACING_OPS
988 
989 #endif // #else #ifdef CONFIG_TASKS_TRACE_RCU
990 
991 
992 static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
993 {
994 	if (!cur_ops->gp_diff)
995 		return new - old;
996 	return cur_ops->gp_diff(new, old);
997 }
998 
999 /*
1000  * RCU torture priority-boost testing.  Runs one real-time thread per
1001  * CPU for moderate bursts, repeatedly starting grace periods and waiting
1002  * for them to complete.  If a given grace period takes too long, we assume
1003  * that priority inversion has occurred.
1004  */
1005 
1006 static int old_rt_runtime = -1;
1007 
1008 static void rcu_torture_disable_rt_throttle(void)
1009 {
1010 	/*
1011 	 * Disable RT throttling so that rcutorture's boost threads don't get
1012 	 * throttled. Only possible if rcutorture is built-in otherwise the
1013 	 * user should manually do this by setting the sched_rt_period_us and
1014 	 * sched_rt_runtime sysctls.
1015 	 */
1016 	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
1017 		return;
1018 
1019 	old_rt_runtime = sysctl_sched_rt_runtime;
1020 	sysctl_sched_rt_runtime = -1;
1021 }
1022 
1023 static void rcu_torture_enable_rt_throttle(void)
1024 {
1025 	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
1026 		return;
1027 
1028 	sysctl_sched_rt_runtime = old_rt_runtime;
1029 	old_rt_runtime = -1;
1030 }
1031 
1032 static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start)
1033 {
1034 	int cpu;
1035 	static int dbg_done;
1036 	unsigned long end = jiffies;
1037 	bool gp_done;
1038 	unsigned long j;
1039 	static unsigned long last_persist;
1040 	unsigned long lp;
1041 	unsigned long mininterval = test_boost_duration * HZ - HZ / 2;
1042 
1043 	if (end - *start > mininterval) {
1044 		// Recheck after checking time to avoid false positives.
1045 		smp_mb(); // Time check before grace-period check.
1046 		if (cur_ops->poll_gp_state(gp_state))
1047 			return false; // passed, though perhaps just barely
1048 		if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
1049 			// At most one persisted message per boost test.
1050 			j = jiffies;
1051 			lp = READ_ONCE(last_persist);
1052 			if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp)
1053 				pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
1054 			return false; // passed on a technicality
1055 		}
1056 		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
1057 		n_rcu_torture_boost_failure++;
1058 		if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
1059 			pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
1060 				current->rt_priority, gp_state, end - *start);
1061 			cur_ops->gp_kthread_dbg();
1062 			// Recheck after print to flag grace period ending during splat.
1063 			gp_done = cur_ops->poll_gp_state(gp_state);
1064 			pr_info("Boost inversion: GP %lu %s.\n", gp_state,
1065 				gp_done ? "ended already" : "still pending");
1066 
1067 		}
1068 
1069 		return true; // failed
1070 	} else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) {
1071 		*start = jiffies;
1072 	}
1073 
1074 	return false; // passed
1075 }
1076 
1077 static int rcu_torture_boost(void *arg)
1078 {
1079 	unsigned long endtime;
1080 	unsigned long gp_state;
1081 	unsigned long gp_state_time;
1082 	unsigned long oldstarttime;
1083 
1084 	VERBOSE_TOROUT_STRING("rcu_torture_boost started");
1085 
1086 	/* Set real-time priority. */
1087 	sched_set_fifo_low(current);
1088 
1089 	/* Each pass through the following loop does one boost-test cycle. */
1090 	do {
1091 		bool failed = false; // Test failed already in this test interval
1092 		bool gp_initiated = false;
1093 
1094 		if (kthread_should_stop())
1095 			goto checkwait;
1096 
1097 		/* Wait for the next test interval. */
1098 		oldstarttime = READ_ONCE(boost_starttime);
1099 		while (time_before(jiffies, oldstarttime)) {
1100 			schedule_timeout_interruptible(oldstarttime - jiffies);
1101 			if (stutter_wait("rcu_torture_boost"))
1102 				sched_set_fifo_low(current);
1103 			if (torture_must_stop())
1104 				goto checkwait;
1105 		}
1106 
1107 		// Do one boost-test interval.
1108 		endtime = oldstarttime + test_boost_duration * HZ;
1109 		while (time_before(jiffies, endtime)) {
1110 			// Has current GP gone too long?
1111 			if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
1112 				failed = rcu_torture_boost_failed(gp_state, &gp_state_time);
1113 			// If we don't have a grace period in flight, start one.
1114 			if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) {
1115 				gp_state = cur_ops->start_gp_poll();
1116 				gp_initiated = true;
1117 				gp_state_time = jiffies;
1118 			}
1119 			if (stutter_wait("rcu_torture_boost")) {
1120 				sched_set_fifo_low(current);
1121 				// If the grace period already ended,
1122 				// we don't know when that happened, so
1123 				// start over.
1124 				if (cur_ops->poll_gp_state(gp_state))
1125 					gp_initiated = false;
1126 			}
1127 			if (torture_must_stop())
1128 				goto checkwait;
1129 		}
1130 
1131 		// In case the grace period extended beyond the end of the loop.
1132 		if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
1133 			rcu_torture_boost_failed(gp_state, &gp_state_time);
1134 
1135 		/*
1136 		 * Set the start time of the next test interval.
1137 		 * Yes, this is vulnerable to long delays, but such
1138 		 * delays simply cause a false negative for the next
1139 		 * interval.  Besides, we are running at RT priority,
1140 		 * so delays should be relatively rare.
1141 		 */
1142 		while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) {
1143 			if (mutex_trylock(&boost_mutex)) {
1144 				if (oldstarttime == boost_starttime) {
1145 					WRITE_ONCE(boost_starttime,
1146 						   jiffies + test_boost_interval * HZ);
1147 					n_rcu_torture_boosts++;
1148 				}
1149 				mutex_unlock(&boost_mutex);
1150 				break;
1151 			}
1152 			schedule_timeout_uninterruptible(1);
1153 		}
1154 
1155 		/* Go do the stutter. */
1156 checkwait:	if (stutter_wait("rcu_torture_boost"))
1157 			sched_set_fifo_low(current);
1158 	} while (!torture_must_stop());
1159 
1160 	/* Clean up and exit. */
1161 	while (!kthread_should_stop()) {
1162 		torture_shutdown_absorb("rcu_torture_boost");
1163 		schedule_timeout_uninterruptible(1);
1164 	}
1165 	torture_kthread_stopping("rcu_torture_boost");
1166 	return 0;
1167 }
1168 
1169 /*
1170  * RCU torture force-quiescent-state kthread.  Repeatedly induces
1171  * bursts of calls to force_quiescent_state(), increasing the probability
1172  * of occurrence of some important types of race conditions.
1173  */
1174 static int
1175 rcu_torture_fqs(void *arg)
1176 {
1177 	unsigned long fqs_resume_time;
1178 	int fqs_burst_remaining;
1179 	int oldnice = task_nice(current);
1180 
1181 	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
1182 	do {
1183 		fqs_resume_time = jiffies + fqs_stutter * HZ;
1184 		while (time_before(jiffies, fqs_resume_time) &&
1185 		       !kthread_should_stop()) {
1186 			schedule_timeout_interruptible(1);
1187 		}
1188 		fqs_burst_remaining = fqs_duration;
1189 		while (fqs_burst_remaining > 0 &&
1190 		       !kthread_should_stop()) {
1191 			cur_ops->fqs();
1192 			udelay(fqs_holdoff);
1193 			fqs_burst_remaining -= fqs_holdoff;
1194 		}
1195 		if (stutter_wait("rcu_torture_fqs"))
1196 			sched_set_normal(current, oldnice);
1197 	} while (!torture_must_stop());
1198 	torture_kthread_stopping("rcu_torture_fqs");
1199 	return 0;
1200 }
1201 
1202 // Used by writers to randomly choose from the available grace-period primitives.
1203 static int synctype[ARRAY_SIZE(rcu_torture_writer_state_names)] = { };
1204 static int nsynctypes;
1205 
1206 /*
1207  * Determine which grace-period primitives are available.
1208  */
1209 static void rcu_torture_write_types(void)
1210 {
1211 	bool gp_cond1 = gp_cond, gp_cond_exp1 = gp_cond_exp, gp_cond_full1 = gp_cond_full;
1212 	bool gp_cond_exp_full1 = gp_cond_exp_full, gp_exp1 = gp_exp, gp_poll_exp1 = gp_poll_exp;
1213 	bool gp_poll_exp_full1 = gp_poll_exp_full, gp_normal1 = gp_normal, gp_poll1 = gp_poll;
1214 	bool gp_poll_full1 = gp_poll_full, gp_sync1 = gp_sync;
1215 
1216 	/* Initialize synctype[] array.  If none set, take default. */
1217 	if (!gp_cond1 &&
1218 	    !gp_cond_exp1 &&
1219 	    !gp_cond_full1 &&
1220 	    !gp_cond_exp_full1 &&
1221 	    !gp_exp1 &&
1222 	    !gp_poll_exp1 &&
1223 	    !gp_poll_exp_full1 &&
1224 	    !gp_normal1 &&
1225 	    !gp_poll1 &&
1226 	    !gp_poll_full1 &&
1227 	    !gp_sync1) {
1228 		gp_cond1 = true;
1229 		gp_cond_exp1 = true;
1230 		gp_cond_full1 = true;
1231 		gp_cond_exp_full1 = true;
1232 		gp_exp1 = true;
1233 		gp_poll_exp1 = true;
1234 		gp_poll_exp_full1 = true;
1235 		gp_normal1 = true;
1236 		gp_poll1 = true;
1237 		gp_poll_full1 = true;
1238 		gp_sync1 = true;
1239 	}
1240 	if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
1241 		synctype[nsynctypes++] = RTWS_COND_GET;
1242 		pr_info("%s: Testing conditional GPs.\n", __func__);
1243 	} else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
1244 		pr_alert("%s: gp_cond without primitives.\n", __func__);
1245 	}
1246 	if (gp_cond_exp1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp) {
1247 		synctype[nsynctypes++] = RTWS_COND_GET_EXP;
1248 		pr_info("%s: Testing conditional expedited GPs.\n", __func__);
1249 	} else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) {
1250 		pr_alert("%s: gp_cond_exp without primitives.\n", __func__);
1251 	}
1252 	if (gp_cond_full1 && cur_ops->get_gp_state && cur_ops->cond_sync_full) {
1253 		synctype[nsynctypes++] = RTWS_COND_GET_FULL;
1254 		pr_info("%s: Testing conditional full-state GPs.\n", __func__);
1255 	} else if (gp_cond_full && (!cur_ops->get_gp_state || !cur_ops->cond_sync_full)) {
1256 		pr_alert("%s: gp_cond_full without primitives.\n", __func__);
1257 	}
1258 	if (gp_cond_exp_full1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp_full) {
1259 		synctype[nsynctypes++] = RTWS_COND_GET_EXP_FULL;
1260 		pr_info("%s: Testing conditional full-state expedited GPs.\n", __func__);
1261 	} else if (gp_cond_exp_full &&
1262 		   (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp_full)) {
1263 		pr_alert("%s: gp_cond_exp_full without primitives.\n", __func__);
1264 	}
1265 	if (gp_exp1 && cur_ops->exp_sync) {
1266 		synctype[nsynctypes++] = RTWS_EXP_SYNC;
1267 		pr_info("%s: Testing expedited GPs.\n", __func__);
1268 	} else if (gp_exp && !cur_ops->exp_sync) {
1269 		pr_alert("%s: gp_exp without primitives.\n", __func__);
1270 	}
1271 	if (gp_normal1 && cur_ops->deferred_free) {
1272 		synctype[nsynctypes++] = RTWS_DEF_FREE;
1273 		pr_info("%s: Testing asynchronous GPs.\n", __func__);
1274 	} else if (gp_normal && !cur_ops->deferred_free) {
1275 		pr_alert("%s: gp_normal without primitives.\n", __func__);
1276 	}
1277 	if (gp_poll1 && cur_ops->get_comp_state && cur_ops->same_gp_state &&
1278 	    cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
1279 		synctype[nsynctypes++] = RTWS_POLL_GET;
1280 		pr_info("%s: Testing polling GPs.\n", __func__);
1281 	} else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
1282 		pr_alert("%s: gp_poll without primitives.\n", __func__);
1283 	}
1284 	if (gp_poll_full1 && cur_ops->get_comp_state_full && cur_ops->same_gp_state_full
1285 	    && cur_ops->start_gp_poll_full && cur_ops->poll_gp_state_full) {
1286 		synctype[nsynctypes++] = RTWS_POLL_GET_FULL;
1287 		pr_info("%s: Testing polling full-state GPs.\n", __func__);
1288 	} else if (gp_poll_full && (!cur_ops->start_gp_poll_full || !cur_ops->poll_gp_state_full)) {
1289 		pr_alert("%s: gp_poll_full without primitives.\n", __func__);
1290 	}
1291 	if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) {
1292 		synctype[nsynctypes++] = RTWS_POLL_GET_EXP;
1293 		pr_info("%s: Testing polling expedited GPs.\n", __func__);
1294 	} else if (gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) {
1295 		pr_alert("%s: gp_poll_exp without primitives.\n", __func__);
1296 	}
1297 	if (gp_poll_exp_full1 && cur_ops->start_gp_poll_exp_full && cur_ops->poll_gp_state_full) {
1298 		synctype[nsynctypes++] = RTWS_POLL_GET_EXP_FULL;
1299 		pr_info("%s: Testing polling full-state expedited GPs.\n", __func__);
1300 	} else if (gp_poll_exp_full &&
1301 		   (!cur_ops->start_gp_poll_exp_full || !cur_ops->poll_gp_state_full)) {
1302 		pr_alert("%s: gp_poll_exp_full without primitives.\n", __func__);
1303 	}
1304 	if (gp_sync1 && cur_ops->sync) {
1305 		synctype[nsynctypes++] = RTWS_SYNC;
1306 		pr_info("%s: Testing normal GPs.\n", __func__);
1307 	} else if (gp_sync && !cur_ops->sync) {
1308 		pr_alert("%s: gp_sync without primitives.\n", __func__);
1309 	}
1310 }
1311 
1312 /*
1313  * Do the specified rcu_torture_writer() synchronous grace period,
1314  * while also testing out the polled APIs.  Note well that the single-CPU
1315  * grace-period optimizations must be accounted for.
1316  */
1317 static void do_rtws_sync(struct torture_random_state *trsp, void (*sync)(void))
1318 {
1319 	unsigned long cookie;
1320 	struct rcu_gp_oldstate cookie_full;
1321 	bool dopoll;
1322 	bool dopoll_full;
1323 	unsigned long r = torture_random(trsp);
1324 
1325 	dopoll = cur_ops->get_gp_state && cur_ops->poll_gp_state && !(r & 0x300);
1326 	dopoll_full = cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full && !(r & 0xc00);
1327 	if (dopoll || dopoll_full)
1328 		cpus_read_lock();
1329 	if (dopoll)
1330 		cookie = cur_ops->get_gp_state();
1331 	if (dopoll_full)
1332 		cur_ops->get_gp_state_full(&cookie_full);
1333 	if (cur_ops->poll_need_2gp && cur_ops->poll_need_2gp(dopoll, dopoll_full))
1334 		sync();
1335 	sync();
1336 	WARN_ONCE(dopoll && !cur_ops->poll_gp_state(cookie),
1337 		  "%s: Cookie check 3 failed %pS() online %*pbl.",
1338 		  __func__, sync, cpumask_pr_args(cpu_online_mask));
1339 	WARN_ONCE(dopoll_full && !cur_ops->poll_gp_state_full(&cookie_full),
1340 		  "%s: Cookie check 4 failed %pS() online %*pbl",
1341 		  __func__, sync, cpumask_pr_args(cpu_online_mask));
1342 	if (dopoll || dopoll_full)
1343 		cpus_read_unlock();
1344 }
1345 
1346 /*
1347  * RCU torture writer kthread.  Repeatedly substitutes a new structure
1348  * for that pointed to by rcu_torture_current, freeing the old structure
1349  * after a series of grace periods (the "pipeline").
1350  */
1351 static int
1352 rcu_torture_writer(void *arg)
1353 {
1354 	bool boot_ended;
1355 	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
1356 	unsigned long cookie;
1357 	struct rcu_gp_oldstate cookie_full;
1358 	int expediting = 0;
1359 	unsigned long gp_snap;
1360 	unsigned long gp_snap1;
1361 	struct rcu_gp_oldstate gp_snap_full;
1362 	struct rcu_gp_oldstate gp_snap1_full;
1363 	int i;
1364 	int idx;
1365 	int oldnice = task_nice(current);
1366 	struct rcu_gp_oldstate rgo[NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE];
1367 	struct rcu_torture *rp;
1368 	struct rcu_torture *old_rp;
1369 	static DEFINE_TORTURE_RANDOM(rand);
1370 	bool stutter_waited;
1371 	unsigned long ulo[NUM_ACTIVE_RCU_POLL_OLDSTATE];
1372 
1373 	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
1374 	if (!can_expedite)
1375 		pr_alert("%s" TORTURE_FLAG
1376 			 " GP expediting controlled from boot/sysfs for %s.\n",
1377 			 torture_type, cur_ops->name);
1378 	if (WARN_ONCE(nsynctypes == 0,
1379 		      "%s: No update-side primitives.\n", __func__)) {
1380 		/*
1381 		 * No updates primitives, so don't try updating.
1382 		 * The resulting test won't be testing much, hence the
1383 		 * above WARN_ONCE().
1384 		 */
1385 		rcu_torture_writer_state = RTWS_STOPPING;
1386 		torture_kthread_stopping("rcu_torture_writer");
1387 		return 0;
1388 	}
1389 
1390 	do {
1391 		rcu_torture_writer_state = RTWS_FIXED_DELAY;
1392 		torture_hrtimeout_us(500, 1000, &rand);
1393 		rp = rcu_torture_alloc();
1394 		if (rp == NULL)
1395 			continue;
1396 		rp->rtort_pipe_count = 0;
1397 		rcu_torture_writer_state = RTWS_DELAY;
1398 		udelay(torture_random(&rand) & 0x3ff);
1399 		rcu_torture_writer_state = RTWS_REPLACE;
1400 		old_rp = rcu_dereference_check(rcu_torture_current,
1401 					       current == writer_task);
1402 		rp->rtort_mbtest = 1;
1403 		rcu_assign_pointer(rcu_torture_current, rp);
1404 		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
1405 		if (old_rp) {
1406 			i = old_rp->rtort_pipe_count;
1407 			if (i > RCU_TORTURE_PIPE_LEN)
1408 				i = RCU_TORTURE_PIPE_LEN;
1409 			atomic_inc(&rcu_torture_wcount[i]);
1410 			WRITE_ONCE(old_rp->rtort_pipe_count,
1411 				   old_rp->rtort_pipe_count + 1);
1412 
1413 			// Make sure readers block polled grace periods.
1414 			if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
1415 				idx = cur_ops->readlock();
1416 				cookie = cur_ops->get_gp_state();
1417 				WARN_ONCE(cur_ops->poll_gp_state(cookie),
1418 					  "%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
1419 					  __func__,
1420 					  rcu_torture_writer_state_getname(),
1421 					  rcu_torture_writer_state,
1422 					  cookie, cur_ops->get_gp_state());
1423 				if (cur_ops->get_gp_completed) {
1424 					cookie = cur_ops->get_gp_completed();
1425 					WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie));
1426 				}
1427 				cur_ops->readunlock(idx);
1428 			}
1429 			if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) {
1430 				idx = cur_ops->readlock();
1431 				cur_ops->get_gp_state_full(&cookie_full);
1432 				WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
1433 					  "%s: Cookie check 5 failed %s(%d) online %*pbl\n",
1434 					  __func__,
1435 					  rcu_torture_writer_state_getname(),
1436 					  rcu_torture_writer_state,
1437 					  cpumask_pr_args(cpu_online_mask));
1438 				if (cur_ops->get_gp_completed_full) {
1439 					cur_ops->get_gp_completed_full(&cookie_full);
1440 					WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full));
1441 				}
1442 				cur_ops->readunlock(idx);
1443 			}
1444 			switch (synctype[torture_random(&rand) % nsynctypes]) {
1445 			case RTWS_DEF_FREE:
1446 				rcu_torture_writer_state = RTWS_DEF_FREE;
1447 				cur_ops->deferred_free(old_rp);
1448 				break;
1449 			case RTWS_EXP_SYNC:
1450 				rcu_torture_writer_state = RTWS_EXP_SYNC;
1451 				do_rtws_sync(&rand, cur_ops->exp_sync);
1452 				rcu_torture_pipe_update(old_rp);
1453 				break;
1454 			case RTWS_COND_GET:
1455 				rcu_torture_writer_state = RTWS_COND_GET;
1456 				gp_snap = cur_ops->get_gp_state();
1457 				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1458 				rcu_torture_writer_state = RTWS_COND_SYNC;
1459 				cur_ops->cond_sync(gp_snap);
1460 				rcu_torture_pipe_update(old_rp);
1461 				break;
1462 			case RTWS_COND_GET_EXP:
1463 				rcu_torture_writer_state = RTWS_COND_GET_EXP;
1464 				gp_snap = cur_ops->get_gp_state_exp();
1465 				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1466 				rcu_torture_writer_state = RTWS_COND_SYNC_EXP;
1467 				cur_ops->cond_sync_exp(gp_snap);
1468 				rcu_torture_pipe_update(old_rp);
1469 				break;
1470 			case RTWS_COND_GET_FULL:
1471 				rcu_torture_writer_state = RTWS_COND_GET_FULL;
1472 				cur_ops->get_gp_state_full(&gp_snap_full);
1473 				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1474 				rcu_torture_writer_state = RTWS_COND_SYNC_FULL;
1475 				cur_ops->cond_sync_full(&gp_snap_full);
1476 				rcu_torture_pipe_update(old_rp);
1477 				break;
1478 			case RTWS_COND_GET_EXP_FULL:
1479 				rcu_torture_writer_state = RTWS_COND_GET_EXP_FULL;
1480 				cur_ops->get_gp_state_full(&gp_snap_full);
1481 				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1482 				rcu_torture_writer_state = RTWS_COND_SYNC_EXP_FULL;
1483 				cur_ops->cond_sync_exp_full(&gp_snap_full);
1484 				rcu_torture_pipe_update(old_rp);
1485 				break;
1486 			case RTWS_POLL_GET:
1487 				rcu_torture_writer_state = RTWS_POLL_GET;
1488 				for (i = 0; i < ARRAY_SIZE(ulo); i++)
1489 					ulo[i] = cur_ops->get_comp_state();
1490 				gp_snap = cur_ops->start_gp_poll();
1491 				rcu_torture_writer_state = RTWS_POLL_WAIT;
1492 				while (!cur_ops->poll_gp_state(gp_snap)) {
1493 					gp_snap1 = cur_ops->get_gp_state();
1494 					for (i = 0; i < ARRAY_SIZE(ulo); i++)
1495 						if (cur_ops->poll_gp_state(ulo[i]) ||
1496 						    cur_ops->same_gp_state(ulo[i], gp_snap1)) {
1497 							ulo[i] = gp_snap1;
1498 							break;
1499 						}
1500 					WARN_ON_ONCE(i >= ARRAY_SIZE(ulo));
1501 					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1502 								  &rand);
1503 				}
1504 				rcu_torture_pipe_update(old_rp);
1505 				break;
1506 			case RTWS_POLL_GET_FULL:
1507 				rcu_torture_writer_state = RTWS_POLL_GET_FULL;
1508 				for (i = 0; i < ARRAY_SIZE(rgo); i++)
1509 					cur_ops->get_comp_state_full(&rgo[i]);
1510 				cur_ops->start_gp_poll_full(&gp_snap_full);
1511 				rcu_torture_writer_state = RTWS_POLL_WAIT_FULL;
1512 				while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
1513 					cur_ops->get_gp_state_full(&gp_snap1_full);
1514 					for (i = 0; i < ARRAY_SIZE(rgo); i++)
1515 						if (cur_ops->poll_gp_state_full(&rgo[i]) ||
1516 						    cur_ops->same_gp_state_full(&rgo[i],
1517 										&gp_snap1_full)) {
1518 							rgo[i] = gp_snap1_full;
1519 							break;
1520 						}
1521 					WARN_ON_ONCE(i >= ARRAY_SIZE(rgo));
1522 					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1523 								  &rand);
1524 				}
1525 				rcu_torture_pipe_update(old_rp);
1526 				break;
1527 			case RTWS_POLL_GET_EXP:
1528 				rcu_torture_writer_state = RTWS_POLL_GET_EXP;
1529 				gp_snap = cur_ops->start_gp_poll_exp();
1530 				rcu_torture_writer_state = RTWS_POLL_WAIT_EXP;
1531 				while (!cur_ops->poll_gp_state_exp(gp_snap))
1532 					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1533 								  &rand);
1534 				rcu_torture_pipe_update(old_rp);
1535 				break;
1536 			case RTWS_POLL_GET_EXP_FULL:
1537 				rcu_torture_writer_state = RTWS_POLL_GET_EXP_FULL;
1538 				cur_ops->start_gp_poll_exp_full(&gp_snap_full);
1539 				rcu_torture_writer_state = RTWS_POLL_WAIT_EXP_FULL;
1540 				while (!cur_ops->poll_gp_state_full(&gp_snap_full))
1541 					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1542 								  &rand);
1543 				rcu_torture_pipe_update(old_rp);
1544 				break;
1545 			case RTWS_SYNC:
1546 				rcu_torture_writer_state = RTWS_SYNC;
1547 				do_rtws_sync(&rand, cur_ops->sync);
1548 				rcu_torture_pipe_update(old_rp);
1549 				break;
1550 			default:
1551 				WARN_ON_ONCE(1);
1552 				break;
1553 			}
1554 		}
1555 		WRITE_ONCE(rcu_torture_current_version,
1556 			   rcu_torture_current_version + 1);
1557 		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
1558 		if (can_expedite &&
1559 		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
1560 			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
1561 			if (expediting >= 0)
1562 				rcu_expedite_gp();
1563 			else
1564 				rcu_unexpedite_gp();
1565 			if (++expediting > 3)
1566 				expediting = -expediting;
1567 		} else if (!can_expedite) { /* Disabled during boot, recheck. */
1568 			can_expedite = !rcu_gp_is_expedited() &&
1569 				       !rcu_gp_is_normal();
1570 		}
1571 		rcu_torture_writer_state = RTWS_STUTTER;
1572 		boot_ended = rcu_inkernel_boot_has_ended();
1573 		stutter_waited = stutter_wait("rcu_torture_writer");
1574 		if (stutter_waited &&
1575 		    !atomic_read(&rcu_fwd_cb_nodelay) &&
1576 		    !cur_ops->slow_gps &&
1577 		    !torture_must_stop() &&
1578 		    boot_ended)
1579 			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
1580 				if (list_empty(&rcu_tortures[i].rtort_free) &&
1581 				    rcu_access_pointer(rcu_torture_current) !=
1582 				    &rcu_tortures[i]) {
1583 					tracing_off();
1584 					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
1585 					rcu_ftrace_dump(DUMP_ALL);
1586 				}
1587 		if (stutter_waited)
1588 			sched_set_normal(current, oldnice);
1589 	} while (!torture_must_stop());
1590 	rcu_torture_current = NULL;  // Let stats task know that we are done.
1591 	/* Reset expediting back to unexpedited. */
1592 	if (expediting > 0)
1593 		expediting = -expediting;
1594 	while (can_expedite && expediting++ < 0)
1595 		rcu_unexpedite_gp();
1596 	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
1597 	if (!can_expedite)
1598 		pr_alert("%s" TORTURE_FLAG
1599 			 " Dynamic grace-period expediting was disabled.\n",
1600 			 torture_type);
1601 	rcu_torture_writer_state = RTWS_STOPPING;
1602 	torture_kthread_stopping("rcu_torture_writer");
1603 	return 0;
1604 }
1605 
1606 /*
1607  * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
1608  * delay between calls.
1609  */
1610 static int
1611 rcu_torture_fakewriter(void *arg)
1612 {
1613 	unsigned long gp_snap;
1614 	struct rcu_gp_oldstate gp_snap_full;
1615 	DEFINE_TORTURE_RANDOM(rand);
1616 
1617 	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
1618 	set_user_nice(current, MAX_NICE);
1619 
1620 	if (WARN_ONCE(nsynctypes == 0,
1621 		      "%s: No update-side primitives.\n", __func__)) {
1622 		/*
1623 		 * No updates primitives, so don't try updating.
1624 		 * The resulting test won't be testing much, hence the
1625 		 * above WARN_ONCE().
1626 		 */
1627 		torture_kthread_stopping("rcu_torture_fakewriter");
1628 		return 0;
1629 	}
1630 
1631 	do {
1632 		torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand);
1633 		if (cur_ops->cb_barrier != NULL &&
1634 		    torture_random(&rand) % (nfakewriters * 8) == 0) {
1635 			cur_ops->cb_barrier();
1636 		} else {
1637 			switch (synctype[torture_random(&rand) % nsynctypes]) {
1638 			case RTWS_DEF_FREE:
1639 				break;
1640 			case RTWS_EXP_SYNC:
1641 				cur_ops->exp_sync();
1642 				break;
1643 			case RTWS_COND_GET:
1644 				gp_snap = cur_ops->get_gp_state();
1645 				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1646 				cur_ops->cond_sync(gp_snap);
1647 				break;
1648 			case RTWS_COND_GET_EXP:
1649 				gp_snap = cur_ops->get_gp_state_exp();
1650 				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1651 				cur_ops->cond_sync_exp(gp_snap);
1652 				break;
1653 			case RTWS_COND_GET_FULL:
1654 				cur_ops->get_gp_state_full(&gp_snap_full);
1655 				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1656 				cur_ops->cond_sync_full(&gp_snap_full);
1657 				break;
1658 			case RTWS_COND_GET_EXP_FULL:
1659 				cur_ops->get_gp_state_full(&gp_snap_full);
1660 				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1661 				cur_ops->cond_sync_exp_full(&gp_snap_full);
1662 				break;
1663 			case RTWS_POLL_GET:
1664 				gp_snap = cur_ops->start_gp_poll();
1665 				while (!cur_ops->poll_gp_state(gp_snap)) {
1666 					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1667 								  &rand);
1668 				}
1669 				break;
1670 			case RTWS_POLL_GET_FULL:
1671 				cur_ops->start_gp_poll_full(&gp_snap_full);
1672 				while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
1673 					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1674 								  &rand);
1675 				}
1676 				break;
1677 			case RTWS_POLL_GET_EXP:
1678 				gp_snap = cur_ops->start_gp_poll_exp();
1679 				while (!cur_ops->poll_gp_state_exp(gp_snap)) {
1680 					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1681 								  &rand);
1682 				}
1683 				break;
1684 			case RTWS_POLL_GET_EXP_FULL:
1685 				cur_ops->start_gp_poll_exp_full(&gp_snap_full);
1686 				while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
1687 					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1688 								  &rand);
1689 				}
1690 				break;
1691 			case RTWS_SYNC:
1692 				cur_ops->sync();
1693 				break;
1694 			default:
1695 				WARN_ON_ONCE(1);
1696 				break;
1697 			}
1698 		}
1699 		stutter_wait("rcu_torture_fakewriter");
1700 	} while (!torture_must_stop());
1701 
1702 	torture_kthread_stopping("rcu_torture_fakewriter");
1703 	return 0;
1704 }
1705 
1706 static void rcu_torture_timer_cb(struct rcu_head *rhp)
1707 {
1708 	kfree(rhp);
1709 }
1710 
1711 // Set up and carry out testing of RCU's global memory ordering
1712 static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
1713 					struct torture_random_state *trsp)
1714 {
1715 	unsigned long loops;
1716 	int noc = torture_num_online_cpus();
1717 	int rdrchked;
1718 	int rdrchker;
1719 	struct rcu_torture_reader_check *rtrcp; // Me.
1720 	struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking.
1721 	struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked.
1722 	struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me.
1723 
1724 	if (myid < 0)
1725 		return; // Don't try this from timer handlers.
1726 
1727 	// Increment my counter.
1728 	rtrcp = &rcu_torture_reader_mbchk[myid];
1729 	WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1);
1730 
1731 	// Attempt to assign someone else some checking work.
1732 	rdrchked = torture_random(trsp) % nrealreaders;
1733 	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
1734 	rdrchker = torture_random(trsp) % nrealreaders;
1735 	rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker];
1736 	if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker &&
1737 	    smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below.
1738 	    !READ_ONCE(rtp->rtort_chkp) &&
1739 	    !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below.
1740 		rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops);
1741 		WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0);
1742 		rtrcp->rtc_chkrdr = rdrchked;
1743 		WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends.
1744 		if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) ||
1745 		    cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp))
1746 			(void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out.
1747 	}
1748 
1749 	// If assigned some completed work, do it!
1750 	rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner);
1751 	if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready))
1752 		return; // No work or work not yet ready.
1753 	rdrchked = rtrcp_assigner->rtc_chkrdr;
1754 	if (WARN_ON_ONCE(rdrchked < 0))
1755 		return;
1756 	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
1757 	loops = READ_ONCE(rtrcp_chked->rtc_myloops);
1758 	atomic_inc(&n_rcu_torture_mbchk_tries);
1759 	if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops))
1760 		atomic_inc(&n_rcu_torture_mbchk_fail);
1761 	rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2;
1762 	rtrcp_assigner->rtc_ready = 0;
1763 	smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work.
1764 	smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign.
1765 }
1766 
1767 /*
1768  * Do one extension of an RCU read-side critical section using the
1769  * current reader state in readstate (set to zero for initial entry
1770  * to extended critical section), set the new state as specified by
1771  * newstate (set to zero for final exit from extended critical section),
1772  * and random-number-generator state in trsp.  If this is neither the
1773  * beginning or end of the critical section and if there was actually a
1774  * change, do a ->read_delay().
1775  */
1776 static void rcutorture_one_extend(int *readstate, int newstate,
1777 				  struct torture_random_state *trsp,
1778 				  struct rt_read_seg *rtrsp)
1779 {
1780 	unsigned long flags;
1781 	int idxnew1 = -1;
1782 	int idxnew2 = -1;
1783 	int idxold1 = *readstate;
1784 	int idxold2 = idxold1;
1785 	int statesnew = ~*readstate & newstate;
1786 	int statesold = *readstate & ~newstate;
1787 
1788 	WARN_ON_ONCE(idxold2 < 0);
1789 	WARN_ON_ONCE((idxold2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
1790 	rtrsp->rt_readstate = newstate;
1791 
1792 	/* First, put new protection in place to avoid critical-section gap. */
1793 	if (statesnew & RCUTORTURE_RDR_BH)
1794 		local_bh_disable();
1795 	if (statesnew & RCUTORTURE_RDR_RBH)
1796 		rcu_read_lock_bh();
1797 	if (statesnew & RCUTORTURE_RDR_IRQ)
1798 		local_irq_disable();
1799 	if (statesnew & RCUTORTURE_RDR_PREEMPT)
1800 		preempt_disable();
1801 	if (statesnew & RCUTORTURE_RDR_SCHED)
1802 		rcu_read_lock_sched();
1803 	if (statesnew & RCUTORTURE_RDR_RCU_1)
1804 		idxnew1 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_1;
1805 	if (statesnew & RCUTORTURE_RDR_RCU_2)
1806 		idxnew2 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_2;
1807 
1808 	/*
1809 	 * Next, remove old protection, in decreasing order of strength
1810 	 * to avoid unlock paths that aren't safe in the stronger
1811 	 * context. Namely: BH can not be enabled with disabled interrupts.
1812 	 * Additionally PREEMPT_RT requires that BH is enabled in preemptible
1813 	 * context.
1814 	 */
1815 	if (statesold & RCUTORTURE_RDR_IRQ)
1816 		local_irq_enable();
1817 	if (statesold & RCUTORTURE_RDR_PREEMPT)
1818 		preempt_enable();
1819 	if (statesold & RCUTORTURE_RDR_SCHED)
1820 		rcu_read_unlock_sched();
1821 	if (statesold & RCUTORTURE_RDR_BH)
1822 		local_bh_enable();
1823 	if (statesold & RCUTORTURE_RDR_RBH)
1824 		rcu_read_unlock_bh();
1825 	if (statesold & RCUTORTURE_RDR_RCU_2) {
1826 		cur_ops->readunlock((idxold2 >> RCUTORTURE_RDR_SHIFT_2) & 0x1);
1827 		WARN_ON_ONCE(idxnew2 != -1);
1828 		idxold2 = 0;
1829 	}
1830 	if (statesold & RCUTORTURE_RDR_RCU_1) {
1831 		bool lockit;
1832 
1833 		lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff);
1834 		if (lockit)
1835 			raw_spin_lock_irqsave(&current->pi_lock, flags);
1836 		cur_ops->readunlock((idxold1 >> RCUTORTURE_RDR_SHIFT_1) & 0x1);
1837 		WARN_ON_ONCE(idxnew1 != -1);
1838 		idxold1 = 0;
1839 		if (lockit)
1840 			raw_spin_unlock_irqrestore(&current->pi_lock, flags);
1841 	}
1842 
1843 	/* Delay if neither beginning nor end and there was a change. */
1844 	if ((statesnew || statesold) && *readstate && newstate)
1845 		cur_ops->read_delay(trsp, rtrsp);
1846 
1847 	/* Update the reader state. */
1848 	if (idxnew1 == -1)
1849 		idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1;
1850 	WARN_ON_ONCE(idxnew1 < 0);
1851 	if (WARN_ON_ONCE((idxnew1 >> RCUTORTURE_RDR_SHIFT_1) > 1))
1852 		pr_info("Unexpected idxnew1 value of %#x\n", idxnew1);
1853 	if (idxnew2 == -1)
1854 		idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2;
1855 	WARN_ON_ONCE(idxnew2 < 0);
1856 	WARN_ON_ONCE((idxnew2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
1857 	*readstate = idxnew1 | idxnew2 | newstate;
1858 	WARN_ON_ONCE(*readstate < 0);
1859 	if (WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT_2) > 1))
1860 		pr_info("Unexpected idxnew2 value of %#x\n", idxnew2);
1861 }
1862 
1863 /* Return the biggest extendables mask given current RCU and boot parameters. */
1864 static int rcutorture_extend_mask_max(void)
1865 {
1866 	int mask;
1867 
1868 	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
1869 	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
1870 	mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2;
1871 	return mask;
1872 }
1873 
1874 /* Return a random protection state mask, but with at least one bit set. */
1875 static int
1876 rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
1877 {
1878 	int mask = rcutorture_extend_mask_max();
1879 	unsigned long randmask1 = torture_random(trsp) >> 8;
1880 	unsigned long randmask2 = randmask1 >> 3;
1881 	unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
1882 	unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
1883 	unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
1884 
1885 	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1);
1886 	/* Mostly only one bit (need preemption!), sometimes lots of bits. */
1887 	if (!(randmask1 & 0x7))
1888 		mask = mask & randmask2;
1889 	else
1890 		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
1891 
1892 	// Can't have nested RCU reader without outer RCU reader.
1893 	if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) {
1894 		if (oldmask & RCUTORTURE_RDR_RCU_1)
1895 			mask &= ~RCUTORTURE_RDR_RCU_2;
1896 		else
1897 			mask |= RCUTORTURE_RDR_RCU_1;
1898 	}
1899 
1900 	/*
1901 	 * Can't enable bh w/irq disabled.
1902 	 */
1903 	if (mask & RCUTORTURE_RDR_IRQ)
1904 		mask |= oldmask & bhs;
1905 
1906 	/*
1907 	 * Ideally these sequences would be detected in debug builds
1908 	 * (regardless of RT), but until then don't stop testing
1909 	 * them on non-RT.
1910 	 */
1911 	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
1912 		/* Can't modify BH in atomic context */
1913 		if (oldmask & preempts_irq)
1914 			mask &= ~bhs;
1915 		if ((oldmask | mask) & preempts_irq)
1916 			mask |= oldmask & bhs;
1917 	}
1918 
1919 	return mask ?: RCUTORTURE_RDR_RCU_1;
1920 }
1921 
1922 /*
1923  * Do a randomly selected number of extensions of an existing RCU read-side
1924  * critical section.
1925  */
1926 static struct rt_read_seg *
1927 rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
1928 		       struct rt_read_seg *rtrsp)
1929 {
1930 	int i;
1931 	int j;
1932 	int mask = rcutorture_extend_mask_max();
1933 
1934 	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
1935 	if (!((mask - 1) & mask))
1936 		return rtrsp;  /* Current RCU reader not extendable. */
1937 	/* Bias towards larger numbers of loops. */
1938 	i = (torture_random(trsp) >> 3);
1939 	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
1940 	for (j = 0; j < i; j++) {
1941 		mask = rcutorture_extend_mask(*readstate, trsp);
1942 		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
1943 	}
1944 	return &rtrsp[j];
1945 }
1946 
1947 /*
1948  * Do one read-side critical section, returning false if there was
1949  * no data to read.  Can be invoked both from process context and
1950  * from a timer handler.
1951  */
1952 static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
1953 {
1954 	bool checkpolling = !(torture_random(trsp) & 0xfff);
1955 	unsigned long cookie;
1956 	struct rcu_gp_oldstate cookie_full;
1957 	int i;
1958 	unsigned long started;
1959 	unsigned long completed;
1960 	int newstate;
1961 	struct rcu_torture *p;
1962 	int pipe_count;
1963 	int readstate = 0;
1964 	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
1965 	struct rt_read_seg *rtrsp = &rtseg[0];
1966 	struct rt_read_seg *rtrsp1;
1967 	unsigned long long ts;
1968 
1969 	WARN_ON_ONCE(!rcu_is_watching());
1970 	newstate = rcutorture_extend_mask(readstate, trsp);
1971 	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
1972 	if (checkpolling) {
1973 		if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
1974 			cookie = cur_ops->get_gp_state();
1975 		if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
1976 			cur_ops->get_gp_state_full(&cookie_full);
1977 	}
1978 	started = cur_ops->get_gp_seq();
1979 	ts = rcu_trace_clock_local();
1980 	p = rcu_dereference_check(rcu_torture_current,
1981 				  !cur_ops->readlock_held || cur_ops->readlock_held());
1982 	if (p == NULL) {
1983 		/* Wait for rcu_torture_writer to get underway */
1984 		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
1985 		return false;
1986 	}
1987 	if (p->rtort_mbtest == 0)
1988 		atomic_inc(&n_rcu_torture_mberror);
1989 	rcu_torture_reader_do_mbchk(myid, p, trsp);
1990 	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
1991 	preempt_disable();
1992 	pipe_count = READ_ONCE(p->rtort_pipe_count);
1993 	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
1994 		/* Should not happen, but... */
1995 		pipe_count = RCU_TORTURE_PIPE_LEN;
1996 	}
1997 	completed = cur_ops->get_gp_seq();
1998 	if (pipe_count > 1) {
1999 		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
2000 					  ts, started, completed);
2001 		rcu_ftrace_dump(DUMP_ALL);
2002 	}
2003 	__this_cpu_inc(rcu_torture_count[pipe_count]);
2004 	completed = rcutorture_seq_diff(completed, started);
2005 	if (completed > RCU_TORTURE_PIPE_LEN) {
2006 		/* Should not happen, but... */
2007 		completed = RCU_TORTURE_PIPE_LEN;
2008 	}
2009 	__this_cpu_inc(rcu_torture_batch[completed]);
2010 	preempt_enable();
2011 	if (checkpolling) {
2012 		if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
2013 			WARN_ONCE(cur_ops->poll_gp_state(cookie),
2014 				  "%s: Cookie check 2 failed %s(%d) %lu->%lu\n",
2015 				  __func__,
2016 				  rcu_torture_writer_state_getname(),
2017 				  rcu_torture_writer_state,
2018 				  cookie, cur_ops->get_gp_state());
2019 		if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
2020 			WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
2021 				  "%s: Cookie check 6 failed %s(%d) online %*pbl\n",
2022 				  __func__,
2023 				  rcu_torture_writer_state_getname(),
2024 				  rcu_torture_writer_state,
2025 				  cpumask_pr_args(cpu_online_mask));
2026 	}
2027 	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
2028 	WARN_ON_ONCE(readstate);
2029 	// This next splat is expected behavior if leakpointer, especially
2030 	// for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
2031 	WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1);
2032 
2033 	/* If error or close call, record the sequence of reader protections. */
2034 	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
2035 		i = 0;
2036 		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
2037 			err_segs[i++] = *rtrsp1;
2038 		rt_read_nsegs = i;
2039 	}
2040 
2041 	return true;
2042 }
2043 
2044 static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
2045 
2046 /*
2047  * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
2048  * incrementing the corresponding element of the pipeline array.  The
2049  * counter in the element should never be greater than 1, otherwise, the
2050  * RCU implementation is broken.
2051  */
2052 static void rcu_torture_timer(struct timer_list *unused)
2053 {
2054 	atomic_long_inc(&n_rcu_torture_timers);
2055 	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1);
2056 
2057 	/* Test call_rcu() invocation from interrupt handler. */
2058 	if (cur_ops->call) {
2059 		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);
2060 
2061 		if (rhp)
2062 			cur_ops->call(rhp, rcu_torture_timer_cb);
2063 	}
2064 }
2065 
2066 /*
2067  * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
2068  * incrementing the corresponding element of the pipeline array.  The
2069  * counter in the element should never be greater than 1, otherwise, the
2070  * RCU implementation is broken.
2071  */
2072 static int
2073 rcu_torture_reader(void *arg)
2074 {
2075 	unsigned long lastsleep = jiffies;
2076 	long myid = (long)arg;
2077 	int mynumonline = myid;
2078 	DEFINE_TORTURE_RANDOM(rand);
2079 	struct timer_list t;
2080 
2081 	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
2082 	set_user_nice(current, MAX_NICE);
2083 	if (irqreader && cur_ops->irq_capable)
2084 		timer_setup_on_stack(&t, rcu_torture_timer, 0);
2085 	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2086 	do {
2087 		if (irqreader && cur_ops->irq_capable) {
2088 			if (!timer_pending(&t))
2089 				mod_timer(&t, jiffies + 1);
2090 		}
2091 		if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop())
2092 			schedule_timeout_interruptible(HZ);
2093 		if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
2094 			torture_hrtimeout_us(500, 1000, &rand);
2095 			lastsleep = jiffies + 10;
2096 		}
2097 		while (torture_num_online_cpus() < mynumonline && !torture_must_stop())
2098 			schedule_timeout_interruptible(HZ / 5);
2099 		stutter_wait("rcu_torture_reader");
2100 	} while (!torture_must_stop());
2101 	if (irqreader && cur_ops->irq_capable) {
2102 		del_timer_sync(&t);
2103 		destroy_timer_on_stack(&t);
2104 	}
2105 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2106 	torture_kthread_stopping("rcu_torture_reader");
2107 	return 0;
2108 }
2109 
2110 /*
2111  * Randomly Toggle CPUs' callback-offload state.  This uses hrtimers to
2112  * increase race probabilities and fuzzes the interval between toggling.
2113  */
2114 static int rcu_nocb_toggle(void *arg)
2115 {
2116 	int cpu;
2117 	int maxcpu = -1;
2118 	int oldnice = task_nice(current);
2119 	long r;
2120 	DEFINE_TORTURE_RANDOM(rand);
2121 	ktime_t toggle_delay;
2122 	unsigned long toggle_fuzz;
2123 	ktime_t toggle_interval = ms_to_ktime(nocbs_toggle);
2124 
2125 	VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started");
2126 	while (!rcu_inkernel_boot_has_ended())
2127 		schedule_timeout_interruptible(HZ / 10);
2128 	for_each_online_cpu(cpu)
2129 		maxcpu = cpu;
2130 	WARN_ON(maxcpu < 0);
2131 	if (toggle_interval > ULONG_MAX)
2132 		toggle_fuzz = ULONG_MAX >> 3;
2133 	else
2134 		toggle_fuzz = toggle_interval >> 3;
2135 	if (toggle_fuzz <= 0)
2136 		toggle_fuzz = NSEC_PER_USEC;
2137 	do {
2138 		r = torture_random(&rand);
2139 		cpu = (r >> 4) % (maxcpu + 1);
2140 		if (r & 0x1) {
2141 			rcu_nocb_cpu_offload(cpu);
2142 			atomic_long_inc(&n_nocb_offload);
2143 		} else {
2144 			rcu_nocb_cpu_deoffload(cpu);
2145 			atomic_long_inc(&n_nocb_deoffload);
2146 		}
2147 		toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval;
2148 		set_current_state(TASK_INTERRUPTIBLE);
2149 		schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL);
2150 		if (stutter_wait("rcu_nocb_toggle"))
2151 			sched_set_normal(current, oldnice);
2152 	} while (!torture_must_stop());
2153 	torture_kthread_stopping("rcu_nocb_toggle");
2154 	return 0;
2155 }
2156 
2157 /*
2158  * Print torture statistics.  Caller must ensure that there is only
2159  * one call to this function at a given time!!!  This is normally
2160  * accomplished by relying on the module system to only have one copy
2161  * of the module loaded, and then by giving the rcu_torture_stats
2162  * kthread full control (or the init/cleanup functions when rcu_torture_stats
2163  * thread is not running).
2164  */
2165 static void
2166 rcu_torture_stats_print(void)
2167 {
2168 	int cpu;
2169 	int i;
2170 	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
2171 	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
2172 	struct rcu_torture *rtcp;
2173 	static unsigned long rtcv_snap = ULONG_MAX;
2174 	static bool splatted;
2175 	struct task_struct *wtp;
2176 
2177 	for_each_possible_cpu(cpu) {
2178 		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
2179 			pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
2180 			batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
2181 		}
2182 	}
2183 	for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) {
2184 		if (pipesummary[i] != 0)
2185 			break;
2186 	}
2187 
2188 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2189 	rtcp = rcu_access_pointer(rcu_torture_current);
2190 	pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
2191 		rtcp,
2192 		rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
2193 		rcu_torture_current_version,
2194 		list_empty(&rcu_torture_freelist),
2195 		atomic_read(&n_rcu_torture_alloc),
2196 		atomic_read(&n_rcu_torture_alloc_fail),
2197 		atomic_read(&n_rcu_torture_free));
2198 	pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld ",
2199 		atomic_read(&n_rcu_torture_mberror),
2200 		atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries),
2201 		n_rcu_torture_barrier_error,
2202 		n_rcu_torture_boost_ktrerror);
2203 	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
2204 		n_rcu_torture_boost_failure,
2205 		n_rcu_torture_boosts,
2206 		atomic_long_read(&n_rcu_torture_timers));
2207 	torture_onoff_stats();
2208 	pr_cont("barrier: %ld/%ld:%ld ",
2209 		data_race(n_barrier_successes),
2210 		data_race(n_barrier_attempts),
2211 		data_race(n_rcu_torture_barrier_error));
2212 	pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic.
2213 	pr_cont("nocb-toggles: %ld:%ld\n",
2214 		atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload));
2215 
2216 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2217 	if (atomic_read(&n_rcu_torture_mberror) ||
2218 	    atomic_read(&n_rcu_torture_mbchk_fail) ||
2219 	    n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
2220 	    n_rcu_torture_boost_failure || i > 1) {
2221 		pr_cont("%s", "!!! ");
2222 		atomic_inc(&n_rcu_torture_error);
2223 		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
2224 		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail));
2225 		WARN_ON_ONCE(n_rcu_torture_barrier_error);  // rcu_barrier()
2226 		WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
2227 		WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?)
2228 		WARN_ON_ONCE(i > 1); // Too-short grace period
2229 	}
2230 	pr_cont("Reader Pipe: ");
2231 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
2232 		pr_cont(" %ld", pipesummary[i]);
2233 	pr_cont("\n");
2234 
2235 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2236 	pr_cont("Reader Batch: ");
2237 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
2238 		pr_cont(" %ld", batchsummary[i]);
2239 	pr_cont("\n");
2240 
2241 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2242 	pr_cont("Free-Block Circulation: ");
2243 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
2244 		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
2245 	}
2246 	pr_cont("\n");
2247 
2248 	if (cur_ops->stats)
2249 		cur_ops->stats();
2250 	if (rtcv_snap == rcu_torture_current_version &&
2251 	    rcu_access_pointer(rcu_torture_current) &&
2252 	    !rcu_stall_is_suppressed()) {
2253 		int __maybe_unused flags = 0;
2254 		unsigned long __maybe_unused gp_seq = 0;
2255 
2256 		rcutorture_get_gp_data(cur_ops->ttype,
2257 				       &flags, &gp_seq);
2258 		srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
2259 					&flags, &gp_seq);
2260 		wtp = READ_ONCE(writer_task);
2261 		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n",
2262 			 rcu_torture_writer_state_getname(),
2263 			 rcu_torture_writer_state, gp_seq, flags,
2264 			 wtp == NULL ? ~0U : wtp->__state,
2265 			 wtp == NULL ? -1 : (int)task_cpu(wtp));
2266 		if (!splatted && wtp) {
2267 			sched_show_task(wtp);
2268 			splatted = true;
2269 		}
2270 		if (cur_ops->gp_kthread_dbg)
2271 			cur_ops->gp_kthread_dbg();
2272 		rcu_ftrace_dump(DUMP_ALL);
2273 	}
2274 	rtcv_snap = rcu_torture_current_version;
2275 }
2276 
2277 /*
2278  * Periodically prints torture statistics, if periodic statistics printing
2279  * was specified via the stat_interval module parameter.
2280  */
2281 static int
2282 rcu_torture_stats(void *arg)
2283 {
2284 	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
2285 	do {
2286 		schedule_timeout_interruptible(stat_interval * HZ);
2287 		rcu_torture_stats_print();
2288 		torture_shutdown_absorb("rcu_torture_stats");
2289 	} while (!torture_must_stop());
2290 	torture_kthread_stopping("rcu_torture_stats");
2291 	return 0;
2292 }
2293 
2294 /* Test mem_dump_obj() and friends.  */
2295 static void rcu_torture_mem_dump_obj(void)
2296 {
2297 	struct rcu_head *rhp;
2298 	struct kmem_cache *kcp;
2299 	static int z;
2300 
2301 	kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL);
2302 	if (WARN_ON_ONCE(!kcp))
2303 		return;
2304 	rhp = kmem_cache_alloc(kcp, GFP_KERNEL);
2305 	if (WARN_ON_ONCE(!rhp)) {
2306 		kmem_cache_destroy(kcp);
2307 		return;
2308 	}
2309 	pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z);
2310 	pr_alert("mem_dump_obj(ZERO_SIZE_PTR):");
2311 	mem_dump_obj(ZERO_SIZE_PTR);
2312 	pr_alert("mem_dump_obj(NULL):");
2313 	mem_dump_obj(NULL);
2314 	pr_alert("mem_dump_obj(%px):", &rhp);
2315 	mem_dump_obj(&rhp);
2316 	pr_alert("mem_dump_obj(%px):", rhp);
2317 	mem_dump_obj(rhp);
2318 	pr_alert("mem_dump_obj(%px):", &rhp->func);
2319 	mem_dump_obj(&rhp->func);
2320 	pr_alert("mem_dump_obj(%px):", &z);
2321 	mem_dump_obj(&z);
2322 	kmem_cache_free(kcp, rhp);
2323 	kmem_cache_destroy(kcp);
2324 	rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
2325 	if (WARN_ON_ONCE(!rhp))
2326 		return;
2327 	pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
2328 	pr_alert("mem_dump_obj(kmalloc %px):", rhp);
2329 	mem_dump_obj(rhp);
2330 	pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func);
2331 	mem_dump_obj(&rhp->func);
2332 	kfree(rhp);
2333 	rhp = vmalloc(4096);
2334 	if (WARN_ON_ONCE(!rhp))
2335 		return;
2336 	pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
2337 	pr_alert("mem_dump_obj(vmalloc %px):", rhp);
2338 	mem_dump_obj(rhp);
2339 	pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func);
2340 	mem_dump_obj(&rhp->func);
2341 	vfree(rhp);
2342 }
2343 
2344 static void
2345 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
2346 {
2347 	pr_alert("%s" TORTURE_FLAG
2348 		 "--- %s: nreaders=%d nfakewriters=%d "
2349 		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
2350 		 "shuffle_interval=%d stutter=%d irqreader=%d "
2351 		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
2352 		 "test_boost=%d/%d test_boost_interval=%d "
2353 		 "test_boost_duration=%d shutdown_secs=%d "
2354 		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
2355 		 "stall_cpu_block=%d "
2356 		 "n_barrier_cbs=%d "
2357 		 "onoff_interval=%d onoff_holdoff=%d "
2358 		 "read_exit_delay=%d read_exit_burst=%d "
2359 		 "nocbs_nthreads=%d nocbs_toggle=%d "
2360 		 "test_nmis=%d\n",
2361 		 torture_type, tag, nrealreaders, nfakewriters,
2362 		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
2363 		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
2364 		 test_boost, cur_ops->can_boost,
2365 		 test_boost_interval, test_boost_duration, shutdown_secs,
2366 		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
2367 		 stall_cpu_block,
2368 		 n_barrier_cbs,
2369 		 onoff_interval, onoff_holdoff,
2370 		 read_exit_delay, read_exit_burst,
2371 		 nocbs_nthreads, nocbs_toggle,
2372 		 test_nmis);
2373 }
2374 
2375 static int rcutorture_booster_cleanup(unsigned int cpu)
2376 {
2377 	struct task_struct *t;
2378 
2379 	if (boost_tasks[cpu] == NULL)
2380 		return 0;
2381 	mutex_lock(&boost_mutex);
2382 	t = boost_tasks[cpu];
2383 	boost_tasks[cpu] = NULL;
2384 	rcu_torture_enable_rt_throttle();
2385 	mutex_unlock(&boost_mutex);
2386 
2387 	/* This must be outside of the mutex, otherwise deadlock! */
2388 	torture_stop_kthread(rcu_torture_boost, t);
2389 	return 0;
2390 }
2391 
2392 static int rcutorture_booster_init(unsigned int cpu)
2393 {
2394 	int retval;
2395 
2396 	if (boost_tasks[cpu] != NULL)
2397 		return 0;  /* Already created, nothing more to do. */
2398 
2399 	// Testing RCU priority boosting requires rcutorture do
2400 	// some serious abuse.  Counter this by running ksoftirqd
2401 	// at higher priority.
2402 	if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) {
2403 		struct sched_param sp;
2404 		struct task_struct *t;
2405 
2406 		t = per_cpu(ksoftirqd, cpu);
2407 		WARN_ON_ONCE(!t);
2408 		sp.sched_priority = 2;
2409 		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
2410 	}
2411 
2412 	/* Don't allow time recalculation while creating a new task. */
2413 	mutex_lock(&boost_mutex);
2414 	rcu_torture_disable_rt_throttle();
2415 	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
2416 	boost_tasks[cpu] = kthread_run_on_cpu(rcu_torture_boost, NULL,
2417 					      cpu, "rcu_torture_boost_%u");
2418 	if (IS_ERR(boost_tasks[cpu])) {
2419 		retval = PTR_ERR(boost_tasks[cpu]);
2420 		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
2421 		n_rcu_torture_boost_ktrerror++;
2422 		boost_tasks[cpu] = NULL;
2423 		mutex_unlock(&boost_mutex);
2424 		return retval;
2425 	}
2426 	mutex_unlock(&boost_mutex);
2427 	return 0;
2428 }
2429 
2430 /*
2431  * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
2432  * induces a CPU stall for the time specified by stall_cpu.
2433  */
2434 static int rcu_torture_stall(void *args)
2435 {
2436 	int idx;
2437 	unsigned long stop_at;
2438 
2439 	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
2440 	if (stall_cpu_holdoff > 0) {
2441 		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
2442 		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
2443 		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
2444 	}
2445 	if (!kthread_should_stop() && stall_gp_kthread > 0) {
2446 		VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall");
2447 		rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
2448 		for (idx = 0; idx < stall_gp_kthread + 2; idx++) {
2449 			if (kthread_should_stop())
2450 				break;
2451 			schedule_timeout_uninterruptible(HZ);
2452 		}
2453 	}
2454 	if (!kthread_should_stop() && stall_cpu > 0) {
2455 		VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall");
2456 		stop_at = ktime_get_seconds() + stall_cpu;
2457 		/* RCU CPU stall is expected behavior in following code. */
2458 		idx = cur_ops->readlock();
2459 		if (stall_cpu_irqsoff)
2460 			local_irq_disable();
2461 		else if (!stall_cpu_block)
2462 			preempt_disable();
2463 		pr_alert("%s start on CPU %d.\n",
2464 			  __func__, raw_smp_processor_id());
2465 		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
2466 				    stop_at))
2467 			if (stall_cpu_block) {
2468 #ifdef CONFIG_PREEMPTION
2469 				preempt_schedule();
2470 #else
2471 				schedule_timeout_uninterruptible(HZ);
2472 #endif
2473 			} else if (stall_no_softlockup) {
2474 				touch_softlockup_watchdog();
2475 			}
2476 		if (stall_cpu_irqsoff)
2477 			local_irq_enable();
2478 		else if (!stall_cpu_block)
2479 			preempt_enable();
2480 		cur_ops->readunlock(idx);
2481 	}
2482 	pr_alert("%s end.\n", __func__);
2483 	torture_shutdown_absorb("rcu_torture_stall");
2484 	while (!kthread_should_stop())
2485 		schedule_timeout_interruptible(10 * HZ);
2486 	return 0;
2487 }
2488 
2489 /* Spawn CPU-stall kthread, if stall_cpu specified. */
2490 static int __init rcu_torture_stall_init(void)
2491 {
2492 	if (stall_cpu <= 0 && stall_gp_kthread <= 0)
2493 		return 0;
2494 	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
2495 }
2496 
2497 /* State structure for forward-progress self-propagating RCU callback. */
2498 struct fwd_cb_state {
2499 	struct rcu_head rh;
2500 	int stop;
2501 };
2502 
2503 /*
2504  * Forward-progress self-propagating RCU callback function.  Because
2505  * callbacks run from softirq, this function is an implicit RCU read-side
2506  * critical section.
2507  */
2508 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
2509 {
2510 	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
2511 
2512 	if (READ_ONCE(fcsp->stop)) {
2513 		WRITE_ONCE(fcsp->stop, 2);
2514 		return;
2515 	}
2516 	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
2517 }
2518 
2519 /* State for continuous-flood RCU callbacks. */
2520 struct rcu_fwd_cb {
2521 	struct rcu_head rh;
2522 	struct rcu_fwd_cb *rfc_next;
2523 	struct rcu_fwd *rfc_rfp;
2524 	int rfc_gps;
2525 };
2526 
2527 #define MAX_FWD_CB_JIFFIES	(8 * HZ) /* Maximum CB test duration. */
2528 #define MIN_FWD_CB_LAUNDERS	3	/* This many CB invocations to count. */
2529 #define MIN_FWD_CBS_LAUNDERED	100	/* Number of counted CBs. */
2530 #define FWD_CBS_HIST_DIV	10	/* Histogram buckets/second. */
2531 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
2532 
2533 struct rcu_launder_hist {
2534 	long n_launders;
2535 	unsigned long launder_gp_seq;
2536 };
2537 
2538 struct rcu_fwd {
2539 	spinlock_t rcu_fwd_lock;
2540 	struct rcu_fwd_cb *rcu_fwd_cb_head;
2541 	struct rcu_fwd_cb **rcu_fwd_cb_tail;
2542 	long n_launders_cb;
2543 	unsigned long rcu_fwd_startat;
2544 	struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
2545 	unsigned long rcu_launder_gp_seq_start;
2546 	int rcu_fwd_id;
2547 };
2548 
2549 static DEFINE_MUTEX(rcu_fwd_mutex);
2550 static struct rcu_fwd *rcu_fwds;
2551 static unsigned long rcu_fwd_seq;
2552 static atomic_long_t rcu_fwd_max_cbs;
2553 static bool rcu_fwd_emergency_stop;
2554 
2555 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
2556 {
2557 	unsigned long gps;
2558 	unsigned long gps_old;
2559 	int i;
2560 	int j;
2561 
2562 	for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
2563 		if (rfp->n_launders_hist[i].n_launders > 0)
2564 			break;
2565 	pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):",
2566 		 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat);
2567 	gps_old = rfp->rcu_launder_gp_seq_start;
2568 	for (j = 0; j <= i; j++) {
2569 		gps = rfp->n_launders_hist[j].launder_gp_seq;
2570 		pr_cont(" %ds/%d: %ld:%ld",
2571 			j + 1, FWD_CBS_HIST_DIV,
2572 			rfp->n_launders_hist[j].n_launders,
2573 			rcutorture_seq_diff(gps, gps_old));
2574 		gps_old = gps;
2575 	}
2576 	pr_cont("\n");
2577 }
2578 
2579 /* Callback function for continuous-flood RCU callbacks. */
2580 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
2581 {
2582 	unsigned long flags;
2583 	int i;
2584 	struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
2585 	struct rcu_fwd_cb **rfcpp;
2586 	struct rcu_fwd *rfp = rfcp->rfc_rfp;
2587 
2588 	rfcp->rfc_next = NULL;
2589 	rfcp->rfc_gps++;
2590 	spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2591 	rfcpp = rfp->rcu_fwd_cb_tail;
2592 	rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
2593 	WRITE_ONCE(*rfcpp, rfcp);
2594 	WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
2595 	i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
2596 	if (i >= ARRAY_SIZE(rfp->n_launders_hist))
2597 		i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
2598 	rfp->n_launders_hist[i].n_launders++;
2599 	rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
2600 	spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2601 }
2602 
2603 // Give the scheduler a chance, even on nohz_full CPUs.
2604 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
2605 {
2606 	if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
2607 		// Real call_rcu() floods hit userspace, so emulate that.
2608 		if (need_resched() || (iter & 0xfff))
2609 			schedule();
2610 		return;
2611 	}
2612 	// No userspace emulation: CB invocation throttles call_rcu()
2613 	cond_resched();
2614 }
2615 
2616 /*
2617  * Free all callbacks on the rcu_fwd_cb_head list, either because the
2618  * test is over or because we hit an OOM event.
2619  */
2620 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
2621 {
2622 	unsigned long flags;
2623 	unsigned long freed = 0;
2624 	struct rcu_fwd_cb *rfcp;
2625 
2626 	for (;;) {
2627 		spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2628 		rfcp = rfp->rcu_fwd_cb_head;
2629 		if (!rfcp) {
2630 			spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2631 			break;
2632 		}
2633 		rfp->rcu_fwd_cb_head = rfcp->rfc_next;
2634 		if (!rfp->rcu_fwd_cb_head)
2635 			rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
2636 		spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2637 		kfree(rfcp);
2638 		freed++;
2639 		rcu_torture_fwd_prog_cond_resched(freed);
2640 		if (tick_nohz_full_enabled()) {
2641 			local_irq_save(flags);
2642 			rcu_momentary_dyntick_idle();
2643 			local_irq_restore(flags);
2644 		}
2645 	}
2646 	return freed;
2647 }
2648 
2649 /* Carry out need_resched()/cond_resched() forward-progress testing. */
2650 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
2651 				    int *tested, int *tested_tries)
2652 {
2653 	unsigned long cver;
2654 	unsigned long dur;
2655 	struct fwd_cb_state fcs;
2656 	unsigned long gps;
2657 	int idx;
2658 	int sd;
2659 	int sd4;
2660 	bool selfpropcb = false;
2661 	unsigned long stopat;
2662 	static DEFINE_TORTURE_RANDOM(trs);
2663 
2664 	pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
2665 	if (!cur_ops->sync)
2666 		return; // Cannot do need_resched() forward progress testing without ->sync.
2667 	if (cur_ops->call && cur_ops->cb_barrier) {
2668 		init_rcu_head_on_stack(&fcs.rh);
2669 		selfpropcb = true;
2670 	}
2671 
2672 	/* Tight loop containing cond_resched(). */
2673 	atomic_inc(&rcu_fwd_cb_nodelay);
2674 	cur_ops->sync(); /* Later readers see above write. */
2675 	if  (selfpropcb) {
2676 		WRITE_ONCE(fcs.stop, 0);
2677 		cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
2678 	}
2679 	cver = READ_ONCE(rcu_torture_current_version);
2680 	gps = cur_ops->get_gp_seq();
2681 	sd = cur_ops->stall_dur() + 1;
2682 	sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
2683 	dur = sd4 + torture_random(&trs) % (sd - sd4);
2684 	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2685 	stopat = rfp->rcu_fwd_startat + dur;
2686 	while (time_before(jiffies, stopat) &&
2687 	       !shutdown_time_arrived() &&
2688 	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2689 		idx = cur_ops->readlock();
2690 		udelay(10);
2691 		cur_ops->readunlock(idx);
2692 		if (!fwd_progress_need_resched || need_resched())
2693 			cond_resched();
2694 	}
2695 	(*tested_tries)++;
2696 	if (!time_before(jiffies, stopat) &&
2697 	    !shutdown_time_arrived() &&
2698 	    !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2699 		(*tested)++;
2700 		cver = READ_ONCE(rcu_torture_current_version) - cver;
2701 		gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2702 		WARN_ON(!cver && gps < 2);
2703 		pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__,
2704 			 rfp->rcu_fwd_id, dur, cver, gps);
2705 	}
2706 	if (selfpropcb) {
2707 		WRITE_ONCE(fcs.stop, 1);
2708 		cur_ops->sync(); /* Wait for running CB to complete. */
2709 		pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
2710 		cur_ops->cb_barrier(); /* Wait for queued callbacks. */
2711 	}
2712 
2713 	if (selfpropcb) {
2714 		WARN_ON(READ_ONCE(fcs.stop) != 2);
2715 		destroy_rcu_head_on_stack(&fcs.rh);
2716 	}
2717 	schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
2718 	atomic_dec(&rcu_fwd_cb_nodelay);
2719 }
2720 
2721 /* Carry out call_rcu() forward-progress testing. */
2722 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
2723 {
2724 	unsigned long cver;
2725 	unsigned long flags;
2726 	unsigned long gps;
2727 	int i;
2728 	long n_launders;
2729 	long n_launders_cb_snap;
2730 	long n_launders_sa;
2731 	long n_max_cbs;
2732 	long n_max_gps;
2733 	struct rcu_fwd_cb *rfcp;
2734 	struct rcu_fwd_cb *rfcpn;
2735 	unsigned long stopat;
2736 	unsigned long stoppedat;
2737 
2738 	pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
2739 	if (READ_ONCE(rcu_fwd_emergency_stop))
2740 		return; /* Get out of the way quickly, no GP wait! */
2741 	if (!cur_ops->call)
2742 		return; /* Can't do call_rcu() fwd prog without ->call. */
2743 
2744 	/* Loop continuously posting RCU callbacks. */
2745 	atomic_inc(&rcu_fwd_cb_nodelay);
2746 	cur_ops->sync(); /* Later readers see above write. */
2747 	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2748 	stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
2749 	n_launders = 0;
2750 	rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
2751 	n_launders_sa = 0;
2752 	n_max_cbs = 0;
2753 	n_max_gps = 0;
2754 	for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
2755 		rfp->n_launders_hist[i].n_launders = 0;
2756 	cver = READ_ONCE(rcu_torture_current_version);
2757 	gps = cur_ops->get_gp_seq();
2758 	rfp->rcu_launder_gp_seq_start = gps;
2759 	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2760 	while (time_before(jiffies, stopat) &&
2761 	       !shutdown_time_arrived() &&
2762 	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2763 		rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
2764 		rfcpn = NULL;
2765 		if (rfcp)
2766 			rfcpn = READ_ONCE(rfcp->rfc_next);
2767 		if (rfcpn) {
2768 			if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
2769 			    ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
2770 				break;
2771 			rfp->rcu_fwd_cb_head = rfcpn;
2772 			n_launders++;
2773 			n_launders_sa++;
2774 		} else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) {
2775 			rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
2776 			if (WARN_ON_ONCE(!rfcp)) {
2777 				schedule_timeout_interruptible(1);
2778 				continue;
2779 			}
2780 			n_max_cbs++;
2781 			n_launders_sa = 0;
2782 			rfcp->rfc_gps = 0;
2783 			rfcp->rfc_rfp = rfp;
2784 		} else {
2785 			rfcp = NULL;
2786 		}
2787 		if (rfcp)
2788 			cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
2789 		rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
2790 		if (tick_nohz_full_enabled()) {
2791 			local_irq_save(flags);
2792 			rcu_momentary_dyntick_idle();
2793 			local_irq_restore(flags);
2794 		}
2795 	}
2796 	stoppedat = jiffies;
2797 	n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
2798 	cver = READ_ONCE(rcu_torture_current_version) - cver;
2799 	gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2800 	pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
2801 	cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
2802 	(void)rcu_torture_fwd_prog_cbfree(rfp);
2803 
2804 	if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
2805 	    !shutdown_time_arrived()) {
2806 		WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
2807 		pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
2808 			 __func__,
2809 			 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
2810 			 n_launders + n_max_cbs - n_launders_cb_snap,
2811 			 n_launders, n_launders_sa,
2812 			 n_max_gps, n_max_cbs, cver, gps);
2813 		atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs);
2814 		mutex_lock(&rcu_fwd_mutex); // Serialize histograms.
2815 		rcu_torture_fwd_cb_hist(rfp);
2816 		mutex_unlock(&rcu_fwd_mutex);
2817 	}
2818 	schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
2819 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2820 	atomic_dec(&rcu_fwd_cb_nodelay);
2821 }
2822 
2823 
2824 /*
2825  * OOM notifier, but this only prints diagnostic information for the
2826  * current forward-progress test.
2827  */
2828 static int rcutorture_oom_notify(struct notifier_block *self,
2829 				 unsigned long notused, void *nfreed)
2830 {
2831 	int i;
2832 	long ncbs;
2833 	struct rcu_fwd *rfp;
2834 
2835 	mutex_lock(&rcu_fwd_mutex);
2836 	rfp = rcu_fwds;
2837 	if (!rfp) {
2838 		mutex_unlock(&rcu_fwd_mutex);
2839 		return NOTIFY_OK;
2840 	}
2841 	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
2842 	     __func__);
2843 	for (i = 0; i < fwd_progress; i++) {
2844 		rcu_torture_fwd_cb_hist(&rfp[i]);
2845 		rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2);
2846 	}
2847 	WRITE_ONCE(rcu_fwd_emergency_stop, true);
2848 	smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
2849 	ncbs = 0;
2850 	for (i = 0; i < fwd_progress; i++)
2851 		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
2852 	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
2853 	cur_ops->cb_barrier();
2854 	ncbs = 0;
2855 	for (i = 0; i < fwd_progress; i++)
2856 		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
2857 	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
2858 	cur_ops->cb_barrier();
2859 	ncbs = 0;
2860 	for (i = 0; i < fwd_progress; i++)
2861 		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
2862 	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
2863 	smp_mb(); /* Frees before return to avoid redoing OOM. */
2864 	(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
2865 	pr_info("%s returning after OOM processing.\n", __func__);
2866 	mutex_unlock(&rcu_fwd_mutex);
2867 	return NOTIFY_OK;
2868 }
2869 
2870 static struct notifier_block rcutorture_oom_nb = {
2871 	.notifier_call = rcutorture_oom_notify
2872 };
2873 
2874 /* Carry out grace-period forward-progress testing. */
2875 static int rcu_torture_fwd_prog(void *args)
2876 {
2877 	bool firsttime = true;
2878 	long max_cbs;
2879 	int oldnice = task_nice(current);
2880 	unsigned long oldseq = READ_ONCE(rcu_fwd_seq);
2881 	struct rcu_fwd *rfp = args;
2882 	int tested = 0;
2883 	int tested_tries = 0;
2884 
2885 	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
2886 	rcu_bind_current_to_nocb();
2887 	if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
2888 		set_user_nice(current, MAX_NICE);
2889 	do {
2890 		if (!rfp->rcu_fwd_id) {
2891 			schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
2892 			WRITE_ONCE(rcu_fwd_emergency_stop, false);
2893 			if (!firsttime) {
2894 				max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0);
2895 				pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs);
2896 			}
2897 			firsttime = false;
2898 			WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1);
2899 		} else {
2900 			while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop())
2901 				schedule_timeout_interruptible(1);
2902 			oldseq = READ_ONCE(rcu_fwd_seq);
2903 		}
2904 		pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
2905 		if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id)
2906 			rcu_torture_fwd_prog_cr(rfp);
2907 		if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) &&
2908 		    (!IS_ENABLED(CONFIG_TINY_RCU) ||
2909 		     (rcu_inkernel_boot_has_ended() &&
2910 		      torture_num_online_cpus() > rfp->rcu_fwd_id)))
2911 			rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
2912 
2913 		/* Avoid slow periods, better to test when busy. */
2914 		if (stutter_wait("rcu_torture_fwd_prog"))
2915 			sched_set_normal(current, oldnice);
2916 	} while (!torture_must_stop());
2917 	/* Short runs might not contain a valid forward-progress attempt. */
2918 	if (!rfp->rcu_fwd_id) {
2919 		WARN_ON(!tested && tested_tries >= 5);
2920 		pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
2921 	}
2922 	torture_kthread_stopping("rcu_torture_fwd_prog");
2923 	return 0;
2924 }
2925 
2926 /* If forward-progress checking is requested and feasible, spawn the thread. */
2927 static int __init rcu_torture_fwd_prog_init(void)
2928 {
2929 	int i;
2930 	int ret = 0;
2931 	struct rcu_fwd *rfp;
2932 
2933 	if (!fwd_progress)
2934 		return 0; /* Not requested, so don't do it. */
2935 	if (fwd_progress >= nr_cpu_ids) {
2936 		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.\n");
2937 		fwd_progress = nr_cpu_ids;
2938 	} else if (fwd_progress < 0) {
2939 		fwd_progress = nr_cpu_ids;
2940 	}
2941 	if ((!cur_ops->sync && !cur_ops->call) ||
2942 	    (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) ||
2943 	    cur_ops == &rcu_busted_ops) {
2944 		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
2945 		fwd_progress = 0;
2946 		return 0;
2947 	}
2948 	if (stall_cpu > 0) {
2949 		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
2950 		fwd_progress = 0;
2951 		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
2952 			return -EINVAL; /* In module, can fail back to user. */
2953 		WARN_ON(1); /* Make sure rcutorture notices conflict. */
2954 		return 0;
2955 	}
2956 	if (fwd_progress_holdoff <= 0)
2957 		fwd_progress_holdoff = 1;
2958 	if (fwd_progress_div <= 0)
2959 		fwd_progress_div = 4;
2960 	rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL);
2961 	fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL);
2962 	if (!rfp || !fwd_prog_tasks) {
2963 		kfree(rfp);
2964 		kfree(fwd_prog_tasks);
2965 		fwd_prog_tasks = NULL;
2966 		fwd_progress = 0;
2967 		return -ENOMEM;
2968 	}
2969 	for (i = 0; i < fwd_progress; i++) {
2970 		spin_lock_init(&rfp[i].rcu_fwd_lock);
2971 		rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head;
2972 		rfp[i].rcu_fwd_id = i;
2973 	}
2974 	mutex_lock(&rcu_fwd_mutex);
2975 	rcu_fwds = rfp;
2976 	mutex_unlock(&rcu_fwd_mutex);
2977 	register_oom_notifier(&rcutorture_oom_nb);
2978 	for (i = 0; i < fwd_progress; i++) {
2979 		ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]);
2980 		if (ret) {
2981 			fwd_progress = i;
2982 			return ret;
2983 		}
2984 	}
2985 	return 0;
2986 }
2987 
2988 static void rcu_torture_fwd_prog_cleanup(void)
2989 {
2990 	int i;
2991 	struct rcu_fwd *rfp;
2992 
2993 	if (!rcu_fwds || !fwd_prog_tasks)
2994 		return;
2995 	for (i = 0; i < fwd_progress; i++)
2996 		torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]);
2997 	unregister_oom_notifier(&rcutorture_oom_nb);
2998 	mutex_lock(&rcu_fwd_mutex);
2999 	rfp = rcu_fwds;
3000 	rcu_fwds = NULL;
3001 	mutex_unlock(&rcu_fwd_mutex);
3002 	kfree(rfp);
3003 	kfree(fwd_prog_tasks);
3004 	fwd_prog_tasks = NULL;
3005 }
3006 
3007 /* Callback function for RCU barrier testing. */
3008 static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
3009 {
3010 	atomic_inc(&barrier_cbs_invoked);
3011 }
3012 
3013 /* IPI handler to get callback posted on desired CPU, if online. */
3014 static void rcu_torture_barrier1cb(void *rcu_void)
3015 {
3016 	struct rcu_head *rhp = rcu_void;
3017 
3018 	cur_ops->call(rhp, rcu_torture_barrier_cbf);
3019 }
3020 
3021 /* kthread function to register callbacks used to test RCU barriers. */
3022 static int rcu_torture_barrier_cbs(void *arg)
3023 {
3024 	long myid = (long)arg;
3025 	bool lastphase = false;
3026 	bool newphase;
3027 	struct rcu_head rcu;
3028 
3029 	init_rcu_head_on_stack(&rcu);
3030 	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
3031 	set_user_nice(current, MAX_NICE);
3032 	do {
3033 		wait_event(barrier_cbs_wq[myid],
3034 			   (newphase =
3035 			    smp_load_acquire(&barrier_phase)) != lastphase ||
3036 			   torture_must_stop());
3037 		lastphase = newphase;
3038 		if (torture_must_stop())
3039 			break;
3040 		/*
3041 		 * The above smp_load_acquire() ensures barrier_phase load
3042 		 * is ordered before the following ->call().
3043 		 */
3044 		if (smp_call_function_single(myid, rcu_torture_barrier1cb,
3045 					     &rcu, 1)) {
3046 			// IPI failed, so use direct call from current CPU.
3047 			cur_ops->call(&rcu, rcu_torture_barrier_cbf);
3048 		}
3049 		if (atomic_dec_and_test(&barrier_cbs_count))
3050 			wake_up(&barrier_wq);
3051 	} while (!torture_must_stop());
3052 	if (cur_ops->cb_barrier != NULL)
3053 		cur_ops->cb_barrier();
3054 	destroy_rcu_head_on_stack(&rcu);
3055 	torture_kthread_stopping("rcu_torture_barrier_cbs");
3056 	return 0;
3057 }
3058 
3059 /* kthread function to drive and coordinate RCU barrier testing. */
3060 static int rcu_torture_barrier(void *arg)
3061 {
3062 	int i;
3063 
3064 	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
3065 	do {
3066 		atomic_set(&barrier_cbs_invoked, 0);
3067 		atomic_set(&barrier_cbs_count, n_barrier_cbs);
3068 		/* Ensure barrier_phase ordered after prior assignments. */
3069 		smp_store_release(&barrier_phase, !barrier_phase);
3070 		for (i = 0; i < n_barrier_cbs; i++)
3071 			wake_up(&barrier_cbs_wq[i]);
3072 		wait_event(barrier_wq,
3073 			   atomic_read(&barrier_cbs_count) == 0 ||
3074 			   torture_must_stop());
3075 		if (torture_must_stop())
3076 			break;
3077 		n_barrier_attempts++;
3078 		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
3079 		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
3080 			n_rcu_torture_barrier_error++;
3081 			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
3082 			       atomic_read(&barrier_cbs_invoked),
3083 			       n_barrier_cbs);
3084 			WARN_ON(1);
3085 			// Wait manually for the remaining callbacks
3086 			i = 0;
3087 			do {
3088 				if (WARN_ON(i++ > HZ))
3089 					i = INT_MIN;
3090 				schedule_timeout_interruptible(1);
3091 				cur_ops->cb_barrier();
3092 			} while (atomic_read(&barrier_cbs_invoked) !=
3093 				 n_barrier_cbs &&
3094 				 !torture_must_stop());
3095 			smp_mb(); // Can't trust ordering if broken.
3096 			if (!torture_must_stop())
3097 				pr_err("Recovered: barrier_cbs_invoked = %d\n",
3098 				       atomic_read(&barrier_cbs_invoked));
3099 		} else {
3100 			n_barrier_successes++;
3101 		}
3102 		schedule_timeout_interruptible(HZ / 10);
3103 	} while (!torture_must_stop());
3104 	torture_kthread_stopping("rcu_torture_barrier");
3105 	return 0;
3106 }
3107 
3108 /* Initialize RCU barrier testing. */
3109 static int rcu_torture_barrier_init(void)
3110 {
3111 	int i;
3112 	int ret;
3113 
3114 	if (n_barrier_cbs <= 0)
3115 		return 0;
3116 	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
3117 		pr_alert("%s" TORTURE_FLAG
3118 			 " Call or barrier ops missing for %s,\n",
3119 			 torture_type, cur_ops->name);
3120 		pr_alert("%s" TORTURE_FLAG
3121 			 " RCU barrier testing omitted from run.\n",
3122 			 torture_type);
3123 		return 0;
3124 	}
3125 	atomic_set(&barrier_cbs_count, 0);
3126 	atomic_set(&barrier_cbs_invoked, 0);
3127 	barrier_cbs_tasks =
3128 		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
3129 			GFP_KERNEL);
3130 	barrier_cbs_wq =
3131 		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
3132 	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
3133 		return -ENOMEM;
3134 	for (i = 0; i < n_barrier_cbs; i++) {
3135 		init_waitqueue_head(&barrier_cbs_wq[i]);
3136 		ret = torture_create_kthread(rcu_torture_barrier_cbs,
3137 					     (void *)(long)i,
3138 					     barrier_cbs_tasks[i]);
3139 		if (ret)
3140 			return ret;
3141 	}
3142 	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
3143 }
3144 
3145 /* Clean up after RCU barrier testing. */
3146 static void rcu_torture_barrier_cleanup(void)
3147 {
3148 	int i;
3149 
3150 	torture_stop_kthread(rcu_torture_barrier, barrier_task);
3151 	if (barrier_cbs_tasks != NULL) {
3152 		for (i = 0; i < n_barrier_cbs; i++)
3153 			torture_stop_kthread(rcu_torture_barrier_cbs,
3154 					     barrier_cbs_tasks[i]);
3155 		kfree(barrier_cbs_tasks);
3156 		barrier_cbs_tasks = NULL;
3157 	}
3158 	if (barrier_cbs_wq != NULL) {
3159 		kfree(barrier_cbs_wq);
3160 		barrier_cbs_wq = NULL;
3161 	}
3162 }
3163 
3164 static bool rcu_torture_can_boost(void)
3165 {
3166 	static int boost_warn_once;
3167 	int prio;
3168 
3169 	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
3170 		return false;
3171 	if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)
3172 		return false;
3173 
3174 	prio = rcu_get_gp_kthreads_prio();
3175 	if (!prio)
3176 		return false;
3177 
3178 	if (prio < 2) {
3179 		if (boost_warn_once == 1)
3180 			return false;
3181 
3182 		pr_alert("%s: WARN: RCU kthread priority too low to test boosting.  Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
3183 		boost_warn_once = 1;
3184 		return false;
3185 	}
3186 
3187 	return true;
3188 }
3189 
3190 static bool read_exit_child_stop;
3191 static bool read_exit_child_stopped;
3192 static wait_queue_head_t read_exit_wq;
3193 
3194 // Child kthread which just does an rcutorture reader and exits.
3195 static int rcu_torture_read_exit_child(void *trsp_in)
3196 {
3197 	struct torture_random_state *trsp = trsp_in;
3198 
3199 	set_user_nice(current, MAX_NICE);
3200 	// Minimize time between reading and exiting.
3201 	while (!kthread_should_stop())
3202 		schedule_timeout_uninterruptible(1);
3203 	(void)rcu_torture_one_read(trsp, -1);
3204 	return 0;
3205 }
3206 
3207 // Parent kthread which creates and destroys read-exit child kthreads.
3208 static int rcu_torture_read_exit(void *unused)
3209 {
3210 	bool errexit = false;
3211 	int i;
3212 	struct task_struct *tsp;
3213 	DEFINE_TORTURE_RANDOM(trs);
3214 
3215 	// Allocate and initialize.
3216 	set_user_nice(current, MAX_NICE);
3217 	VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test");
3218 
3219 	// Each pass through this loop does one read-exit episode.
3220 	do {
3221 		VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode");
3222 		for (i = 0; i < read_exit_burst; i++) {
3223 			if (READ_ONCE(read_exit_child_stop))
3224 				break;
3225 			stutter_wait("rcu_torture_read_exit");
3226 			// Spawn child.
3227 			tsp = kthread_run(rcu_torture_read_exit_child,
3228 					  &trs, "%s", "rcu_torture_read_exit_child");
3229 			if (IS_ERR(tsp)) {
3230 				TOROUT_ERRSTRING("out of memory");
3231 				errexit = true;
3232 				break;
3233 			}
3234 			cond_resched();
3235 			kthread_stop(tsp);
3236 			n_read_exits++;
3237 		}
3238 		VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode");
3239 		rcu_barrier(); // Wait for task_struct free, avoid OOM.
3240 		i = 0;
3241 		for (; !errexit && !READ_ONCE(read_exit_child_stop) && i < read_exit_delay; i++)
3242 			schedule_timeout_uninterruptible(HZ);
3243 	} while (!errexit && !READ_ONCE(read_exit_child_stop));
3244 
3245 	// Clean up and exit.
3246 	smp_store_release(&read_exit_child_stopped, true); // After reaping.
3247 	smp_mb(); // Store before wakeup.
3248 	wake_up(&read_exit_wq);
3249 	while (!torture_must_stop())
3250 		schedule_timeout_uninterruptible(1);
3251 	torture_kthread_stopping("rcu_torture_read_exit");
3252 	return 0;
3253 }
3254 
3255 static int rcu_torture_read_exit_init(void)
3256 {
3257 	if (read_exit_burst <= 0)
3258 		return 0;
3259 	init_waitqueue_head(&read_exit_wq);
3260 	read_exit_child_stop = false;
3261 	read_exit_child_stopped = false;
3262 	return torture_create_kthread(rcu_torture_read_exit, NULL,
3263 				      read_exit_task);
3264 }
3265 
3266 static void rcu_torture_read_exit_cleanup(void)
3267 {
3268 	if (!read_exit_task)
3269 		return;
3270 	WRITE_ONCE(read_exit_child_stop, true);
3271 	smp_mb(); // Above write before wait.
3272 	wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
3273 	torture_stop_kthread(rcutorture_read_exit, read_exit_task);
3274 }
3275 
3276 static void rcutorture_test_nmis(int n)
3277 {
3278 #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
3279 	int cpu;
3280 	int dumpcpu;
3281 	int i;
3282 
3283 	for (i = 0; i < n; i++) {
3284 		preempt_disable();
3285 		cpu = smp_processor_id();
3286 		dumpcpu = cpu + 1;
3287 		if (dumpcpu >= nr_cpu_ids)
3288 			dumpcpu = 0;
3289 		pr_alert("%s: CPU %d invoking dump_cpu_task(%d)\n", __func__, cpu, dumpcpu);
3290 		dump_cpu_task(dumpcpu);
3291 		preempt_enable();
3292 		schedule_timeout_uninterruptible(15 * HZ);
3293 	}
3294 #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
3295 	WARN_ONCE(n, "Non-zero rcutorture.test_nmis=%d permitted only when rcutorture is built in.\n", test_nmis);
3296 #endif // #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
3297 }
3298 
3299 static enum cpuhp_state rcutor_hp;
3300 
3301 static void
3302 rcu_torture_cleanup(void)
3303 {
3304 	int firsttime;
3305 	int flags = 0;
3306 	unsigned long gp_seq = 0;
3307 	int i;
3308 
3309 	if (torture_cleanup_begin()) {
3310 		if (cur_ops->cb_barrier != NULL) {
3311 			pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
3312 			cur_ops->cb_barrier();
3313 		}
3314 		rcu_gp_slow_unregister(NULL);
3315 		return;
3316 	}
3317 	if (!cur_ops) {
3318 		torture_cleanup_end();
3319 		rcu_gp_slow_unregister(NULL);
3320 		return;
3321 	}
3322 
3323 	rcutorture_test_nmis(test_nmis);
3324 
3325 	if (cur_ops->gp_kthread_dbg)
3326 		cur_ops->gp_kthread_dbg();
3327 	rcu_torture_read_exit_cleanup();
3328 	rcu_torture_barrier_cleanup();
3329 	rcu_torture_fwd_prog_cleanup();
3330 	torture_stop_kthread(rcu_torture_stall, stall_task);
3331 	torture_stop_kthread(rcu_torture_writer, writer_task);
3332 
3333 	if (nocb_tasks) {
3334 		for (i = 0; i < nrealnocbers; i++)
3335 			torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]);
3336 		kfree(nocb_tasks);
3337 		nocb_tasks = NULL;
3338 	}
3339 
3340 	if (reader_tasks) {
3341 		for (i = 0; i < nrealreaders; i++)
3342 			torture_stop_kthread(rcu_torture_reader,
3343 					     reader_tasks[i]);
3344 		kfree(reader_tasks);
3345 		reader_tasks = NULL;
3346 	}
3347 	kfree(rcu_torture_reader_mbchk);
3348 	rcu_torture_reader_mbchk = NULL;
3349 
3350 	if (fakewriter_tasks) {
3351 		for (i = 0; i < nfakewriters; i++)
3352 			torture_stop_kthread(rcu_torture_fakewriter,
3353 					     fakewriter_tasks[i]);
3354 		kfree(fakewriter_tasks);
3355 		fakewriter_tasks = NULL;
3356 	}
3357 
3358 	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
3359 	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
3360 	pr_alert("%s:  End-test grace-period state: g%ld f%#x total-gps=%ld\n",
3361 		 cur_ops->name, (long)gp_seq, flags,
3362 		 rcutorture_seq_diff(gp_seq, start_gp_seq));
3363 	torture_stop_kthread(rcu_torture_stats, stats_task);
3364 	torture_stop_kthread(rcu_torture_fqs, fqs_task);
3365 	if (rcu_torture_can_boost() && rcutor_hp >= 0)
3366 		cpuhp_remove_state(rcutor_hp);
3367 
3368 	/*
3369 	 * Wait for all RCU callbacks to fire, then do torture-type-specific
3370 	 * cleanup operations.
3371 	 */
3372 	if (cur_ops->cb_barrier != NULL) {
3373 		pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
3374 		cur_ops->cb_barrier();
3375 	}
3376 	if (cur_ops->cleanup != NULL)
3377 		cur_ops->cleanup();
3378 
3379 	rcu_torture_mem_dump_obj();
3380 
3381 	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */
3382 
3383 	if (err_segs_recorded) {
3384 		pr_alert("Failure/close-call rcutorture reader segments:\n");
3385 		if (rt_read_nsegs == 0)
3386 			pr_alert("\t: No segments recorded!!!\n");
3387 		firsttime = 1;
3388 		for (i = 0; i < rt_read_nsegs; i++) {
3389 			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
3390 			if (err_segs[i].rt_delay_jiffies != 0) {
3391 				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
3392 					err_segs[i].rt_delay_jiffies);
3393 				firsttime = 0;
3394 			}
3395 			if (err_segs[i].rt_delay_ms != 0) {
3396 				pr_cont("%s%ldms", firsttime ? "" : "+",
3397 					err_segs[i].rt_delay_ms);
3398 				firsttime = 0;
3399 			}
3400 			if (err_segs[i].rt_delay_us != 0) {
3401 				pr_cont("%s%ldus", firsttime ? "" : "+",
3402 					err_segs[i].rt_delay_us);
3403 				firsttime = 0;
3404 			}
3405 			pr_cont("%s\n",
3406 				err_segs[i].rt_preempted ? "preempted" : "");
3407 
3408 		}
3409 	}
3410 	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
3411 		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
3412 	else if (torture_onoff_failures())
3413 		rcu_torture_print_module_parms(cur_ops,
3414 					       "End of test: RCU_HOTPLUG");
3415 	else
3416 		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
3417 	torture_cleanup_end();
3418 	rcu_gp_slow_unregister(&rcu_fwd_cb_nodelay);
3419 }
3420 
3421 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
3422 static void rcu_torture_leak_cb(struct rcu_head *rhp)
3423 {
3424 }
3425 
3426 static void rcu_torture_err_cb(struct rcu_head *rhp)
3427 {
3428 	/*
3429 	 * This -might- happen due to race conditions, but is unlikely.
3430 	 * The scenario that leads to this happening is that the
3431 	 * first of the pair of duplicate callbacks is queued,
3432 	 * someone else starts a grace period that includes that
3433 	 * callback, then the second of the pair must wait for the
3434 	 * next grace period.  Unlikely, but can happen.  If it
3435 	 * does happen, the debug-objects subsystem won't have splatted.
3436 	 */
3437 	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
3438 }
3439 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
3440 
3441 /*
3442  * Verify that double-free causes debug-objects to complain, but only
3443  * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
3444  * cannot be carried out.
3445  */
3446 static void rcu_test_debug_objects(void)
3447 {
3448 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
3449 	struct rcu_head rh1;
3450 	struct rcu_head rh2;
3451 	struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
3452 
3453 	init_rcu_head_on_stack(&rh1);
3454 	init_rcu_head_on_stack(&rh2);
3455 	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);
3456 
3457 	/* Try to queue the rh2 pair of callbacks for the same grace period. */
3458 	preempt_disable(); /* Prevent preemption from interrupting test. */
3459 	rcu_read_lock(); /* Make it impossible to finish a grace period. */
3460 	call_rcu_hurry(&rh1, rcu_torture_leak_cb); /* Start grace period. */
3461 	local_irq_disable(); /* Make it harder to start a new grace period. */
3462 	call_rcu_hurry(&rh2, rcu_torture_leak_cb);
3463 	call_rcu_hurry(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
3464 	if (rhp) {
3465 		call_rcu_hurry(rhp, rcu_torture_leak_cb);
3466 		call_rcu_hurry(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
3467 	}
3468 	local_irq_enable();
3469 	rcu_read_unlock();
3470 	preempt_enable();
3471 
3472 	/* Wait for them all to get done so we can safely return. */
3473 	rcu_barrier();
3474 	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
3475 	destroy_rcu_head_on_stack(&rh1);
3476 	destroy_rcu_head_on_stack(&rh2);
3477 	kfree(rhp);
3478 #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
3479 	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
3480 #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
3481 }
3482 
3483 static void rcutorture_sync(void)
3484 {
3485 	static unsigned long n;
3486 
3487 	if (cur_ops->sync && !(++n & 0xfff))
3488 		cur_ops->sync();
3489 }
3490 
3491 static DEFINE_MUTEX(mut0);
3492 static DEFINE_MUTEX(mut1);
3493 static DEFINE_MUTEX(mut2);
3494 static DEFINE_MUTEX(mut3);
3495 static DEFINE_MUTEX(mut4);
3496 static DEFINE_MUTEX(mut5);
3497 static DEFINE_MUTEX(mut6);
3498 static DEFINE_MUTEX(mut7);
3499 static DEFINE_MUTEX(mut8);
3500 static DEFINE_MUTEX(mut9);
3501 
3502 static DECLARE_RWSEM(rwsem0);
3503 static DECLARE_RWSEM(rwsem1);
3504 static DECLARE_RWSEM(rwsem2);
3505 static DECLARE_RWSEM(rwsem3);
3506 static DECLARE_RWSEM(rwsem4);
3507 static DECLARE_RWSEM(rwsem5);
3508 static DECLARE_RWSEM(rwsem6);
3509 static DECLARE_RWSEM(rwsem7);
3510 static DECLARE_RWSEM(rwsem8);
3511 static DECLARE_RWSEM(rwsem9);
3512 
3513 DEFINE_STATIC_SRCU(srcu0);
3514 DEFINE_STATIC_SRCU(srcu1);
3515 DEFINE_STATIC_SRCU(srcu2);
3516 DEFINE_STATIC_SRCU(srcu3);
3517 DEFINE_STATIC_SRCU(srcu4);
3518 DEFINE_STATIC_SRCU(srcu5);
3519 DEFINE_STATIC_SRCU(srcu6);
3520 DEFINE_STATIC_SRCU(srcu7);
3521 DEFINE_STATIC_SRCU(srcu8);
3522 DEFINE_STATIC_SRCU(srcu9);
3523 
3524 static int srcu_lockdep_next(const char *f, const char *fl, const char *fs, const char *fu, int i,
3525 			     int cyclelen, int deadlock)
3526 {
3527 	int j = i + 1;
3528 
3529 	if (j >= cyclelen)
3530 		j = deadlock ? 0 : -1;
3531 	if (j >= 0)
3532 		pr_info("%s: %s(%d), %s(%d), %s(%d)\n", f, fl, i, fs, j, fu, i);
3533 	else
3534 		pr_info("%s: %s(%d), %s(%d)\n", f, fl, i, fu, i);
3535 	return j;
3536 }
3537 
3538 // Test lockdep on SRCU-based deadlock scenarios.
3539 static void rcu_torture_init_srcu_lockdep(void)
3540 {
3541 	int cyclelen;
3542 	int deadlock;
3543 	bool err = false;
3544 	int i;
3545 	int j;
3546 	int idx;
3547 	struct mutex *muts[] = { &mut0, &mut1, &mut2, &mut3, &mut4,
3548 				 &mut5, &mut6, &mut7, &mut8, &mut9 };
3549 	struct rw_semaphore *rwsems[] = { &rwsem0, &rwsem1, &rwsem2, &rwsem3, &rwsem4,
3550 					  &rwsem5, &rwsem6, &rwsem7, &rwsem8, &rwsem9 };
3551 	struct srcu_struct *srcus[] = { &srcu0, &srcu1, &srcu2, &srcu3, &srcu4,
3552 					&srcu5, &srcu6, &srcu7, &srcu8, &srcu9 };
3553 	int testtype;
3554 
3555 	if (!test_srcu_lockdep)
3556 		return;
3557 
3558 	deadlock = test_srcu_lockdep / 1000;
3559 	testtype = (test_srcu_lockdep / 10) % 100;
3560 	cyclelen = test_srcu_lockdep % 10;
3561 	WARN_ON_ONCE(ARRAY_SIZE(muts) != ARRAY_SIZE(srcus));
3562 	if (WARN_ONCE(deadlock != !!deadlock,
3563 		      "%s: test_srcu_lockdep=%d and deadlock digit %d must be zero or one.\n",
3564 		      __func__, test_srcu_lockdep, deadlock))
3565 		err = true;
3566 	if (WARN_ONCE(cyclelen <= 0,
3567 		      "%s: test_srcu_lockdep=%d and cycle-length digit %d must be greater than zero.\n",
3568 		      __func__, test_srcu_lockdep, cyclelen))
3569 		err = true;
3570 	if (err)
3571 		goto err_out;
3572 
3573 	if (testtype == 0) {
3574 		pr_info("%s: test_srcu_lockdep = %05d: SRCU %d-way %sdeadlock.\n",
3575 			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
3576 		if (deadlock && cyclelen == 1)
3577 			pr_info("%s: Expect hang.\n", __func__);
3578 		for (i = 0; i < cyclelen; i++) {
3579 			j = srcu_lockdep_next(__func__, "srcu_read_lock", "synchronize_srcu",
3580 					      "srcu_read_unlock", i, cyclelen, deadlock);
3581 			idx = srcu_read_lock(srcus[i]);
3582 			if (j >= 0)
3583 				synchronize_srcu(srcus[j]);
3584 			srcu_read_unlock(srcus[i], idx);
3585 		}
3586 		return;
3587 	}
3588 
3589 	if (testtype == 1) {
3590 		pr_info("%s: test_srcu_lockdep = %05d: SRCU/mutex %d-way %sdeadlock.\n",
3591 			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
3592 		for (i = 0; i < cyclelen; i++) {
3593 			pr_info("%s: srcu_read_lock(%d), mutex_lock(%d), mutex_unlock(%d), srcu_read_unlock(%d)\n",
3594 				__func__, i, i, i, i);
3595 			idx = srcu_read_lock(srcus[i]);
3596 			mutex_lock(muts[i]);
3597 			mutex_unlock(muts[i]);
3598 			srcu_read_unlock(srcus[i], idx);
3599 
3600 			j = srcu_lockdep_next(__func__, "mutex_lock", "synchronize_srcu",
3601 					      "mutex_unlock", i, cyclelen, deadlock);
3602 			mutex_lock(muts[i]);
3603 			if (j >= 0)
3604 				synchronize_srcu(srcus[j]);
3605 			mutex_unlock(muts[i]);
3606 		}
3607 		return;
3608 	}
3609 
3610 	if (testtype == 2) {
3611 		pr_info("%s: test_srcu_lockdep = %05d: SRCU/rwsem %d-way %sdeadlock.\n",
3612 			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
3613 		for (i = 0; i < cyclelen; i++) {
3614 			pr_info("%s: srcu_read_lock(%d), down_read(%d), up_read(%d), srcu_read_unlock(%d)\n",
3615 				__func__, i, i, i, i);
3616 			idx = srcu_read_lock(srcus[i]);
3617 			down_read(rwsems[i]);
3618 			up_read(rwsems[i]);
3619 			srcu_read_unlock(srcus[i], idx);
3620 
3621 			j = srcu_lockdep_next(__func__, "down_write", "synchronize_srcu",
3622 					      "up_write", i, cyclelen, deadlock);
3623 			down_write(rwsems[i]);
3624 			if (j >= 0)
3625 				synchronize_srcu(srcus[j]);
3626 			up_write(rwsems[i]);
3627 		}
3628 		return;
3629 	}
3630 
3631 #ifdef CONFIG_TASKS_TRACE_RCU
3632 	if (testtype == 3) {
3633 		pr_info("%s: test_srcu_lockdep = %05d: SRCU and Tasks Trace RCU %d-way %sdeadlock.\n",
3634 			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
3635 		if (deadlock && cyclelen == 1)
3636 			pr_info("%s: Expect hang.\n", __func__);
3637 		for (i = 0; i < cyclelen; i++) {
3638 			char *fl = i == 0 ? "rcu_read_lock_trace" : "srcu_read_lock";
3639 			char *fs = i == cyclelen - 1 ? "synchronize_rcu_tasks_trace"
3640 						     : "synchronize_srcu";
3641 			char *fu = i == 0 ? "rcu_read_unlock_trace" : "srcu_read_unlock";
3642 
3643 			j = srcu_lockdep_next(__func__, fl, fs, fu, i, cyclelen, deadlock);
3644 			if (i == 0)
3645 				rcu_read_lock_trace();
3646 			else
3647 				idx = srcu_read_lock(srcus[i]);
3648 			if (j >= 0) {
3649 				if (i == cyclelen - 1)
3650 					synchronize_rcu_tasks_trace();
3651 				else
3652 					synchronize_srcu(srcus[j]);
3653 			}
3654 			if (i == 0)
3655 				rcu_read_unlock_trace();
3656 			else
3657 				srcu_read_unlock(srcus[i], idx);
3658 		}
3659 		return;
3660 	}
3661 #endif // #ifdef CONFIG_TASKS_TRACE_RCU
3662 
3663 err_out:
3664 	pr_info("%s: test_srcu_lockdep = %05d does nothing.\n", __func__, test_srcu_lockdep);
3665 	pr_info("%s: test_srcu_lockdep = DNNL.\n", __func__);
3666 	pr_info("%s: D: Deadlock if nonzero.\n", __func__);
3667 	pr_info("%s: NN: Test number, 0=SRCU, 1=SRCU/mutex, 2=SRCU/rwsem, 3=SRCU/Tasks Trace RCU.\n", __func__);
3668 	pr_info("%s: L: Cycle length.\n", __func__);
3669 	if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU))
3670 		pr_info("%s: NN=3 disallowed because kernel is built with CONFIG_TASKS_TRACE_RCU=n\n", __func__);
3671 }
3672 
3673 static int __init
3674 rcu_torture_init(void)
3675 {
3676 	long i;
3677 	int cpu;
3678 	int firsterr = 0;
3679 	int flags = 0;
3680 	unsigned long gp_seq = 0;
3681 	static struct rcu_torture_ops *torture_ops[] = {
3682 		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops,
3683 		TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
3684 		&trivial_ops,
3685 	};
3686 
3687 	if (!torture_init_begin(torture_type, verbose))
3688 		return -EBUSY;
3689 
3690 	/* Process args and tell the world that the torturer is on the job. */
3691 	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
3692 		cur_ops = torture_ops[i];
3693 		if (strcmp(torture_type, cur_ops->name) == 0)
3694 			break;
3695 	}
3696 	if (i == ARRAY_SIZE(torture_ops)) {
3697 		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
3698 			 torture_type);
3699 		pr_alert("rcu-torture types:");
3700 		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
3701 			pr_cont(" %s", torture_ops[i]->name);
3702 		pr_cont("\n");
3703 		firsterr = -EINVAL;
3704 		cur_ops = NULL;
3705 		goto unwind;
3706 	}
3707 	if (cur_ops->fqs == NULL && fqs_duration != 0) {
3708 		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
3709 		fqs_duration = 0;
3710 	}
3711 	if (nocbs_nthreads != 0 && (cur_ops != &rcu_ops ||
3712 				    !IS_ENABLED(CONFIG_RCU_NOCB_CPU))) {
3713 		pr_alert("rcu-torture types: %s and CONFIG_RCU_NOCB_CPU=%d, nocb toggle disabled.\n",
3714 			 cur_ops->name, IS_ENABLED(CONFIG_RCU_NOCB_CPU));
3715 		nocbs_nthreads = 0;
3716 	}
3717 	if (cur_ops->init)
3718 		cur_ops->init();
3719 
3720 	rcu_torture_init_srcu_lockdep();
3721 
3722 	if (nreaders >= 0) {
3723 		nrealreaders = nreaders;
3724 	} else {
3725 		nrealreaders = num_online_cpus() - 2 - nreaders;
3726 		if (nrealreaders <= 0)
3727 			nrealreaders = 1;
3728 	}
3729 	rcu_torture_print_module_parms(cur_ops, "Start of test");
3730 	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
3731 	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
3732 	start_gp_seq = gp_seq;
3733 	pr_alert("%s:  Start-test grace-period state: g%ld f%#x\n",
3734 		 cur_ops->name, (long)gp_seq, flags);
3735 
3736 	/* Set up the freelist. */
3737 
3738 	INIT_LIST_HEAD(&rcu_torture_freelist);
3739 	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
3740 		rcu_tortures[i].rtort_mbtest = 0;
3741 		list_add_tail(&rcu_tortures[i].rtort_free,
3742 			      &rcu_torture_freelist);
3743 	}
3744 
3745 	/* Initialize the statistics so that each run gets its own numbers. */
3746 
3747 	rcu_torture_current = NULL;
3748 	rcu_torture_current_version = 0;
3749 	atomic_set(&n_rcu_torture_alloc, 0);
3750 	atomic_set(&n_rcu_torture_alloc_fail, 0);
3751 	atomic_set(&n_rcu_torture_free, 0);
3752 	atomic_set(&n_rcu_torture_mberror, 0);
3753 	atomic_set(&n_rcu_torture_mbchk_fail, 0);
3754 	atomic_set(&n_rcu_torture_mbchk_tries, 0);
3755 	atomic_set(&n_rcu_torture_error, 0);
3756 	n_rcu_torture_barrier_error = 0;
3757 	n_rcu_torture_boost_ktrerror = 0;
3758 	n_rcu_torture_boost_failure = 0;
3759 	n_rcu_torture_boosts = 0;
3760 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
3761 		atomic_set(&rcu_torture_wcount[i], 0);
3762 	for_each_possible_cpu(cpu) {
3763 		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
3764 			per_cpu(rcu_torture_count, cpu)[i] = 0;
3765 			per_cpu(rcu_torture_batch, cpu)[i] = 0;
3766 		}
3767 	}
3768 	err_segs_recorded = 0;
3769 	rt_read_nsegs = 0;
3770 
3771 	/* Start up the kthreads. */
3772 
3773 	rcu_torture_write_types();
3774 	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
3775 					  writer_task);
3776 	if (torture_init_error(firsterr))
3777 		goto unwind;
3778 	if (nfakewriters > 0) {
3779 		fakewriter_tasks = kcalloc(nfakewriters,
3780 					   sizeof(fakewriter_tasks[0]),
3781 					   GFP_KERNEL);
3782 		if (fakewriter_tasks == NULL) {
3783 			TOROUT_ERRSTRING("out of memory");
3784 			firsterr = -ENOMEM;
3785 			goto unwind;
3786 		}
3787 	}
3788 	for (i = 0; i < nfakewriters; i++) {
3789 		firsterr = torture_create_kthread(rcu_torture_fakewriter,
3790 						  NULL, fakewriter_tasks[i]);
3791 		if (torture_init_error(firsterr))
3792 			goto unwind;
3793 	}
3794 	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
3795 			       GFP_KERNEL);
3796 	rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk),
3797 					   GFP_KERNEL);
3798 	if (!reader_tasks || !rcu_torture_reader_mbchk) {
3799 		TOROUT_ERRSTRING("out of memory");
3800 		firsterr = -ENOMEM;
3801 		goto unwind;
3802 	}
3803 	for (i = 0; i < nrealreaders; i++) {
3804 		rcu_torture_reader_mbchk[i].rtc_chkrdr = -1;
3805 		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
3806 						  reader_tasks[i]);
3807 		if (torture_init_error(firsterr))
3808 			goto unwind;
3809 	}
3810 	nrealnocbers = nocbs_nthreads;
3811 	if (WARN_ON(nrealnocbers < 0))
3812 		nrealnocbers = 1;
3813 	if (WARN_ON(nocbs_toggle < 0))
3814 		nocbs_toggle = HZ;
3815 	if (nrealnocbers > 0) {
3816 		nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL);
3817 		if (nocb_tasks == NULL) {
3818 			TOROUT_ERRSTRING("out of memory");
3819 			firsterr = -ENOMEM;
3820 			goto unwind;
3821 		}
3822 	} else {
3823 		nocb_tasks = NULL;
3824 	}
3825 	for (i = 0; i < nrealnocbers; i++) {
3826 		firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]);
3827 		if (torture_init_error(firsterr))
3828 			goto unwind;
3829 	}
3830 	if (stat_interval > 0) {
3831 		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
3832 						  stats_task);
3833 		if (torture_init_error(firsterr))
3834 			goto unwind;
3835 	}
3836 	if (test_no_idle_hz && shuffle_interval > 0) {
3837 		firsterr = torture_shuffle_init(shuffle_interval * HZ);
3838 		if (torture_init_error(firsterr))
3839 			goto unwind;
3840 	}
3841 	if (stutter < 0)
3842 		stutter = 0;
3843 	if (stutter) {
3844 		int t;
3845 
3846 		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
3847 		firsterr = torture_stutter_init(stutter * HZ, t);
3848 		if (torture_init_error(firsterr))
3849 			goto unwind;
3850 	}
3851 	if (fqs_duration < 0)
3852 		fqs_duration = 0;
3853 	if (fqs_duration) {
3854 		/* Create the fqs thread */
3855 		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
3856 						  fqs_task);
3857 		if (torture_init_error(firsterr))
3858 			goto unwind;
3859 	}
3860 	if (test_boost_interval < 1)
3861 		test_boost_interval = 1;
3862 	if (test_boost_duration < 2)
3863 		test_boost_duration = 2;
3864 	if (rcu_torture_can_boost()) {
3865 
3866 		boost_starttime = jiffies + test_boost_interval * HZ;
3867 
3868 		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
3869 					     rcutorture_booster_init,
3870 					     rcutorture_booster_cleanup);
3871 		rcutor_hp = firsterr;
3872 		if (torture_init_error(firsterr))
3873 			goto unwind;
3874 	}
3875 	shutdown_jiffies = jiffies + shutdown_secs * HZ;
3876 	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
3877 	if (torture_init_error(firsterr))
3878 		goto unwind;
3879 	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
3880 				      rcutorture_sync);
3881 	if (torture_init_error(firsterr))
3882 		goto unwind;
3883 	firsterr = rcu_torture_stall_init();
3884 	if (torture_init_error(firsterr))
3885 		goto unwind;
3886 	firsterr = rcu_torture_fwd_prog_init();
3887 	if (torture_init_error(firsterr))
3888 		goto unwind;
3889 	firsterr = rcu_torture_barrier_init();
3890 	if (torture_init_error(firsterr))
3891 		goto unwind;
3892 	firsterr = rcu_torture_read_exit_init();
3893 	if (torture_init_error(firsterr))
3894 		goto unwind;
3895 	if (object_debug)
3896 		rcu_test_debug_objects();
3897 	torture_init_end();
3898 	rcu_gp_slow_register(&rcu_fwd_cb_nodelay);
3899 	return 0;
3900 
3901 unwind:
3902 	torture_init_end();
3903 	rcu_torture_cleanup();
3904 	if (shutdown_secs) {
3905 		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
3906 		kernel_power_off();
3907 	}
3908 	return firsterr;
3909 }
3910 
3911 module_init(rcu_torture_init);
3912 module_exit(rcu_torture_cleanup);
3913