// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	    Josh Triplett <josh@joshtriplett.org>
 *
 * See also: Documentation/RCU/torture.rst
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/rcu_notifier.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>
#include <linux/nmi.h>

#include "rcu.h"

MODULE_DESCRIPTION("Read-Copy Update module-based torture test facility");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");

/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT_1	8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_1	(0xff << RCUTORTURE_RDR_SHIFT_1)
#define RCUTORTURE_RDR_SHIFT_2	16	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_2	(0xff << RCUTORTURE_RDR_SHIFT_2)
#define RCUTORTURE_RDR_BH	0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU_1	0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_RCU_2	0x40	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	7	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_ALLBITS \
	(RCUTORTURE_MAX_EXTEND | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2 | \
	 RCUTORTURE_RDR_MASK_1 | RCUTORTURE_RDR_MASK_2)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
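
/*
 * Purely as an illustrative sketch (this hypothetical composite state is
 * not used anywhere below): a reader segment holding rcu_read_lock_bh()
 * with an SRCU read-side index of 1 stashed in the first mask field
 * could be encoded as:
 *
 *	int rdrstate = RCUTORTURE_RDR_RBH | (1 << RCUTORTURE_RDR_SHIFT_1);
 *
 * The low-order bits record which protections are held, while the two
 * shifted mask fields stash the indexes needed by the matching unlocks.
 */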

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1, "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait primitives");
torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives");
torture_param(bool, gp_cond_exp_full, false,
	      "Use conditional/async full-state expedited GP wait primitives");
torture_param(int, gp_cond_wi, 16 * USEC_PER_SEC / HZ,
	      "Wait interval for normal conditional grace periods, us (default 16 jiffies)");
torture_param(int, gp_cond_wi_exp, 128,
	      "Wait interval for expedited conditional grace periods, us (default 128 us)");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives");
torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives");
torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives");
torture_param(int, gp_poll_wi, 16 * USEC_PER_SEC / HZ,
	      "Wait interval for normal polled grace periods, us (default 16 jiffies)");
torture_param(int, gp_poll_wi_exp, 128,
	      "Wait interval for expedited polled grace periods, us (default 128 us)");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, preempt_duration, 0, "Preemption duration (ms), zero to disable");
torture_param(int, preempt_interval, MSEC_PER_SEC, "Interval between preemptions (ms)");
torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, reader_flavor, SRCU_READ_FLAVOR_NORMAL, "Reader flavors to use, one per bit.");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s).");
torture_param(bool, stall_no_softlockup, false, "Avoid softlockup warning during cpu stall.");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_cpu_repeat, 0, "Number of additional stalls after the first one.");
torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds.");
torture_param(int, test_boost_holdoff, 0, "Holdoff time from rcutorture start, seconds.");
torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds.");
torture_param(int, test_nmis, 0, "End-test NMI tests, 0 to disable.");
torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs");
torture_param(int, test_srcu_lockdep, 0, "Test specified SRCU deadlock scenario.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
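
/*
 * Example usage (illustrative only, using the parameters defined above):
 * a run against SRCU with eight reader threads could be requested with
 * either of:
 *
 *	modprobe rcutorture torture_type=srcu nreaders=8
 *	rcutorture.torture_type=srcu rcutorture.nreaders=8  (boot parameters)
 */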

static int nrealnocbers;
static int nrealreaders;
static int nrealfakewriters;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct **nocb_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct **fwd_prog_tasks;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
static struct task_struct *read_exit_task;
static struct task_struct *preempt_task;

#define RCU_TORTURE_PIPE_LEN 10

// Mailbox-like structure to check RCU global memory ordering.
struct rcu_torture_reader_check {
	unsigned long rtc_myloops;
	int rtc_chkrdr;
	unsigned long rtc_chkloops;
	int rtc_ready;
	struct rcu_torture_reader_check *rtc_assigner;
} ____cacheline_internodealigned_in_smp;

// Update-side data structure used to check RCU readers.
struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
	struct rcu_torture_reader_check *rtort_chkp;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_mbchk_fail;
static atomic_t n_rcu_torture_mbchk_tries;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static unsigned long n_read_exits;
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;
static unsigned long start_gp_seq;
static atomic_long_t n_nocb_offload;
static atomic_long_t n_nocb_deoffload;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_GET_FULL	6
#define RTWS_COND_GET_EXP	7
#define RTWS_COND_GET_EXP_FULL	8
#define RTWS_COND_SYNC		9
#define RTWS_COND_SYNC_FULL	10
#define RTWS_COND_SYNC_EXP	11
#define RTWS_COND_SYNC_EXP_FULL	12
#define RTWS_POLL_GET		13
#define RTWS_POLL_GET_FULL	14
#define RTWS_POLL_GET_EXP	15
#define RTWS_POLL_GET_EXP_FULL	16
#define RTWS_POLL_WAIT		17
#define RTWS_POLL_WAIT_FULL	18
#define RTWS_POLL_WAIT_EXP	19
#define RTWS_POLL_WAIT_EXP_FULL	20
#define RTWS_SYNC		21
#define RTWS_STUTTER		22
#define RTWS_STOPPING		23
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_GET_FULL",
	"RTWS_COND_GET_EXP",
	"RTWS_COND_GET_EXP_FULL",
	"RTWS_COND_SYNC",
	"RTWS_COND_SYNC_FULL",
	"RTWS_COND_SYNC_EXP",
	"RTWS_COND_SYNC_EXP_FULL",
	"RTWS_POLL_GET",
	"RTWS_POLL_GET_FULL",
	"RTWS_POLL_GET_EXP",
	"RTWS_POLL_GET_EXP_FULL",
	"RTWS_POLL_WAIT",
	"RTWS_POLL_WAIT_FULL",
	"RTWS_POLL_WAIT_EXP",
	"RTWS_POLL_WAIT_EXP_FULL",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
	int rt_cpu;
	int rt_end_cpu;
	unsigned long long rt_gp_seq;
	unsigned long long rt_gp_seq_end;
	u64 rt_ts;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;
static int rt_read_preempted;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */
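
/*
 * Note that do_div() divides its first argument in place and returns the
 * remainder. The conversion above is thus equivalent to this sketch,
 * which turns a nanosecond trace_clock_local() timestamp into
 * microseconds and discards the sub-microsecond remainder:
 *
 *	u64 ts = trace_clock_local();		// nanoseconds
 *	(void)do_div(ts, NSEC_PER_USEC);	// ts is now microseconds
 */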

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*   and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static atomic_t rcu_fwd_cb_nodelay;	/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	int (*readlock_held)(void);	// lockdep.
	int (*readlock_nesting)(void);	// actual nesting, if available, -1 if not.
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_gp_state_exp)(void);
	unsigned long (*start_gp_poll_exp)(void);
	void (*start_gp_poll_exp_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_gp_state_exp)(unsigned long oldstate);
	void (*cond_sync_exp)(unsigned long oldstate);
	void (*cond_sync_exp_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*get_comp_state)(void);
	void (*get_comp_state_full)(struct rcu_gp_oldstate *rgosp);
	bool (*same_gp_state)(unsigned long oldstate1, unsigned long oldstate2);
	bool (*same_gp_state_full)(struct rcu_gp_oldstate *rgosp1, struct rcu_gp_oldstate *rgosp2);
	unsigned long (*get_gp_state)(void);
	void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp);
	unsigned long (*start_gp_poll)(void);
	void (*start_gp_poll_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_gp_state)(unsigned long oldstate);
	bool (*poll_gp_state_full)(struct rcu_gp_oldstate *rgosp);
	bool (*poll_need_2gp)(bool poll, bool poll_full);
	void (*cond_sync)(unsigned long oldstate);
	void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp);
	int poll_active;
	int poll_active_full;
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	void (*gp_kthread_dbg)(void);
	bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
	int (*stall_dur)(void);
	void (*get_gp_data)(int *flags, unsigned long *gp_seq);
	void (*gp_slow_register)(atomic_t *rgssp);
	void (*gp_slow_unregister)(atomic_t *rgssp);
	bool (*reader_blocked)(void);
	unsigned long long (*gather_gp_seqs)(void);
	void (*format_gp_seqs)(unsigned long long seqs, char *cp, size_t len);
	long cbflood_max;
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	int no_pi_lock;
	int debug_objects;
	int start_poll_irqsoff;
	const char *name;
};
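
/*
 * A minimal operations vector needs only the handful of fields that the
 * reader and writer fast paths dereference. As a hedged sketch (this
 * hypothetical "example" flavor is not registered anywhere below), a
 * vanilla-RCU-like flavor could be described as:
 *
 *	static struct rcu_torture_ops example_ops = {
 *		.ttype		= RCU_FLAVOR,
 *		.readlock	= rcu_torture_read_lock,
 *		.readunlock	= rcu_torture_read_unlock,
 *		.sync		= synchronize_rcu,
 *		.call		= call_rcu_hurry,
 *		.cb_barrier	= rcu_barrier,
 *		.name		= "example",
 *	};
 *
 * Optional fields left NULL simply cause the corresponding tests to be
 * skipped.
 */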

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int torture_readlock_not_held(void)
{
	return rcu_read_lock_bh_held() || rcu_read_lock_sched_held();
}

static int rcu_torture_read_lock(void)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/*
	 * We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state.
	 */

	if (!atomic_read(&rcu_fwd_cb_nodelay) &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500)))
		torture_preempt_schedule(); /* QS only if preemptible. */
}

static void rcu_torture_read_unlock(int idx)
{
	rcu_read_unlock();
}

static int rcu_torture_readlock_nesting(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RCU))
		return rcu_preempt_depth();
	if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
		return (preempt_count() & PREEMPT_MASK);
	return -1;
}

/*
 * Update callback in the pipe. This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;
	struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);

	if (rtrcp) {
		WRITE_ONCE(rp->rtort_chkp, NULL);
		smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
	ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count);
	if (i + 1 >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}
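
/*
 * To make the counting above concrete: an element whose rtort_pipe_count
 * is 3 has so far survived three grace periods since being replaced, so
 * the update above increments rcu_torture_wcount[3] and advances the
 * count to 4. Only once the count reaches RCU_TORTURE_PIPE_LEN is the
 * element eligible for freeing; a reader that still observes an element
 * late in the pipeline is evidence of a too-short grace period.
 */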

/*
 * Update all callbacks in the pipe. Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_hurry(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static bool rcu_poll_need_2gp(bool poll, bool poll_full)
{
	return poll;
}

static struct rcu_torture_ops rcu_ops = {
	.ttype = RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.readlock_held = torture_readlock_not_held,
	.readlock_nesting = rcu_torture_readlock_nesting,
	.get_gp_seq = rcu_get_gp_seq,
	.gp_diff = rcu_seq_diff,
	.deferred_free = rcu_torture_deferred_free,
	.sync = synchronize_rcu,
	.exp_sync = synchronize_rcu_expedited,
	.same_gp_state = same_state_synchronize_rcu,
	.same_gp_state_full = same_state_synchronize_rcu_full,
	.get_comp_state = get_completed_synchronize_rcu,
	.get_comp_state_full = get_completed_synchronize_rcu_full,
	.get_gp_state = get_state_synchronize_rcu,
	.get_gp_state_full = get_state_synchronize_rcu_full,
	.start_gp_poll = start_poll_synchronize_rcu,
	.start_gp_poll_full = start_poll_synchronize_rcu_full,
	.poll_gp_state = poll_state_synchronize_rcu,
	.poll_gp_state_full = poll_state_synchronize_rcu_full,
	.poll_need_2gp = rcu_poll_need_2gp,
	.cond_sync = cond_synchronize_rcu,
	.cond_sync_full = cond_synchronize_rcu_full,
	.poll_active = NUM_ACTIVE_RCU_POLL_OLDSTATE,
	.poll_active_full = NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE,
	.get_gp_state_exp = get_state_synchronize_rcu,
	.start_gp_poll_exp = start_poll_synchronize_rcu_expedited,
	.start_gp_poll_exp_full = start_poll_synchronize_rcu_expedited_full,
	.poll_gp_state_exp = poll_state_synchronize_rcu,
	.cond_sync_exp = cond_synchronize_rcu_expedited,
	.cond_sync_exp_full = cond_synchronize_rcu_expedited_full,
	.call = call_rcu_hurry,
	.cb_barrier = rcu_barrier,
	.fqs = rcu_force_quiescent_state,
	.gp_kthread_dbg = show_rcu_gp_kthreads,
	.check_boost_failed = rcu_check_boost_fail,
	.stall_dur = rcu_jiffies_till_stall_check,
	.get_gp_data = rcutorture_get_gp_data,
	.gp_slow_register = rcu_gp_slow_register,
	.gp_slow_unregister = rcu_gp_slow_unregister,
	.reader_blocked = IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)
			  ? has_rcu_reader_blocked
			  : NULL,
	.gather_gp_seqs = rcutorture_gather_gp_seqs,
	.format_gp_seqs = rcutorture_format_gp_seqs,
	.irq_capable = 1,
	.can_boost = IS_ENABLED(CONFIG_RCU_BOOST),
	.extendables = RCUTORTURE_MAX_EXTEND,
	.debug_objects = 1,
	.start_poll_irqsoff = 1,
	.name = "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype = INVALID_RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock,
	.readlock_held = torture_readlock_not_held,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_busted_torture_deferred_free,
	.sync = synchronize_rcu_busted,
	.exp_sync = synchronize_rcu_busted,
	.call = call_rcu_busted,
	.gather_gp_seqs = rcutorture_gather_gp_seqs,
	.format_gp_seqs = rcutorture_format_gp_seqs,
	.irq_capable = 1,
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;
static struct rcu_torture_ops srcud_ops;

static void srcu_get_gp_data(int *flags, unsigned long *gp_seq)
{
	srcutorture_get_gp_data(srcu_ctlp, flags, gp_seq);
}

static int srcu_torture_read_lock(void)
{
	int idx;
	struct srcu_ctr __percpu *scp;
	int ret = 0;

	WARN_ON_ONCE(reader_flavor & ~SRCU_READ_FLAVOR_ALL);

	if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || !(reader_flavor & SRCU_READ_FLAVOR_ALL)) {
		idx = srcu_read_lock(srcu_ctlp);
		WARN_ON_ONCE(idx & ~0x1);
		ret += idx;
	}
	if (reader_flavor & SRCU_READ_FLAVOR_NMI) {
		idx = srcu_read_lock_nmisafe(srcu_ctlp);
		WARN_ON_ONCE(idx & ~0x1);
		ret += idx << 1;
	}
	if (reader_flavor & SRCU_READ_FLAVOR_LITE) {
		idx = srcu_read_lock_lite(srcu_ctlp);
		WARN_ON_ONCE(idx & ~0x1);
		ret += idx << 2;
	}
	if (reader_flavor & SRCU_READ_FLAVOR_FAST) {
		scp = srcu_read_lock_fast(srcu_ctlp);
		idx = __srcu_ptr_to_ctr(srcu_ctlp, scp);
		WARN_ON_ONCE(idx & ~0x1);
		ret += idx << 3;
	}
	return ret;
}
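
/*
 * The return value above multiplexes up to four SRCU read-side indexes,
 * one bit position per reader flavor. For example, a NORMAL reader that
 * got index 1 plus an NMI-safe reader that got index 0 yields ret == 0x1,
 * and adding a fast reader with index 1 raises it to 0x9.
 * srcu_torture_read_unlock() below picks these bits back apart.
 */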

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx)
{
	WARN_ON_ONCE((reader_flavor && (idx & ~reader_flavor)) || (!reader_flavor && (idx & ~0x1)));
	if (reader_flavor & SRCU_READ_FLAVOR_FAST)
		srcu_read_unlock_fast(srcu_ctlp, __srcu_ctr_to_ptr(srcu_ctlp, (idx & 0x8) >> 3));
	if (reader_flavor & SRCU_READ_FLAVOR_LITE)
		srcu_read_unlock_lite(srcu_ctlp, (idx & 0x4) >> 2);
	if (reader_flavor & SRCU_READ_FLAVOR_NMI)
		srcu_read_unlock_nmisafe(srcu_ctlp, (idx & 0x2) >> 1);
	if ((reader_flavor & SRCU_READ_FLAVOR_NORMAL) || !(reader_flavor & SRCU_READ_FLAVOR_ALL))
		srcu_read_unlock(srcu_ctlp, idx & 0x1);
}

static int torture_srcu_read_lock_held(void)
{
	return srcu_read_lock_held(srcu_ctlp);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_get_gp_state(void)
{
	return get_state_synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_start_gp_poll(void)
{
	return start_poll_synchronize_srcu(srcu_ctlp);
}

static bool srcu_torture_poll_gp_state(unsigned long oldstate)
{
	return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype = SRCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.readlock_held = torture_srcu_read_lock_held,
	.get_gp_seq = srcu_torture_completed,
	.gp_diff = rcu_seq_diff,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.same_gp_state = same_state_synchronize_srcu,
	.get_comp_state = get_completed_synchronize_srcu,
	.get_gp_state = srcu_torture_get_gp_state,
	.start_gp_poll = srcu_torture_start_gp_poll,
	.poll_gp_state = srcu_torture_poll_gp_state,
	.poll_active = NUM_ACTIVE_SRCU_POLL_OLDSTATE,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.get_gp_data = srcu_get_gp_data,
	.cbflood_max = 50000,
	.irq_capable = 1,
	.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
	.debug_objects = 1,
	.name = "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.readlock_held = torture_srcu_read_lock_held,
	.get_gp_seq = srcu_torture_completed,
	.gp_diff = rcu_seq_diff,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.same_gp_state = same_state_synchronize_srcu,
	.get_comp_state = get_completed_synchronize_srcu,
	.get_gp_state = srcu_torture_get_gp_state,
	.start_gp_poll = srcu_torture_start_gp_poll,
	.poll_gp_state = srcu_torture_poll_gp_state,
	.poll_active = NUM_ACTIVE_SRCU_POLL_OLDSTATE,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.get_gp_data = srcu_get_gp_data,
	.cbflood_max = 50000,
	.irq_capable = 1,
	.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
	.debug_objects = 1,
	.name = "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.readlock_held = torture_srcu_read_lock_held,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU),
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "busted_srcud"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */

static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		torture_sched_setaffinity(current->pid, cpumask_of(cpu), true);
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}
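
/*
 * The migration loop above constitutes a grace period only because
 * trivial-RCU readers run with preemption disabled: forcing this thread
 * to run on each CPU in turn guarantees that any reader previously
 * running there has passed through a context switch, and hence a
 * quiescent state. A hedged sketch of the matching read side, where
 * gptr and do_something_with() are placeholders:
 *
 *	preempt_disable();	// begin trivial-RCU reader
 *	do_something_with(READ_ONCE(gptr));
 *	preempt_enable();	// end reader: context switch now legal
 */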

static int rcu_torture_read_lock_trivial(void)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype = RCU_TRIVIAL_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock_trivial,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock_trivial,
	.readlock_held = torture_readlock_not_held,
	.get_gp_seq = rcu_no_completed,
	.sync = synchronize_rcu_trivial,
	.exp_sync = synchronize_rcu_trivial,
	.irq_capable = 1,
	.name = "trivial"
};

#ifdef CONFIG_TASKS_RCU

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static void synchronize_rcu_mult_test(void)
{
	synchronize_rcu_mult(call_rcu_tasks, call_rcu_hurry);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype = RCU_TASKS_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = tasks_torture_read_lock,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = tasks_torture_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_torture_deferred_free,
	.sync = synchronize_rcu_tasks,
	.exp_sync = synchronize_rcu_mult_test,
	.call = call_rcu_tasks,
	.cb_barrier = rcu_barrier_tasks,
	.gp_kthread_dbg = show_rcu_tasks_classic_gp_kthread,
	.get_gp_data = rcu_tasks_get_gp_data,
	.irq_capable = 1,
	.slow_gps = 1,
	.name = "tasks"
};

#define TASKS_OPS &tasks_ops,

#else // #ifdef CONFIG_TASKS_RCU

#define TASKS_OPS

#endif // #else #ifdef CONFIG_TASKS_RCU


#ifdef CONFIG_TASKS_RUDE_RCU

/*
 * Definitions for rude RCU-tasks torture testing.
 */

static struct rcu_torture_ops tasks_rude_ops = {
	.ttype = RCU_TASKS_RUDE_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock_trivial,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock_trivial,
	.get_gp_seq = rcu_no_completed,
	.sync = synchronize_rcu_tasks_rude,
	.exp_sync = synchronize_rcu_tasks_rude,
	.gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread,
	.get_gp_data = rcu_tasks_rude_get_gp_data,
	.cbflood_max = 50000,
	.irq_capable = 1,
	.name = "tasks-rude"
};

#define TASKS_RUDE_OPS &tasks_rude_ops,

#else // #ifdef CONFIG_TASKS_RUDE_RCU

#define TASKS_RUDE_OPS

#endif // #else #ifdef CONFIG_TASKS_RUDE_RCU


#ifdef CONFIG_TASKS_TRACE_RCU

/*
 * Definitions for tracing RCU-tasks torture testing.
 */

static int tasks_tracing_torture_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_tracing_torture_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_tracing_ops = {
	.ttype = RCU_TASKS_TRACING_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = tasks_tracing_torture_read_lock,
	.read_delay = srcu_read_delay,	/* just reuse srcu's version. */
	.readunlock = tasks_tracing_torture_read_unlock,
	.readlock_held = rcu_read_lock_trace_held,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_tracing_torture_deferred_free,
	.sync = synchronize_rcu_tasks_trace,
	.exp_sync = synchronize_rcu_tasks_trace,
	.call = call_rcu_tasks_trace,
	.cb_barrier = rcu_barrier_tasks_trace,
	.gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread,
	.get_gp_data = rcu_tasks_trace_get_gp_data,
	.cbflood_max = 50000,
	.irq_capable = 1,
	.slow_gps = 1,
	.name = "tasks-tracing"
};

#define TASKS_TRACING_OPS &tasks_tracing_ops,

#else // #ifdef CONFIG_TASKS_TRACE_RCU

#define TASKS_TRACING_OPS

#endif // #else #ifdef CONFIG_TASKS_TRACE_RCU


static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}
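
/*
 * Callers use this helper rather than raw subtraction because some
 * flavors (tree RCU, for example) advance their sequence counters by
 * more than one per grace period, using the low-order bits for state.
 * Typical (sketched) usage when computing the number of elapsed GPs:
 *
 *	unsigned long start = cur_ops->get_gp_seq();
 *	// ... test activity ...
 *	n_gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), start);
 */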

/*
 * RCU torture priority-boost testing. Runs one real-time thread per
 * CPU for moderate bursts, repeatedly starting grace periods and waiting
 * for them to complete. If a given grace period takes too long, we assume
 * that priority inversion has occurred.
 */

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled. Only possible if rcutorture is built-in; otherwise the
	 * user should manually do this by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start)
{
	int cpu;
	static int dbg_done;
	unsigned long end = jiffies;
	bool gp_done;
	unsigned long j;
	static unsigned long last_persist;
	unsigned long lp;
	unsigned long mininterval = test_boost_duration * HZ - HZ / 2;

	if (end - *start > mininterval) {
		// Recheck after checking time to avoid false positives.
		smp_mb(); // Time check before grace-period check.
		if (cur_ops->poll_gp_state(gp_state))
			return false; // passed, though perhaps just barely
		if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
			// At most one persisted message per boost test.
			j = jiffies;
			lp = READ_ONCE(last_persist);
			if (time_after(j, lp + mininterval) &&
			    cmpxchg(&last_persist, lp, j) == lp) {
				if (cpu < 0)
					pr_info("Boost inversion persisted: QS from all CPUs\n");
				else
					pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
			}
			return false; // passed on a technicality
		}
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;
		if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
			pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
				current->rt_priority, gp_state, end - *start);
			cur_ops->gp_kthread_dbg();
			// Recheck after print to flag grace period ending during splat.
			gp_done = cur_ops->poll_gp_state(gp_state);
			pr_info("Boost inversion: GP %lu %s.\n", gp_state,
				gp_done ? "ended already" : "still pending");
		}

		return true; // failed
	} else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) {
		*start = jiffies;
	}

	return false; // passed
}

static int rcu_torture_boost(void *arg)
{
	unsigned long endtime;
	unsigned long gp_state;
	unsigned long gp_state_time;
	unsigned long oldstarttime;
	unsigned long booststarttime = get_torture_init_jiffies() + test_boost_holdoff * HZ;

	if (test_boost_holdoff <= 0 || time_after(jiffies, booststarttime)) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost started");
	} else {
		VERBOSE_TOROUT_STRING("rcu_torture_boost started holdoff period");
		while (time_before(jiffies, booststarttime)) {
			schedule_timeout_idle(HZ);
			if (kthread_should_stop())
				goto cleanup;
		}
		VERBOSE_TOROUT_STRING("rcu_torture_boost finished holdoff period");
	}

	/* Set real-time priority. */
	sched_set_fifo_low(current);

	/* Each pass through the following loop does one boost-test cycle. */
	do {
		bool failed = false; // Test failed already in this test interval
		bool gp_initiated = false;

		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = READ_ONCE(boost_starttime);
		while (time_before(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			if (stutter_wait("rcu_torture_boost"))
				sched_set_fifo_low(current);
			if (torture_must_stop())
				goto checkwait;
		}

		// Do one boost-test interval.
		endtime = oldstarttime + test_boost_duration * HZ;
		while (time_before(jiffies, endtime)) {
			// Has current GP gone too long?
			if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
				failed = rcu_torture_boost_failed(gp_state, &gp_state_time);
			// If we don't have a grace period in flight, start one.
			if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) {
				gp_state = cur_ops->start_gp_poll();
				gp_initiated = true;
				gp_state_time = jiffies;
			}
			if (stutter_wait("rcu_torture_boost")) {
				sched_set_fifo_low(current);
				// If the grace period already ended,
				// we don't know when that happened, so
				// start over.
				if (cur_ops->poll_gp_state(gp_state))
					gp_initiated = false;
			}
			if (torture_must_stop())
				goto checkwait;
		}

		// In case the grace period extended beyond the end of the loop.
		if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
			rcu_torture_boost_failed(gp_state, &gp_state_time);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval. Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				if (oldstarttime == boost_starttime) {
					WRITE_ONCE(boost_starttime,
						   jiffies + test_boost_interval * HZ);
					n_rcu_torture_boosts++;
				}
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(HZ / 20);
		}

		/* Go do the stutter. */
checkwait:	if (stutter_wait("rcu_torture_boost"))
			sched_set_fifo_low(current);
	} while (!torture_must_stop());

cleanup:
	/* Clean up and exit. */
	while (!kthread_should_stop()) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(HZ / 20);
	}
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread. Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;
	int oldnice = task_nice(current);

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (time_before(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(HZ / 20);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		if (stutter_wait("rcu_torture_fqs"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

// Used by writers to randomly choose from the available grace-period primitives.
static int synctype[ARRAY_SIZE(rcu_torture_writer_state_names)] = { };
static int nsynctypes;

/*
 * Determine which grace-period primitives are available.
 */
static void rcu_torture_write_types(void)
{
	bool gp_cond1 = gp_cond, gp_cond_exp1 = gp_cond_exp, gp_cond_full1 = gp_cond_full;
	bool gp_cond_exp_full1 = gp_cond_exp_full, gp_exp1 = gp_exp, gp_poll_exp1 = gp_poll_exp;
	bool gp_poll_exp_full1 = gp_poll_exp_full, gp_normal1 = gp_normal, gp_poll1 = gp_poll;
	bool gp_poll_full1 = gp_poll_full, gp_sync1 = gp_sync;

	/* Initialize synctype[] array. If none set, take default. */
	if (!gp_cond1 &&
	    !gp_cond_exp1 &&
	    !gp_cond_full1 &&
	    !gp_cond_exp_full1 &&
	    !gp_exp1 &&
	    !gp_poll_exp1 &&
	    !gp_poll_exp_full1 &&
	    !gp_normal1 &&
	    !gp_poll1 &&
	    !gp_poll_full1 &&
	    !gp_sync1) {
		gp_cond1 = true;
		gp_cond_exp1 = true;
		gp_cond_full1 = true;
		gp_cond_exp_full1 = true;
		gp_exp1 = true;
		gp_poll_exp1 = true;
		gp_poll_exp_full1 = true;
		gp_normal1 = true;
		gp_poll1 = true;
		gp_poll_full1 = true;
		gp_sync1 = true;
	}
	if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_cond_exp1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp) {
		synctype[nsynctypes++] = RTWS_COND_GET_EXP;
		pr_info("%s: Testing conditional expedited GPs.\n", __func__);
	} else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) {
		pr_alert("%s: gp_cond_exp without primitives.\n", __func__);
	}
	if (gp_cond_full1 && cur_ops->get_gp_state && cur_ops->cond_sync_full) {
		synctype[nsynctypes++] = RTWS_COND_GET_FULL;
		pr_info("%s: Testing conditional full-state GPs.\n", __func__);
	} else if (gp_cond_full && (!cur_ops->get_gp_state || !cur_ops->cond_sync_full)) {
		pr_alert("%s: gp_cond_full without primitives.\n", __func__);
	}
	if (gp_cond_exp_full1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp_full) {
		synctype[nsynctypes++] = RTWS_COND_GET_EXP_FULL;
		pr_info("%s: Testing conditional full-state expedited GPs.\n", __func__);
	} else if (gp_cond_exp_full &&
		   (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp_full)) {
		pr_alert("%s: gp_cond_exp_full without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_poll1 && cur_ops->get_comp_state && cur_ops->same_gp_state &&
	    cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
		synctype[nsynctypes++] = RTWS_POLL_GET;
		pr_info("%s: Testing polling GPs.\n", __func__);
	} else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
		pr_alert("%s: gp_poll without primitives.\n", __func__);
	}
	if (gp_poll_full1 && cur_ops->get_comp_state_full && cur_ops->same_gp_state_full
	    && cur_ops->start_gp_poll_full && cur_ops->poll_gp_state_full) {
		synctype[nsynctypes++] = RTWS_POLL_GET_FULL;
		pr_info("%s: Testing polling full-state GPs.\n", __func__);
	} else if (gp_poll_full && (!cur_ops->start_gp_poll_full || !cur_ops->poll_gp_state_full)) {
		pr_alert("%s: gp_poll_full without primitives.\n", __func__);
	}
	if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) {
		synctype[nsynctypes++] = RTWS_POLL_GET_EXP;
		pr_info("%s: Testing polling expedited GPs.\n", __func__);
	} else if (gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) {
		pr_alert("%s: gp_poll_exp without primitives.\n", __func__);
	}
	if (gp_poll_exp_full1 && cur_ops->start_gp_poll_exp_full && cur_ops->poll_gp_state_full) {
		synctype[nsynctypes++] = RTWS_POLL_GET_EXP_FULL;
		pr_info("%s: Testing polling full-state expedited GPs.\n", __func__);
	} else if (gp_poll_exp_full &&
		   (!cur_ops->start_gp_poll_exp_full || !cur_ops->poll_gp_state_full)) {
		pr_alert("%s: gp_poll_exp_full without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
	pr_alert("%s: Testing %d update types.\n", __func__, nsynctypes);
	pr_info("%s: gp_cond_wi %d gp_cond_wi_exp %d gp_poll_wi %d gp_poll_wi_exp %d\n",
		__func__, gp_cond_wi, gp_cond_wi_exp, gp_poll_wi, gp_poll_wi_exp);
}
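
/*
 * For example (a sketch using the module parameters defined near the top
 * of this file): booting with rcutorture.gp_poll=1 and leaving the other
 * gp_* parameters at their defaults restricts the writer to
 * RTWS_POLL_GET, provided the flavor under test supplies the
 * ->start_gp_poll, ->poll_gp_state, ->get_comp_state, and
 * ->same_gp_state primitives; otherwise the pr_alert() above complains
 * about the missing primitives.
 */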

/*
 * Do the specified rcu_torture_writer() synchronous grace period,
 * while also testing out the polled APIs. Note well that the single-CPU
 * grace-period optimizations must be accounted for.
 */
static void do_rtws_sync(struct torture_random_state *trsp, void (*sync)(void))
{
	unsigned long cookie;
	struct rcu_gp_oldstate cookie_full;
	bool dopoll;
	bool dopoll_full;
	unsigned long r = torture_random(trsp);

	dopoll = cur_ops->get_gp_state && cur_ops->poll_gp_state && !(r & 0x300);
	dopoll_full = cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full && !(r & 0xc00);
	if (dopoll || dopoll_full)
		cpus_read_lock();
	if (dopoll)
		cookie = cur_ops->get_gp_state();
	if (dopoll_full)
		cur_ops->get_gp_state_full(&cookie_full);
	if (cur_ops->poll_need_2gp && cur_ops->poll_need_2gp(dopoll, dopoll_full))
		sync();
	sync();
	WARN_ONCE(dopoll && !cur_ops->poll_gp_state(cookie),
		  "%s: Cookie check 3 failed %pS() online %*pbl.",
		  __func__, sync, cpumask_pr_args(cpu_online_mask));
	WARN_ONCE(dopoll_full && !cur_ops->poll_gp_state_full(&cookie_full),
		  "%s: Cookie check 4 failed %pS() online %*pbl",
		  __func__, sync, cpumask_pr_args(cpu_online_mask));
	if (dopoll || dopoll_full)
		cpus_read_unlock();
}
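
/*
 * In outline, the polled-API check above follows this hedged pattern:
 *
 *	cookie = cur_ops->get_gp_state();	// snapshot GP state
 *	sync();					// twice if ->poll_need_2gp() says so
 *	WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie));
 *
 * The cpus_read_lock() bracketing keeps the set of online CPUs stable
 * across the check, so that the single-CPU grace-period optimizations
 * mentioned above cannot switch on or off mid-check.
 */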

/*
 * RCU torture writer kthread. Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool boot_ended;
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	unsigned long cookie;
	struct rcu_gp_oldstate cookie_full;
	int expediting = 0;
	unsigned long gp_snap;
	unsigned long gp_snap1;
	struct rcu_gp_oldstate gp_snap_full;
	struct rcu_gp_oldstate gp_snap1_full;
	int i;
	int idx;
	int oldnice = task_nice(current);
	struct rcu_gp_oldstate *rgo = NULL;
	int rgo_size = 0;
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	unsigned long stallsdone = jiffies;
	bool stutter_waited;
	unsigned long *ulo = NULL;
	int ulo_size = 0;

	// If a new stall test is added, this must be adjusted.
	if (stall_cpu_holdoff + stall_gp_kthread + stall_cpu)
		stallsdone += (stall_cpu_holdoff + stall_gp_kthread + stall_cpu + 60) *
			      HZ * (stall_cpu_repeat + 1);
	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);
	if (WARN_ONCE(nsynctypes == 0,
		      "%s: No update-side primitives.\n", __func__)) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
		return 0;
	}
	if (cur_ops->poll_active > 0) {
		ulo = kzalloc(cur_ops->poll_active * sizeof(ulo[0]), GFP_KERNEL);
		if (!WARN_ON(!ulo))
			ulo_size = cur_ops->poll_active;
	}
	if (cur_ops->poll_active_full > 0) {
		rgo = kzalloc(cur_ops->poll_active_full * sizeof(rgo[0]), GFP_KERNEL);
		if (!WARN_ON(!rgo))
			rgo_size = cur_ops->poll_active_full;
	}

	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		torture_hrtimeout_us(500, 1000, &rand);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count);
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			WRITE_ONCE(old_rp->rtort_pipe_count,
				   old_rp->rtort_pipe_count + 1);
			ASSERT_EXCLUSIVE_WRITER(old_rp->rtort_pipe_count);

			// Make sure readers block polled grace periods.
			if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
				idx = cur_ops->readlock();
				cookie = cur_ops->get_gp_state();
				WARN_ONCE(cur_ops->poll_gp_state(cookie),
					  "%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
					  __func__,
					  rcu_torture_writer_state_getname(),
					  rcu_torture_writer_state,
					  cookie, cur_ops->get_gp_state());
				if (cur_ops->get_comp_state) {
					cookie = cur_ops->get_comp_state();
					WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie));
				}
				cur_ops->readunlock(idx);
			}
			if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) {
				idx = cur_ops->readlock();
				cur_ops->get_gp_state_full(&cookie_full);
				WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
					  "%s: Cookie check 5 failed %s(%d) online %*pbl\n",
					  __func__,
					  rcu_torture_writer_state_getname(),
					  rcu_torture_writer_state,
					  cpumask_pr_args(cpu_online_mask));
				if (cur_ops->get_comp_state_full) {
					cur_ops->get_comp_state_full(&cookie_full);
					WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full));
				}
				cur_ops->readunlock(idx);
			}
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				do_rtws_sync(&rand, cur_ops->exp_sync);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_gp_state();
				torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi,
						     1000, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET_EXP:
				rcu_torture_writer_state = RTWS_COND_GET_EXP;
				gp_snap = cur_ops->get_gp_state_exp();
				torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi_exp,
						     1000, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC_EXP;
				cur_ops->cond_sync_exp(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET_FULL:
				rcu_torture_writer_state = RTWS_COND_GET_FULL;
				cur_ops->get_gp_state_full(&gp_snap_full);
				torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi,
						     1000, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC_FULL;
				cur_ops->cond_sync_full(&gp_snap_full);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET_EXP_FULL:
				rcu_torture_writer_state = RTWS_COND_GET_EXP_FULL;
1608 cur_ops->get_gp_state_full(&gp_snap_full);
1609 torture_hrtimeout_us(torture_random(&rand) % gp_cond_wi_exp,
1610 1000, &rand);
1611 rcu_torture_writer_state = RTWS_COND_SYNC_EXP_FULL;
1612 cur_ops->cond_sync_exp_full(&gp_snap_full);
1613 rcu_torture_pipe_update(old_rp);
1614 break;
1615 case RTWS_POLL_GET:
1616 rcu_torture_writer_state = RTWS_POLL_GET;
1617 for (i = 0; i < ulo_size; i++)
1618 ulo[i] = cur_ops->get_comp_state();
1619 gp_snap = cur_ops->start_gp_poll();
1620 rcu_torture_writer_state = RTWS_POLL_WAIT;
1621 while (!cur_ops->poll_gp_state(gp_snap)) {
1622 gp_snap1 = cur_ops->get_gp_state();
1623 for (i = 0; i < ulo_size; i++)
1624 if (cur_ops->poll_gp_state(ulo[i]) ||
1625 cur_ops->same_gp_state(ulo[i], gp_snap1)) {
1626 ulo[i] = gp_snap1;
1627 break;
1628 }
1629 WARN_ON_ONCE(ulo_size > 0 && i >= ulo_size);
1630 torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi,
1631 1000, &rand);
1632 }
1633 rcu_torture_pipe_update(old_rp);
1634 break;
1635 case RTWS_POLL_GET_FULL:
1636 rcu_torture_writer_state = RTWS_POLL_GET_FULL;
1637 for (i = 0; i < rgo_size; i++)
1638 cur_ops->get_comp_state_full(&rgo[i]);
1639 cur_ops->start_gp_poll_full(&gp_snap_full);
1640 rcu_torture_writer_state = RTWS_POLL_WAIT_FULL;
1641 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
1642 cur_ops->get_gp_state_full(&gp_snap1_full);
1643 for (i = 0; i < rgo_size; i++)
1644 if (cur_ops->poll_gp_state_full(&rgo[i]) ||
1645 cur_ops->same_gp_state_full(&rgo[i],
1646 &gp_snap1_full)) {
1647 rgo[i] = gp_snap1_full;
1648 break;
1649 }
1650 WARN_ON_ONCE(rgo_size > 0 && i >= rgo_size);
1651 torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi,
1652 1000, &rand);
1653 }
1654 rcu_torture_pipe_update(old_rp);
1655 break;
1656 case RTWS_POLL_GET_EXP:
1657 rcu_torture_writer_state = RTWS_POLL_GET_EXP;
1658 gp_snap = cur_ops->start_gp_poll_exp();
1659 rcu_torture_writer_state = RTWS_POLL_WAIT_EXP;
1660 while (!cur_ops->poll_gp_state_exp(gp_snap))
1661 torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi_exp,
1662 1000, &rand);
1663 rcu_torture_pipe_update(old_rp);
1664 break;
1665 case RTWS_POLL_GET_EXP_FULL:
1666 rcu_torture_writer_state = RTWS_POLL_GET_EXP_FULL;
1667 cur_ops->start_gp_poll_exp_full(&gp_snap_full);
1668 rcu_torture_writer_state = RTWS_POLL_WAIT_EXP_FULL;
1669 while (!cur_ops->poll_gp_state_full(&gp_snap_full))
1670 torture_hrtimeout_us(torture_random(&rand) % gp_poll_wi_exp,
1671 1000, &rand);
1672 rcu_torture_pipe_update(old_rp);
1673 break;
1674 case RTWS_SYNC:
1675 rcu_torture_writer_state = RTWS_SYNC;
1676 do_rtws_sync(&rand, cur_ops->sync);
1677 rcu_torture_pipe_update(old_rp);
1678 break;
1679 default:
1680 WARN_ON_ONCE(1);
1681 break;
1682 }
1683 }
1684 WRITE_ONCE(rcu_torture_current_version,
1685 rcu_torture_current_version + 1);
1686 /* Cycle through nesting levels of rcu_expedite_gp() calls. */
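/*
 * A note on the bit-trick below: when expediting == 0, the expression
 * (!!expediting - 1) is all ones, so a new expediting cycle begins
 * with probability 1/256; once a cycle is underway, that expression
 * is zero and the nesting level advances on every pass through the
 * loop.
 */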
1687 if (can_expedite &&
1688 !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
1689 WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
1690 if (expediting >= 0)
1691 rcu_expedite_gp();
1692 else
1693 rcu_unexpedite_gp();
1694 if (++expediting > 3)
1695 expediting = -expediting;
1696 } else if (!can_expedite) { /* Disabled during boot, recheck. */
1697 can_expedite = !rcu_gp_is_expedited() &&
1698 !rcu_gp_is_normal();
1699 }
1700 rcu_torture_writer_state = RTWS_STUTTER;
1701 boot_ended = rcu_inkernel_boot_has_ended();
1702 stutter_waited = stutter_wait("rcu_torture_writer");
1703 if (stutter_waited &&
1704 !atomic_read(&rcu_fwd_cb_nodelay) &&
1705 !cur_ops->slow_gps &&
1706 !torture_must_stop() &&
1707 boot_ended &&
1708 time_after(jiffies, stallsdone))
1709 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
1710 if (list_empty(&rcu_tortures[i].rtort_free) &&
1711 rcu_access_pointer(rcu_torture_current) != &rcu_tortures[i]) {
1712 tracing_off();
1713 if (cur_ops->gp_kthread_dbg)
1714 cur_ops->gp_kthread_dbg();
1715 WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
1716 rcu_ftrace_dump(DUMP_ALL);
1717 }
1718 if (stutter_waited)
1719 sched_set_normal(current, oldnice);
1720 } while (!torture_must_stop());
1721 rcu_torture_current = NULL; // Let stats task know that we are done.
1722 /* Reset expediting back to unexpedited. */
1723 if (expediting > 0)
1724 expediting = -expediting;
1725 while (can_expedite && expediting++ < 0)
1726 rcu_unexpedite_gp();
1727 WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
1728 if (!can_expedite)
1729 pr_alert("%s" TORTURE_FLAG
1730 " Dynamic grace-period expediting was disabled.\n",
1731 torture_type);
1732 kfree(ulo);
1733 kfree(rgo);
1734 rcu_torture_writer_state = RTWS_STOPPING;
1735 torture_kthread_stopping("rcu_torture_writer");
1736 return 0;
1737 }
1738
1739 /*
1740 * RCU torture fake writer kthread. Repeatedly calls sync, with a random
1741 * delay between calls.
1742 */
1743 static int
1744 rcu_torture_fakewriter(void *arg)
1745 {
1746 unsigned long gp_snap;
1747 struct rcu_gp_oldstate gp_snap_full;
1748 DEFINE_TORTURE_RANDOM(rand);
1749
1750 VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
1751 set_user_nice(current, MAX_NICE);
1752
1753 if (WARN_ONCE(nsynctypes == 0,
1754 "%s: No update-side primitives.\n", __func__)) {
1755 /*
1756 * No update-side primitives, so don't try updating.
1757 * The resulting test won't be testing much, hence the
1758 * above WARN_ONCE().
1759 */
1760 torture_kthread_stopping("rcu_torture_fakewriter");
1761 return 0;
1762 }
1763
1764 do {
1765 torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand);
1766 if (cur_ops->cb_barrier != NULL &&
1767 torture_random(&rand) % (nrealfakewriters * 8) == 0) {
1768 cur_ops->cb_barrier();
1769 } else {
1770 switch (synctype[torture_random(&rand) % nsynctypes]) {
1771 case RTWS_DEF_FREE:
1772 break;
1773 case RTWS_EXP_SYNC:
1774 cur_ops->exp_sync();
1775 break;
1776 case RTWS_COND_GET:
1777 gp_snap = cur_ops->get_gp_state();
1778 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1779 cur_ops->cond_sync(gp_snap);
1780 break;
1781 case RTWS_COND_GET_EXP:
1782 gp_snap = cur_ops->get_gp_state_exp();
1783 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1784 cur_ops->cond_sync_exp(gp_snap);
1785 break;
1786 case RTWS_COND_GET_FULL:
1787 cur_ops->get_gp_state_full(&gp_snap_full);
1788 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1789 cur_ops->cond_sync_full(&gp_snap_full);
1790 break;
1791 case RTWS_COND_GET_EXP_FULL:
1792 cur_ops->get_gp_state_full(&gp_snap_full);
1793 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1794 cur_ops->cond_sync_exp_full(&gp_snap_full);
1795 break;
1796 case RTWS_POLL_GET:
1797 if (cur_ops->start_poll_irqsoff)
1798 local_irq_disable();
1799 gp_snap = cur_ops->start_gp_poll();
1800 if (cur_ops->start_poll_irqsoff)
1801 local_irq_enable();
1802 while (!cur_ops->poll_gp_state(gp_snap)) {
1803 torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1804 &rand);
1805 }
1806 break;
1807 case RTWS_POLL_GET_FULL:
1808 if (cur_ops->start_poll_irqsoff)
1809 local_irq_disable();
1810 cur_ops->start_gp_poll_full(&gp_snap_full);
1811 if (cur_ops->start_poll_irqsoff)
1812 local_irq_enable();
1813 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
1814 torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1815 &rand);
1816 }
1817 break;
1818 case RTWS_POLL_GET_EXP:
1819 gp_snap = cur_ops->start_gp_poll_exp();
1820 while (!cur_ops->poll_gp_state_exp(gp_snap)) {
1821 torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1822 &rand);
1823 }
1824 break;
1825 case RTWS_POLL_GET_EXP_FULL:
1826 cur_ops->start_gp_poll_exp_full(&gp_snap_full);
1827 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
1828 torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1829 &rand);
1830 }
1831 break;
1832 case RTWS_SYNC:
1833 cur_ops->sync();
1834 break;
1835 default:
1836 WARN_ON_ONCE(1);
1837 break;
1838 }
1839 }
1840 stutter_wait("rcu_torture_fakewriter");
1841 } while (!torture_must_stop());
1842
1843 torture_kthread_stopping("rcu_torture_fakewriter");
1844 return 0;
1845 }
1846
1847 static void rcu_torture_timer_cb(struct rcu_head *rhp)
1848 {
1849 kfree(rhp);
1850 }
1851
1852 // Set up and carry out testing of RCU's global memory ordering
1853 static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
1854 struct torture_random_state *trsp)
1855 {
1856 unsigned long loops;
1857 int noc = torture_num_online_cpus();
1858 int rdrchked;
1859 int rdrchker;
1860 struct rcu_torture_reader_check *rtrcp; // Me.
1861 struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking.
1862 struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked.
1863 struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me.
1864
1865 if (myid < 0)
1866 return; // Don't try this from timer handlers.
1867
1868 // Increment my counter.
1869 rtrcp = &rcu_torture_reader_mbchk[myid];
1870 WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1);
1871
1872 // Attempt to assign someone else some checking work.
1873 rdrchked = torture_random(trsp) % nrealreaders;
1874 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
1875 rdrchker = torture_random(trsp) % nrealreaders;
1876 rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker];
1877 if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker &&
1878 smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below.
1879 !READ_ONCE(rtp->rtort_chkp) &&
1880 !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below.
1881 rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops);
1882 WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0);
1883 rtrcp->rtc_chkrdr = rdrchked;
1884 WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends.
1885 if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) ||
1886 cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp))
1887 (void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out.
1888 }
1889
1890 // If assigned some completed work, do it!
1891 rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner);
1892 if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready))
1893 return; // No work or work not yet ready.
1894 rdrchked = rtrcp_assigner->rtc_chkrdr;
1895 if (WARN_ON_ONCE(rdrchked < 0))
1896 return;
1897 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
1898 loops = READ_ONCE(rtrcp_chked->rtc_myloops);
1899 atomic_inc(&n_rcu_torture_mbchk_tries);
1900 if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops))
1901 atomic_inc(&n_rcu_torture_mbchk_fail);
1902 rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2;
1903 rtrcp_assigner->rtc_ready = 0;
1904 smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work.
1905 smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign.
1906 }
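/*
 * A hedged summary of the handshake above: a reader snapshots another
 * reader's loop count and hands that snapshot to a checker, ->rtc_ready
 * is set once a full grace period has elapsed, and the checker then
 * rereads the checked reader's loop count. If that count is behind
 * the pre-grace-period snapshot, RCU's global memory ordering has been
 * violated, and n_rcu_torture_mbchk_fail is incremented.
 */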
1907
1908 // Verify the specified RCUTORTURE_RDR* state.
1909 #define ROEC_ARGS "%s %s: Current %#x To add %#x To remove %#x preempt_count() %#x\n", __func__, s, curstate, new, old, preempt_count()
1910 static void rcutorture_one_extend_check(char *s, int curstate, int new, int old, bool insoftirq)
1911 {
1912 int mask;
1913
1914 if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST_CHK_RDR_STATE))
1915 return;
1916
1917 WARN_ONCE(!(curstate & RCUTORTURE_RDR_IRQ) && irqs_disabled(), ROEC_ARGS);
1918 WARN_ONCE((curstate & RCUTORTURE_RDR_IRQ) && !irqs_disabled(), ROEC_ARGS);
1919
1920 // If CONFIG_PREEMPT_COUNT=n, further checks are unreliable.
1921 if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
1922 return;
1923
1924 WARN_ONCE((curstate & (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH)) &&
1925 !(preempt_count() & SOFTIRQ_MASK), ROEC_ARGS);
1926 WARN_ONCE((curstate & (RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED)) &&
1927 !(preempt_count() & PREEMPT_MASK), ROEC_ARGS);
1928 WARN_ONCE(cur_ops->readlock_nesting &&
1929 (curstate & (RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2)) &&
1930 cur_ops->readlock_nesting() == 0, ROEC_ARGS);
1931
1932 // Timer handlers have all sorts of stuff disabled, so ignore
1933 // unintended disabling.
1934 if (insoftirq)
1935 return;
1936
1937 WARN_ONCE(cur_ops->extendables &&
1938 !(curstate & (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH)) &&
1939 (preempt_count() & SOFTIRQ_MASK), ROEC_ARGS);
1940
1941 /*
1942 * non-preemptible RCU in a preemptible kernel uses preempt_disable()
1943 * as rcu_read_lock().
1944 */
1945 mask = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
1946 if (!IS_ENABLED(CONFIG_PREEMPT_RCU))
1947 mask |= RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2;
1948
1949 WARN_ONCE(cur_ops->extendables && !(curstate & mask) &&
1950 (preempt_count() & PREEMPT_MASK), ROEC_ARGS);
1951
1952 /*
1953 * non-preemptible RCU in a preemptible kernel uses "preempt_count() &
1954 * PREEMPT_MASK" as ->readlock_nesting().
1955 */
1956 mask = RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2;
1957 if (!IS_ENABLED(CONFIG_PREEMPT_RCU))
1958 mask |= RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
1959
1960 WARN_ONCE(cur_ops->readlock_nesting && !(curstate & mask) &&
1961 cur_ops->readlock_nesting() > 0, ROEC_ARGS);
1962 }
1963
1964 /*
1965 * Do one extension of an RCU read-side critical section using the
1966 * current reader state in readstate (set to zero for initial entry
1967 * to extended critical section), set the new state as specified by
1968 * newstate (set to zero for final exit from extended critical section),
1969 * and random-number-generator state in trsp. If this is neither the
1970 * beginning nor the end of the critical section and if there was actually a
1971 * change, do a ->read_delay().
1972 */
1973 static void rcutorture_one_extend(int *readstate, int newstate, bool insoftirq,
1974 struct torture_random_state *trsp,
1975 struct rt_read_seg *rtrsp)
1976 {
1977 bool first;
1978 unsigned long flags;
1979 int idxnew1 = -1;
1980 int idxnew2 = -1;
1981 int idxold1 = *readstate;
1982 int idxold2 = idxold1;
1983 int statesnew = ~*readstate & newstate;
1984 int statesold = *readstate & ~newstate;
1985
1986 first = idxold1 == 0;
1987 WARN_ON_ONCE(idxold2 < 0);
1988 WARN_ON_ONCE(idxold2 & ~RCUTORTURE_RDR_ALLBITS);
1989 rcutorture_one_extend_check("before change", idxold1, statesnew, statesold, insoftirq);
1990 rtrsp->rt_readstate = newstate;
1991
1992 /* First, put new protection in place to avoid critical-section gap. */
1993 if (statesnew & RCUTORTURE_RDR_BH)
1994 local_bh_disable();
1995 if (statesnew & RCUTORTURE_RDR_RBH)
1996 rcu_read_lock_bh();
1997 if (statesnew & RCUTORTURE_RDR_IRQ)
1998 local_irq_disable();
1999 if (statesnew & RCUTORTURE_RDR_PREEMPT)
2000 preempt_disable();
2001 if (statesnew & RCUTORTURE_RDR_SCHED)
2002 rcu_read_lock_sched();
2003 if (statesnew & RCUTORTURE_RDR_RCU_1)
2004 idxnew1 = (cur_ops->readlock() << RCUTORTURE_RDR_SHIFT_1) & RCUTORTURE_RDR_MASK_1;
2005 if (statesnew & RCUTORTURE_RDR_RCU_2)
2006 idxnew2 = (cur_ops->readlock() << RCUTORTURE_RDR_SHIFT_2) & RCUTORTURE_RDR_MASK_2;
2007
2008 // Complain unless both the old and the new protection are in place.
2009 rcutorture_one_extend_check("during change",
2010 idxold1 | statesnew, statesnew, statesold, insoftirq);
2011
2012 // Sample CPU under both sets of protections to reduce confusion.
2013 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)) {
2014 int cpu = raw_smp_processor_id();
2015 rtrsp->rt_cpu = cpu;
2016 if (!first) {
2017 rtrsp[-1].rt_end_cpu = cpu;
2018 if (cur_ops->reader_blocked)
2019 rtrsp[-1].rt_preempted = cur_ops->reader_blocked();
2020 }
2021 }
2022 // Sample grace-period sequence number, as good a place as any.
2023 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_GP) && cur_ops->gather_gp_seqs) {
2024 rtrsp->rt_gp_seq = cur_ops->gather_gp_seqs();
2025 rtrsp->rt_ts = ktime_get_mono_fast_ns();
2026 if (!first)
2027 rtrsp[-1].rt_gp_seq_end = rtrsp->rt_gp_seq;
2028 }
2029
2030 /*
2031 * Next, remove old protection, in decreasing order of strength
2032 * to avoid unlock paths that aren't safe in the stronger
2033 * context. Namely: BH cannot be enabled with interrupts disabled.
2034 * Additionally, PREEMPT_RT requires that BH be enabled in preemptible
2035 * context.
2036 */
2037 if (statesold & RCUTORTURE_RDR_IRQ)
2038 local_irq_enable();
2039 if (statesold & RCUTORTURE_RDR_PREEMPT)
2040 preempt_enable();
2041 if (statesold & RCUTORTURE_RDR_SCHED)
2042 rcu_read_unlock_sched();
2043 if (statesold & RCUTORTURE_RDR_BH)
2044 local_bh_enable();
2045 if (statesold & RCUTORTURE_RDR_RBH)
2046 rcu_read_unlock_bh();
2047 if (statesold & RCUTORTURE_RDR_RCU_2) {
2048 cur_ops->readunlock((idxold2 & RCUTORTURE_RDR_MASK_2) >> RCUTORTURE_RDR_SHIFT_2);
2049 WARN_ON_ONCE(idxnew2 != -1);
2050 idxold2 = 0;
2051 }
2052 if (statesold & RCUTORTURE_RDR_RCU_1) {
2053 bool lockit;
2054
2055 lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff);
2056 if (lockit)
2057 raw_spin_lock_irqsave(&current->pi_lock, flags);
2058 cur_ops->readunlock((idxold1 & RCUTORTURE_RDR_MASK_1) >> RCUTORTURE_RDR_SHIFT_1);
2059 WARN_ON_ONCE(idxnew1 != -1);
2060 idxold1 = 0;
2061 if (lockit)
2062 raw_spin_unlock_irqrestore(&current->pi_lock, flags);
2063 }
2064
2065 /* Delay if neither beginning nor end and there was a change. */
2066 if ((statesnew || statesold) && *readstate && newstate)
2067 cur_ops->read_delay(trsp, rtrsp);
2068
2069 /* Update the reader state. */
2070 if (idxnew1 == -1)
2071 idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1;
2072 WARN_ON_ONCE(idxnew1 < 0);
2073 if (idxnew2 == -1)
2074 idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2;
2075 WARN_ON_ONCE(idxnew2 < 0);
2076 *readstate = idxnew1 | idxnew2 | newstate;
2077 WARN_ON_ONCE(*readstate < 0);
2078 if (WARN_ON_ONCE(*readstate & ~RCUTORTURE_RDR_ALLBITS))
2079 pr_info("Unexpected readstate value of %#x\n", *readstate);
2080 rcutorture_one_extend_check("after change", *readstate, statesnew, statesold, insoftirq);
2081 }
2082
2083 /* Return the biggest extendables mask given current RCU and boot parameters. */
2084 static int rcutorture_extend_mask_max(void)
2085 {
2086 int mask;
2087
2088 WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
2089 mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
2090 mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2;
2091 return mask;
2092 }
2093
2094 /* Return a random protection state mask, but with at least one bit set. */
2095 static int
2096 rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
2097 {
2098 int mask = rcutorture_extend_mask_max();
2099 unsigned long randmask1 = torture_random(trsp);
2100 unsigned long randmask2 = randmask1 >> 3;
2101 unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
2102 unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
2103 unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
2104
2105 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1); // Can't have reader idx bits.
2106 /* Mostly only one bit (need preemption!), sometimes lots of bits. */
2107 if (!(randmask1 & 0x7))
2108 mask = mask & randmask2;
2109 else
2110 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
2111
2112 // Can't have nested RCU reader without outer RCU reader.
2113 if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) {
2114 if (oldmask & RCUTORTURE_RDR_RCU_1)
2115 mask &= ~RCUTORTURE_RDR_RCU_2;
2116 else
2117 mask |= RCUTORTURE_RDR_RCU_1;
2118 }
2119
2120 /*
2121 * Can't enable bh w/irq disabled.
2122 */
2123 if (mask & RCUTORTURE_RDR_IRQ)
2124 mask |= oldmask & bhs;
2125
2126 /*
2127 * Ideally these sequences would be detected in debug builds
2128 * (regardless of RT), but until then don't stop testing
2129 * them on non-RT.
2130 */
2131 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
2132 /* Can't modify BH in atomic context */
2133 if (oldmask & preempts_irq)
2134 mask &= ~bhs;
2135 if ((oldmask | mask) & preempts_irq)
2136 mask |= oldmask & bhs;
2137 }
2138
2139 return mask ?: RCUTORTURE_RDR_RCU_1;
2140 }
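/*
 * Worked example (illustrative only): suppose the reader currently
 * holds RCUTORTURE_RDR_BH and the new random mask selects
 * RCUTORTURE_RDR_IRQ without any BH bits. Dropping BH protection
 * would then mean invoking local_bh_enable() with interrupts disabled,
 * which is forbidden, so the "mask |= oldmask & bhs" rule above
 * carries the old BH bits into the new mask.
 */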
2141
2142 /*
2143 * Do a randomly selected number of extensions of an existing RCU read-side
2144 * critical section.
2145 */
2146 static struct rt_read_seg *
2147 rcutorture_loop_extend(int *readstate, bool insoftirq, struct torture_random_state *trsp,
2148 struct rt_read_seg *rtrsp)
2149 {
2150 int i;
2151 int j;
2152 int mask = rcutorture_extend_mask_max();
2153
2154 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
2155 if (!((mask - 1) & mask))
2156 return rtrsp; /* Current RCU reader not extendable. */
2157 /* Bias towards larger numbers of loops. */
2158 i = torture_random(trsp);
2159 i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
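// ORing the random value with a shifted copy of itself sets each
// low-order bit with probability 3/4 rather than 1/2, biasing the
// result upward; the final count lands in the range
// [1, RCUTORTURE_RDR_MAX_LOOPS + 1].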
2160 for (j = 0; j < i; j++) {
2161 mask = rcutorture_extend_mask(*readstate, trsp);
2162 rcutorture_one_extend(readstate, mask, insoftirq, trsp, &rtrsp[j]);
2163 }
2164 return &rtrsp[j];
2165 }
2166
2167 /*
2168 * Do one read-side critical section, returning false if there was
2169 * no data to read. Can be invoked both from process context and
2170 * from a timer handler.
2171 */
2172 static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
2173 {
2174 bool checkpolling = !(torture_random(trsp) & 0xfff);
2175 unsigned long cookie;
2176 struct rcu_gp_oldstate cookie_full;
2177 int i;
2178 unsigned long started;
2179 unsigned long completed;
2180 int newstate;
2181 struct rcu_torture *p;
2182 int pipe_count;
2183 bool preempted = false;
2184 int readstate = 0;
2185 struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
2186 struct rt_read_seg *rtrsp = &rtseg[0];
2187 struct rt_read_seg *rtrsp1;
2188 unsigned long long ts;
2189
2190 WARN_ON_ONCE(!rcu_is_watching());
2191 newstate = rcutorture_extend_mask(readstate, trsp);
2192 rcutorture_one_extend(&readstate, newstate, myid < 0, trsp, rtrsp++);
2193 if (checkpolling) {
2194 if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
2195 cookie = cur_ops->get_gp_state();
2196 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
2197 cur_ops->get_gp_state_full(&cookie_full);
2198 }
2199 started = cur_ops->get_gp_seq();
2200 ts = rcu_trace_clock_local();
2201 p = rcu_dereference_check(rcu_torture_current,
2202 !cur_ops->readlock_held || cur_ops->readlock_held());
2203 if (p == NULL) {
2204 /* Wait for rcu_torture_writer to get underway */
2205 rcutorture_one_extend(&readstate, 0, myid < 0, trsp, rtrsp);
2206 return false;
2207 }
2208 if (p->rtort_mbtest == 0)
2209 atomic_inc(&n_rcu_torture_mberror);
2210 rcu_torture_reader_do_mbchk(myid, p, trsp);
2211 rtrsp = rcutorture_loop_extend(&readstate, myid < 0, trsp, rtrsp);
2212 preempt_disable();
2213 pipe_count = READ_ONCE(p->rtort_pipe_count);
2214 if (pipe_count > RCU_TORTURE_PIPE_LEN) {
2215 // Should not happen in a correct RCU implementation,
2216 // but happens quite often for torture_type=busted.
2217 pipe_count = RCU_TORTURE_PIPE_LEN;
2218 }
2219 completed = cur_ops->get_gp_seq();
2220 if (pipe_count > 1) {
2221 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
2222 ts, started, completed);
2223 rcu_ftrace_dump(DUMP_ALL);
2224 }
2225 __this_cpu_inc(rcu_torture_count[pipe_count]);
2226 completed = rcutorture_seq_diff(completed, started);
2227 if (completed > RCU_TORTURE_PIPE_LEN) {
2228 /* Should not happen, but... */
2229 completed = RCU_TORTURE_PIPE_LEN;
2230 }
2231 __this_cpu_inc(rcu_torture_batch[completed]);
2232 preempt_enable();
2233 if (checkpolling) {
2234 if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
2235 WARN_ONCE(cur_ops->poll_gp_state(cookie),
2236 "%s: Cookie check 2 failed %s(%d) %lu->%lu\n",
2237 __func__,
2238 rcu_torture_writer_state_getname(),
2239 rcu_torture_writer_state,
2240 cookie, cur_ops->get_gp_state());
2241 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
2242 WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
2243 "%s: Cookie check 6 failed %s(%d) online %*pbl\n",
2244 __func__,
2245 rcu_torture_writer_state_getname(),
2246 rcu_torture_writer_state,
2247 cpumask_pr_args(cpu_online_mask));
2248 }
2249 if (cur_ops->reader_blocked)
2250 preempted = cur_ops->reader_blocked();
2251 rcutorture_one_extend(&readstate, 0, myid < 0, trsp, rtrsp);
2252 WARN_ON_ONCE(readstate);
2253 // This next splat is expected behavior if leakpointer, especially
2254 // for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
2255 WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1);
2256
2257 /* If error or close call, record the sequence of reader protections. */
2258 if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
2259 i = 0;
2260 for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
2261 err_segs[i++] = *rtrsp1;
2262 rt_read_nsegs = i;
2263 rt_read_preempted = preempted;
2264 }
2265
2266 return true;
2267 }
2268
2269 static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
2270
2271 /*
2272 * RCU torture reader from timer handler. Dereferences rcu_torture_current,
2273 * incrementing the corresponding element of the pipeline array. The
2274 * counter in the element should never be greater than 1; otherwise, the
2275 * RCU implementation is broken.
2276 */
2277 static void rcu_torture_timer(struct timer_list *unused)
2278 {
2279 atomic_long_inc(&n_rcu_torture_timers);
2280 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1);
2281
2282 /* Test call_rcu() invocation from interrupt handler. */
2283 if (cur_ops->call) {
2284 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);
2285
2286 if (rhp)
2287 cur_ops->call(rhp, rcu_torture_timer_cb);
2288 }
2289 }
2290
2291 /*
2292 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current,
2293 * incrementing the corresponding element of the pipeline array. The
2294 * counter in the element should never be greater than 1; otherwise, the
2295 * RCU implementation is broken.
2296 */
2297 static int
2298 rcu_torture_reader(void *arg)
2299 {
2300 unsigned long lastsleep = jiffies;
2301 long myid = (long)arg;
2302 int mynumonline = myid;
2303 DEFINE_TORTURE_RANDOM(rand);
2304 struct timer_list t;
2305
2306 VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
2307 set_user_nice(current, MAX_NICE);
2308 if (irqreader && cur_ops->irq_capable)
2309 timer_setup_on_stack(&t, rcu_torture_timer, 0);
2310 tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2311 do {
2312 if (irqreader && cur_ops->irq_capable) {
2313 if (!timer_pending(&t))
2314 mod_timer(&t, jiffies + 1);
2315 }
2316 if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop())
2317 schedule_timeout_interruptible(HZ);
2318 if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
2319 torture_hrtimeout_us(500, 1000, &rand);
2320 lastsleep = jiffies + 10;
2321 }
2322 while (torture_num_online_cpus() < mynumonline && !torture_must_stop())
2323 schedule_timeout_interruptible(HZ / 5);
2324 stutter_wait("rcu_torture_reader");
2325 } while (!torture_must_stop());
2326 if (irqreader && cur_ops->irq_capable) {
2327 del_timer_sync(&t);
2328 destroy_timer_on_stack(&t);
2329 }
2330 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2331 torture_kthread_stopping("rcu_torture_reader");
2332 return 0;
2333 }
2334
2335 /*
2336 * Randomly toggle CPUs' callback-offload state. This uses hrtimers to
2337 * increase race probabilities and fuzzes the interval between toggling.
2338 */
2339 static int rcu_nocb_toggle(void *arg)
2340 {
2341 int cpu;
2342 int maxcpu = -1;
2343 int oldnice = task_nice(current);
2344 long r;
2345 DEFINE_TORTURE_RANDOM(rand);
2346 ktime_t toggle_delay;
2347 unsigned long toggle_fuzz;
2348 ktime_t toggle_interval = ms_to_ktime(nocbs_toggle);
2349
2350 VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started");
2351 while (!rcu_inkernel_boot_has_ended())
2352 schedule_timeout_interruptible(HZ / 10);
2353 for_each_possible_cpu(cpu)
2354 maxcpu = cpu;
2355 WARN_ON(maxcpu < 0);
2356 if (toggle_interval > ULONG_MAX)
2357 toggle_fuzz = ULONG_MAX >> 3;
2358 else
2359 toggle_fuzz = toggle_interval >> 3;
2360 if (toggle_fuzz <= 0)
2361 toggle_fuzz = NSEC_PER_USEC;
2362 do {
2363 r = torture_random(&rand);
2364 cpu = (r >> 1) % (maxcpu + 1);
2365 if (r & 0x1) {
2366 rcu_nocb_cpu_offload(cpu);
2367 atomic_long_inc(&n_nocb_offload);
2368 } else {
2369 rcu_nocb_cpu_deoffload(cpu);
2370 atomic_long_inc(&n_nocb_deoffload);
2371 }
2372 toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval;
2373 set_current_state(TASK_INTERRUPTIBLE);
2374 schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL);
2375 if (stutter_wait("rcu_nocb_toggle"))
2376 sched_set_normal(current, oldnice);
2377 } while (!torture_must_stop());
2378 torture_kthread_stopping("rcu_nocb_toggle");
2379 return 0;
2380 }
2381
2382 /*
2383 * Print torture statistics. Caller must ensure that there is only
2384 * one call to this function at a given time!!! This is normally
2385 * accomplished by relying on the module system to only have one copy
2386 * of the module loaded, and then by giving the rcu_torture_stats
2387 * kthread full control (or the init/cleanup functions when rcu_torture_stats
2388 * thread is not running).
2389 */
2390 static void
2391 rcu_torture_stats_print(void)
2392 {
2393 int cpu;
2394 int i;
2395 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
2396 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
2397 struct rcu_torture *rtcp;
2398 static unsigned long rtcv_snap = ULONG_MAX;
2399 static bool splatted;
2400 struct task_struct *wtp;
2401
2402 for_each_possible_cpu(cpu) {
2403 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
2404 pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
2405 batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
2406 }
2407 }
2408 for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) {
2409 if (pipesummary[i] != 0)
2410 break;
2411 }
2412
2413 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2414 rtcp = rcu_access_pointer(rcu_torture_current);
2415 pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
2416 rtcp,
2417 rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
2418 rcu_torture_current_version,
2419 list_empty(&rcu_torture_freelist),
2420 atomic_read(&n_rcu_torture_alloc),
2421 atomic_read(&n_rcu_torture_alloc_fail),
2422 atomic_read(&n_rcu_torture_free));
2423 pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld ",
2424 atomic_read(&n_rcu_torture_mberror),
2425 atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries),
2426 n_rcu_torture_barrier_error,
2427 n_rcu_torture_boost_ktrerror);
2428 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
2429 n_rcu_torture_boost_failure,
2430 n_rcu_torture_boosts,
2431 atomic_long_read(&n_rcu_torture_timers));
2432 torture_onoff_stats();
2433 pr_cont("barrier: %ld/%ld:%ld ",
2434 data_race(n_barrier_successes),
2435 data_race(n_barrier_attempts),
2436 data_race(n_rcu_torture_barrier_error));
2437 pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic.
2438 pr_cont("nocb-toggles: %ld:%ld\n",
2439 atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload));
2440
2441 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2442 if (atomic_read(&n_rcu_torture_mberror) ||
2443 atomic_read(&n_rcu_torture_mbchk_fail) ||
2444 n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
2445 n_rcu_torture_boost_failure || i > 1) {
2446 pr_cont("%s", "!!! ");
2447 atomic_inc(&n_rcu_torture_error);
2448 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
2449 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail));
2450 WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier()
2451 WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
2452 WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?)
2453 WARN_ON_ONCE(i > 1); // Too-short grace period
2454 }
2455 pr_cont("Reader Pipe: ");
2456 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
2457 pr_cont(" %ld", pipesummary[i]);
2458 pr_cont("\n");
2459
2460 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2461 pr_cont("Reader Batch: ");
2462 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
2463 pr_cont(" %ld", batchsummary[i]);
2464 pr_cont("\n");
2465
2466 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2467 pr_cont("Free-Block Circulation: ");
2468 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
2469 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
2470 }
2471 pr_cont("\n");
2472
2473 if (cur_ops->stats)
2474 cur_ops->stats();
2475 if (rtcv_snap == rcu_torture_current_version &&
2476 rcu_access_pointer(rcu_torture_current) &&
2477 !rcu_stall_is_suppressed()) {
2478 int __maybe_unused flags = 0;
2479 unsigned long __maybe_unused gp_seq = 0;
2480
2481 if (cur_ops->get_gp_data)
2482 cur_ops->get_gp_data(&flags, &gp_seq);
2483 wtp = READ_ONCE(writer_task);
2484 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n",
2485 rcu_torture_writer_state_getname(),
2486 rcu_torture_writer_state, gp_seq, flags,
2487 wtp == NULL ? ~0U : wtp->__state,
2488 wtp == NULL ? -1 : (int)task_cpu(wtp));
2489 if (!splatted && wtp) {
2490 sched_show_task(wtp);
2491 splatted = true;
2492 }
2493 if (cur_ops->gp_kthread_dbg)
2494 cur_ops->gp_kthread_dbg();
2495 rcu_ftrace_dump(DUMP_ALL);
2496 }
2497 rtcv_snap = rcu_torture_current_version;
2498 }
2499
2500 /*
2501 * Periodically prints torture statistics, if periodic statistics printing
2502 * was specified via the stat_interval module parameter.
2503 */
2504 static int
2505 rcu_torture_stats(void *arg)
2506 {
2507 VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
2508 do {
2509 schedule_timeout_interruptible(stat_interval * HZ);
2510 rcu_torture_stats_print();
2511 torture_shutdown_absorb("rcu_torture_stats");
2512 } while (!torture_must_stop());
2513 torture_kthread_stopping("rcu_torture_stats");
2514 return 0;
2515 }
2516
2517 /* Test mem_dump_obj() and friends. */
2518 static void rcu_torture_mem_dump_obj(void)
2519 {
2520 struct rcu_head *rhp;
2521 struct kmem_cache *kcp;
2522 static int z;
2523
2524 kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL);
2525 if (WARN_ON_ONCE(!kcp))
2526 return;
2527 rhp = kmem_cache_alloc(kcp, GFP_KERNEL);
2528 if (WARN_ON_ONCE(!rhp)) {
2529 kmem_cache_destroy(kcp);
2530 return;
2531 }
2532 pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z);
2533 pr_alert("mem_dump_obj(ZERO_SIZE_PTR):");
2534 mem_dump_obj(ZERO_SIZE_PTR);
2535 pr_alert("mem_dump_obj(NULL):");
2536 mem_dump_obj(NULL);
2537 pr_alert("mem_dump_obj(%px):", &rhp);
2538 mem_dump_obj(&rhp);
2539 pr_alert("mem_dump_obj(%px):", rhp);
2540 mem_dump_obj(rhp);
2541 pr_alert("mem_dump_obj(%px):", &rhp->func);
2542 mem_dump_obj(&rhp->func);
2543 pr_alert("mem_dump_obj(%px):", &z);
2544 mem_dump_obj(&z);
2545 kmem_cache_free(kcp, rhp);
2546 kmem_cache_destroy(kcp);
2547 rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
2548 if (WARN_ON_ONCE(!rhp))
2549 return;
2550 pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
2551 pr_alert("mem_dump_obj(kmalloc %px):", rhp);
2552 mem_dump_obj(rhp);
2553 pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func);
2554 mem_dump_obj(&rhp->func);
2555 kfree(rhp);
2556 rhp = vmalloc(4096);
2557 if (WARN_ON_ONCE(!rhp))
2558 return;
2559 pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
2560 pr_alert("mem_dump_obj(vmalloc %px):", rhp);
2561 mem_dump_obj(rhp);
2562 pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func);
2563 mem_dump_obj(&rhp->func);
2564 vfree(rhp);
2565 }
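/*
 * A hedged note on the output above: %px deliberately prints raw,
 * unhashed pointer values, without which the mem_dump_obj() output
 * could not be compared against the addresses being tested.
 */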
2566
2567 static void
2568 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
2569 {
2570 pr_alert("%s" TORTURE_FLAG
2571 "--- %s: nreaders=%d nfakewriters=%d "
2572 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
2573 "shuffle_interval=%d stutter=%d irqreader=%d "
2574 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
2575 "test_boost=%d/%d test_boost_interval=%d "
2576 "test_boost_duration=%d test_boost_holdoff=%d shutdown_secs=%d "
2577 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
2578 "stall_cpu_block=%d stall_cpu_repeat=%d "
2579 "n_barrier_cbs=%d "
2580 "onoff_interval=%d onoff_holdoff=%d "
2581 "read_exit_delay=%d read_exit_burst=%d "
2582 "reader_flavor=%x "
2583 "nocbs_nthreads=%d nocbs_toggle=%d "
2584 "test_nmis=%d "
2585 "preempt_duration=%d preempt_interval=%d\n",
2586 torture_type, tag, nrealreaders, nrealfakewriters,
2587 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
2588 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
2589 test_boost, cur_ops->can_boost,
2590 test_boost_interval, test_boost_duration, test_boost_holdoff, shutdown_secs,
2591 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
2592 stall_cpu_block, stall_cpu_repeat,
2593 n_barrier_cbs,
2594 onoff_interval, onoff_holdoff,
2595 read_exit_delay, read_exit_burst,
2596 reader_flavor,
2597 nocbs_nthreads, nocbs_toggle,
2598 test_nmis,
2599 preempt_duration, preempt_interval);
2600 }
2601
2602 static int rcutorture_booster_cleanup(unsigned int cpu)
2603 {
2604 struct task_struct *t;
2605
2606 if (boost_tasks[cpu] == NULL)
2607 return 0;
2608 mutex_lock(&boost_mutex);
2609 t = boost_tasks[cpu];
2610 boost_tasks[cpu] = NULL;
2611 rcu_torture_enable_rt_throttle();
2612 mutex_unlock(&boost_mutex);
2613
2614 /* This must be outside of the mutex, otherwise deadlock! */
2615 torture_stop_kthread(rcu_torture_boost, t);
2616 return 0;
2617 }
2618
2619 static int rcutorture_booster_init(unsigned int cpu)
2620 {
2621 int retval;
2622
2623 if (boost_tasks[cpu] != NULL)
2624 return 0; /* Already created, nothing more to do. */
2625
2626 // Testing RCU priority boosting requires rcutorture do
2627 // some serious abuse. Counter this by running ksoftirqd
2628 // at higher priority.
2629 if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) {
2630 struct sched_param sp;
2631 struct task_struct *t;
2632
2633 t = per_cpu(ksoftirqd, cpu);
2634 WARN_ON_ONCE(!t);
2635 sp.sched_priority = 2;
2636 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
2637 #ifdef CONFIG_IRQ_FORCED_THREADING
2638 if (force_irqthreads()) {
2639 t = per_cpu(ktimerd, cpu);
2640 WARN_ON_ONCE(!t);
2641 sp.sched_priority = 2;
2642 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
2643 }
2644 #endif
2645 }
2646
2647 /* Don't allow time recalculation while creating a new task. */
2648 mutex_lock(&boost_mutex);
2649 rcu_torture_disable_rt_throttle();
2650 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
2651 boost_tasks[cpu] = kthread_run_on_cpu(rcu_torture_boost, NULL,
2652 cpu, "rcu_torture_boost_%u");
2653 if (IS_ERR(boost_tasks[cpu])) {
2654 retval = PTR_ERR(boost_tasks[cpu]);
2655 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
2656 n_rcu_torture_boost_ktrerror++;
2657 boost_tasks[cpu] = NULL;
2658 mutex_unlock(&boost_mutex);
2659 return retval;
2660 }
2661 mutex_unlock(&boost_mutex);
2662 return 0;
2663 }
2664
2665 static int rcu_torture_stall_nf(struct notifier_block *nb, unsigned long v, void *ptr)
2666 {
2667 pr_info("%s: v=%lu, duration=%lu.\n", __func__, v, (unsigned long)ptr);
2668 return NOTIFY_OK;
2669 }
2670
2671 static struct notifier_block rcu_torture_stall_block = {
2672 .notifier_call = rcu_torture_stall_nf,
2673 };
2674
2675 /*
2676 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
2677 * induces a CPU stall for the time specified by stall_cpu. If a new
2678 * stall test is added, stallsdone in rcu_torture_writer() must be adjusted.
2679 */
2680 static void rcu_torture_stall_one(int rep, int irqsoff)
2681 {
2682 int idx;
2683 unsigned long stop_at;
2684
2685 if (stall_cpu_holdoff > 0) {
2686 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
2687 schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
2688 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
2689 }
2690 if (!kthread_should_stop() && stall_gp_kthread > 0) {
2691 VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall");
2692 rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
2693 for (idx = 0; idx < stall_gp_kthread + 2; idx++) {
2694 if (kthread_should_stop())
2695 break;
2696 schedule_timeout_uninterruptible(HZ);
2697 }
2698 }
2699 if (!kthread_should_stop() && stall_cpu > 0) {
2700 VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall");
2701 stop_at = ktime_get_seconds() + stall_cpu;
2702 /* RCU CPU stall is expected behavior in following code. */
2703 idx = cur_ops->readlock();
2704 if (irqsoff)
2705 local_irq_disable();
2706 else if (!stall_cpu_block)
2707 preempt_disable();
2708 pr_alert("%s start stall episode %d on CPU %d.\n",
2709 __func__, rep + 1, raw_smp_processor_id());
2710 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), stop_at) &&
2711 !kthread_should_stop())
2712 if (stall_cpu_block) {
2713 #ifdef CONFIG_PREEMPTION
2714 preempt_schedule();
2715 #else
2716 schedule_timeout_uninterruptible(HZ);
2717 #endif
2718 } else if (stall_no_softlockup) {
2719 touch_softlockup_watchdog();
2720 }
2721 if (irqsoff)
2722 local_irq_enable();
2723 else if (!stall_cpu_block)
2724 preempt_enable();
2725 cur_ops->readunlock(idx);
2726 }
2727 }
2728
2729 /*
2730 * CPU-stall kthread. Invokes rcu_torture_stall_one() once, and then as many
2731 * additional times as specified by the stall_cpu_repeat module parameter.
2732 * Note that stall_cpu_irqsoff is ignored on the second and subsequent
2733 * stalls.
2734 */
2735 static int rcu_torture_stall(void *args)
2736 {
2737 int i;
2738 int repeat = stall_cpu_repeat;
2739 int ret;
2740
2741 VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
2742 if (repeat < 0) {
2743 repeat = 0;
2744 WARN_ON_ONCE(IS_BUILTIN(CONFIG_RCU_TORTURE_TEST));
2745 }
2746 if (rcu_cpu_stall_notifiers) {
2747 ret = rcu_stall_chain_notifier_register(&rcu_torture_stall_block);
2748 if (ret)
2749 pr_info("%s: rcu_stall_chain_notifier_register() returned %d, %sexpected.\n",
2750 __func__, ret, !IS_ENABLED(CONFIG_RCU_STALL_COMMON) ? "un" : "");
2751 }
2752 for (i = 0; i <= repeat; i++) {
2753 if (kthread_should_stop())
2754 break;
2755 rcu_torture_stall_one(i, i == 0 ? stall_cpu_irqsoff : 0);
2756 }
2757 pr_alert("%s end.\n", __func__);
2758 if (rcu_cpu_stall_notifiers && !ret) {
2759 ret = rcu_stall_chain_notifier_unregister(&rcu_torture_stall_block);
2760 if (ret)
2761 pr_info("%s: rcu_stall_chain_notifier_unregister() returned %d.\n", __func__, ret);
2762 }
2763 torture_shutdown_absorb("rcu_torture_stall");
2764 while (!kthread_should_stop())
2765 schedule_timeout_interruptible(10 * HZ);
2766 return 0;
2767 }
2768
2769 /* Spawn CPU-stall kthread, if stall_cpu specified. */
2770 static int __init rcu_torture_stall_init(void)
2771 {
2772 if (stall_cpu <= 0 && stall_gp_kthread <= 0)
2773 return 0;
2774 return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
2775 }
2776
2777 /* State structure for forward-progress self-propagating RCU callback. */
2778 struct fwd_cb_state {
2779 struct rcu_head rh;
2780 int stop;
2781 };
2782
2783 /*
2784 * Forward-progress self-propagating RCU callback function. Because
2785 * callbacks run from softirq, this function is an implicit RCU read-side
2786 * critical section.
2787 */
2788 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
2789 {
2790 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
2791
2792 if (READ_ONCE(fcsp->stop)) {
2793 WRITE_ONCE(fcsp->stop, 2);
2794 return;
2795 }
2796 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
2797 }
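/*
 * The ->stop field thus forms a small handshake: the test kthread
 * writes 1 to ask the callback to stop reposting itself, and the
 * callback acknowledges by writing 2, which rcu_torture_fwd_prog_nr()
 * verifies with WARN_ON(READ_ONCE(fcs.stop) != 2) after a grace
 * period plus a callback barrier.
 */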
2798
2799 /* State for continuous-flood RCU callbacks. */
2800 struct rcu_fwd_cb {
2801 struct rcu_head rh;
2802 struct rcu_fwd_cb *rfc_next;
2803 struct rcu_fwd *rfc_rfp;
2804 int rfc_gps;
2805 };
2806
2807 #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */
2808 #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */
2809 #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */
2810 #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */
2811 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
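/*
 * For example, with MAX_FWD_CB_JIFFIES equal to 8 * HZ and
 * FWD_CBS_HIST_DIV equal to 10 buckets per second, N_LAUNDERS_HIST
 * works out to 2 * 8 * HZ / (HZ / 10) = 160 buckets, that is, enough
 * histogram to cover twice the maximum callback-test duration.
 */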
2812
2813 struct rcu_launder_hist {
2814 long n_launders;
2815 unsigned long launder_gp_seq;
2816 };
2817
2818 struct rcu_fwd {
2819 spinlock_t rcu_fwd_lock;
2820 struct rcu_fwd_cb *rcu_fwd_cb_head;
2821 struct rcu_fwd_cb **rcu_fwd_cb_tail;
2822 long n_launders_cb;
2823 unsigned long rcu_fwd_startat;
2824 struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
2825 unsigned long rcu_launder_gp_seq_start;
2826 int rcu_fwd_id;
2827 };
2828
2829 static DEFINE_MUTEX(rcu_fwd_mutex);
2830 static struct rcu_fwd *rcu_fwds;
2831 static unsigned long rcu_fwd_seq;
2832 static atomic_long_t rcu_fwd_max_cbs;
2833 static bool rcu_fwd_emergency_stop;
2834
2835 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
2836 {
2837 unsigned long gps;
2838 unsigned long gps_old;
2839 int i;
2840 int j;
2841
2842 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
2843 if (rfp->n_launders_hist[i].n_launders > 0)
2844 break;
2845 pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):",
2846 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat);
2847 gps_old = rfp->rcu_launder_gp_seq_start;
2848 for (j = 0; j <= i; j++) {
2849 gps = rfp->n_launders_hist[j].launder_gp_seq;
2850 pr_cont(" %ds/%d: %ld:%ld",
2851 j + 1, FWD_CBS_HIST_DIV,
2852 rfp->n_launders_hist[j].n_launders,
2853 rcutorture_seq_diff(gps, gps_old));
2854 gps_old = gps;
2855 }
2856 pr_cont("\n");
2857 }
2858
2859 /* Callback function for continuous-flood RCU callbacks. */
2860 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
2861 {
2862 unsigned long flags;
2863 int i;
2864 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
2865 struct rcu_fwd_cb **rfcpp;
2866 struct rcu_fwd *rfp = rfcp->rfc_rfp;
2867
2868 rfcp->rfc_next = NULL;
2869 rfcp->rfc_gps++;
2870 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2871 rfcpp = rfp->rcu_fwd_cb_tail;
2872 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
2873 smp_store_release(rfcpp, rfcp);
2874 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
2875 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
2876 if (i >= ARRAY_SIZE(rfp->n_launders_hist))
2877 i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
2878 rfp->n_launders_hist[i].n_launders++;
2879 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
2880 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2881 }
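/*
 * A hedged reading of the enqueue discipline above: the tail pointer
 * is advanced under ->rcu_fwd_lock, and only then is the new element
 * published via smp_store_release(), which pairs with the READ_ONCE()
 * traversal of ->rfc_next in rcu_torture_fwd_prog_cr() so that a
 * traverser seeing the link also sees the element's initialization.
 */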
2882
2883 // Give the scheduler a chance, even on nohz_full CPUs.
2884 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
2885 {
2886 if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
2887 // Real call_rcu() floods hit userspace, so emulate that.
2888 if (need_resched() || (iter & 0xfff))
2889 schedule();
2890 return;
2891 }
2892 // No userspace emulation: CB invocation throttles call_rcu().
2893 cond_resched();
2894 }
2895
2896 /*
2897 * Free all callbacks on the rcu_fwd_cb_head list, either because the
2898 * test is over or because we hit an OOM event.
2899 */
2900 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
2901 {
2902 unsigned long flags;
2903 unsigned long freed = 0;
2904 struct rcu_fwd_cb *rfcp;
2905
2906 for (;;) {
2907 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2908 rfcp = rfp->rcu_fwd_cb_head;
2909 if (!rfcp) {
2910 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2911 break;
2912 }
2913 rfp->rcu_fwd_cb_head = rfcp->rfc_next;
2914 if (!rfp->rcu_fwd_cb_head)
2915 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
2916 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2917 kfree(rfcp);
2918 freed++;
2919 rcu_torture_fwd_prog_cond_resched(freed);
2920 if (tick_nohz_full_enabled()) {
2921 local_irq_save(flags);
2922 rcu_momentary_eqs();
2923 local_irq_restore(flags);
2924 }
2925 }
2926 return freed;
2927 }
2928
2929 /* Carry out need_resched()/cond_resched() forward-progress testing. */
2930 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
2931 int *tested, int *tested_tries)
2932 {
2933 unsigned long cver;
2934 unsigned long dur;
2935 struct fwd_cb_state fcs;
2936 unsigned long gps;
2937 int idx;
2938 int sd;
2939 int sd4;
2940 bool selfpropcb = false;
2941 unsigned long stopat;
2942 static DEFINE_TORTURE_RANDOM(trs);
2943
2944 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
2945 if (!cur_ops->sync)
2946 return; // Cannot do need_resched() forward progress testing without ->sync.
2947 if (cur_ops->call && cur_ops->cb_barrier) {
2948 init_rcu_head_on_stack(&fcs.rh);
2949 selfpropcb = true;
2950 }
2951
2952 /* Tight loop containing cond_resched(). */
2953 atomic_inc(&rcu_fwd_cb_nodelay);
2954 cur_ops->sync(); /* Later readers see above write. */
2955 if (selfpropcb) {
2956 WRITE_ONCE(fcs.stop, 0);
2957 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
2958 }
2959 cver = READ_ONCE(rcu_torture_current_version);
2960 gps = cur_ops->get_gp_seq();
2961 sd = cur_ops->stall_dur() + 1;
2962 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
2963 dur = sd4 + torture_random(&trs) % (sd - sd4);
2964 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2965 stopat = rfp->rcu_fwd_startat + dur;
2966 while (time_before(jiffies, stopat) &&
2967 !shutdown_time_arrived() &&
2968 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2969 idx = cur_ops->readlock();
2970 udelay(10);
2971 cur_ops->readunlock(idx);
2972 if (!fwd_progress_need_resched || need_resched())
2973 cond_resched();
2974 }
2975 (*tested_tries)++;
2976 if (!time_before(jiffies, stopat) &&
2977 !shutdown_time_arrived() &&
2978 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2979 (*tested)++;
2980 cver = READ_ONCE(rcu_torture_current_version) - cver;
2981 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2982 WARN_ON(!cver && gps < 2);
2983 pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__,
2984 rfp->rcu_fwd_id, dur, cver, gps);
2985 }
2986 if (selfpropcb) {
2987 WRITE_ONCE(fcs.stop, 1);
2988 cur_ops->sync(); /* Wait for running CB to complete. */
2989 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
2990 cur_ops->cb_barrier(); /* Wait for queued callbacks. */
2991 }
2992
2993 if (selfpropcb) {
2994 WARN_ON(READ_ONCE(fcs.stop) != 2);
2995 destroy_rcu_head_on_stack(&fcs.rh);
2996 }
2997 schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
2998 atomic_dec(&rcu_fwd_cb_nodelay);
2999 }
3000
3001 /* Carry out call_rcu() forward-progress testing. */
3002 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
3003 {
3004 unsigned long cver;
3005 unsigned long flags;
3006 unsigned long gps;
3007 int i;
3008 long n_launders;
3009 long n_launders_cb_snap;
3010 long n_launders_sa;
3011 long n_max_cbs;
3012 long n_max_gps;
3013 struct rcu_fwd_cb *rfcp;
3014 struct rcu_fwd_cb *rfcpn;
3015 unsigned long stopat;
3016 unsigned long stoppedat;
3017
3018 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
3019 if (READ_ONCE(rcu_fwd_emergency_stop))
3020 return; /* Get out of the way quickly, no GP wait! */
3021 if (!cur_ops->call)
3022 return; /* Can't do call_rcu() fwd prog without ->call. */
3023
3024 /* Loop continuously posting RCU callbacks. */
3025 atomic_inc(&rcu_fwd_cb_nodelay);
3026 cur_ops->sync(); /* Later readers see above write. */
3027 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
3028 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
3029 n_launders = 0;
3030 rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
3031 n_launders_sa = 0;
3032 n_max_cbs = 0;
3033 n_max_gps = 0;
3034 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
3035 rfp->n_launders_hist[i].n_launders = 0;
3036 cver = READ_ONCE(rcu_torture_current_version);
3037 gps = cur_ops->get_gp_seq();
3038 rfp->rcu_launder_gp_seq_start = gps;
3039 tick_dep_set_task(current, TICK_DEP_BIT_RCU);
3040 while (time_before(jiffies, stopat) &&
3041 !shutdown_time_arrived() &&
3042 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
3043 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
3044 rfcpn = NULL;
3045 if (rfcp)
3046 rfcpn = READ_ONCE(rfcp->rfc_next);
3047 if (rfcpn) {
3048 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
3049 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
3050 break;
3051 rfp->rcu_fwd_cb_head = rfcpn;
3052 n_launders++;
3053 n_launders_sa++;
3054 } else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) {
3055 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
3056 if (WARN_ON_ONCE(!rfcp)) {
3057 schedule_timeout_interruptible(1);
3058 continue;
3059 }
3060 n_max_cbs++;
3061 n_launders_sa = 0;
3062 rfcp->rfc_gps = 0;
3063 rfcp->rfc_rfp = rfp;
3064 } else {
3065 rfcp = NULL;
3066 }
3067 if (rfcp)
3068 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
3069 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
3070 if (tick_nohz_full_enabled()) {
3071 local_irq_save(flags);
3072 rcu_momentary_eqs();
3073 local_irq_restore(flags);
3074 }
3075 }
3076 stoppedat = jiffies;
3077 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
3078 cver = READ_ONCE(rcu_torture_current_version) - cver;
3079 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
3080 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
3081 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
3082 (void)rcu_torture_fwd_prog_cbfree(rfp);
3083
3084 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
3085 !shutdown_time_arrived()) {
3086 if (WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED) && cur_ops->gp_kthread_dbg)
3087 cur_ops->gp_kthread_dbg();
3088 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld #online %u\n",
3089 __func__,
3090 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
3091 n_launders + n_max_cbs - n_launders_cb_snap,
3092 n_launders, n_launders_sa,
3093 n_max_gps, n_max_cbs, cver, gps, num_online_cpus());
3094 atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs);
3095 mutex_lock(&rcu_fwd_mutex); // Serialize histograms.
3096 rcu_torture_fwd_cb_hist(rfp);
3097 mutex_unlock(&rcu_fwd_mutex);
3098 }
3099 schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
3100 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
3101 atomic_dec(&rcu_fwd_cb_nodelay);
3102 }
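
/*
 * For reference, the "laundering" above relies on the earlier-defined
 * rcu_torture_fwd_cb_cr() callback requeuing its rcu_fwd_cb onto the
 * per-test list, so that a single allocation passes through many grace
 * periods.  In outline (a sketch of the idea, not the exact callback):
 *
 *	static void fwd_cb_sketch(struct rcu_head *rhp)
 *	{
 *		struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
 *
 *		rfcp->rfc_gps++;	// One more grace period survived.
 *		// Append rfcp back onto its rcu_fwd list so that the
 *		// loop above can re-post ("launder") it.
 *	}
 */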

/*
 * OOM notifier, but this only prints diagnostic information for the
 * current forward-progress test.
 */
static int rcutorture_oom_notify(struct notifier_block *self,
				 unsigned long notused, void *nfreed)
{
	int i;
	long ncbs;
	struct rcu_fwd *rfp;

	mutex_lock(&rcu_fwd_mutex);
	rfp = rcu_fwds;
	if (!rfp) {
		mutex_unlock(&rcu_fwd_mutex);
		return NOTIFY_OK;
	}
	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
	     __func__);
	for (i = 0; i < fwd_progress; i++) {
		rcu_torture_fwd_cb_hist(&rfp[i]);
		rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2);
	}
	WRITE_ONCE(rcu_fwd_emergency_stop, true);
	smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
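	/*
	 * Make three free-then-wait passes below: callbacks still in
	 * flight during one pass can requeue themselves, so the lists
	 * are freed again after each cb_barrier() until nothing more
	 * can remain queued.
	 */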
	ncbs = 0;
	for (i = 0; i < fwd_progress; i++)
		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
	cur_ops->cb_barrier();
	ncbs = 0;
	for (i = 0; i < fwd_progress; i++)
		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
	cur_ops->cb_barrier();
	ncbs = 0;
	for (i = 0; i < fwd_progress; i++)
		ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
	pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
	smp_mb(); /* Frees before return to avoid redoing OOM. */
	(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
	pr_info("%s returning after OOM processing.\n", __func__);
	mutex_unlock(&rcu_fwd_mutex);
	return NOTIFY_OK;
}

static struct notifier_block rcutorture_oom_nb = {
	.notifier_call = rcutorture_oom_notify
};
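
/*
 * This notifier block is attached to the OOM killer's chain by
 * rcu_torture_fwd_prog_init() below.  For reference, the general pattern
 * (sketched here, not additional test code):
 *
 *	register_oom_notifier(&rcutorture_oom_nb);	// Begin receiving callouts.
 *	...
 *	unregister_oom_notifier(&rcutorture_oom_nb);	// Must precede teardown.
 *
 * A handler signals progress by incrementing the unsigned long that its
 * nfreed argument points to, telling the OOM killer that memory was freed.
 */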

/* Carry out grace-period forward-progress testing. */
static int rcu_torture_fwd_prog(void *args)
{
	bool firsttime = true;
	long max_cbs;
	int oldnice = task_nice(current);
	unsigned long oldseq = READ_ONCE(rcu_fwd_seq);
	struct rcu_fwd *rfp = args;
	int tested = 0;
	int tested_tries = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
	rcu_bind_current_to_nocb();
	if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
		set_user_nice(current, MAX_NICE);
	do {
		if (!rfp->rcu_fwd_id) {
			schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
			WRITE_ONCE(rcu_fwd_emergency_stop, false);
			if (!firsttime) {
				max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0);
				pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs);
			}
			firsttime = false;
			WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1);
		} else {
			while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop())
				schedule_timeout_interruptible(HZ / 20);
			oldseq = READ_ONCE(rcu_fwd_seq);
		}
		pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
		if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id)
			rcu_torture_fwd_prog_cr(rfp);
		if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) &&
		    (!IS_ENABLED(CONFIG_TINY_RCU) ||
		     (rcu_inkernel_boot_has_ended() &&
		      torture_num_online_cpus() > rfp->rcu_fwd_id)))
			rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);

		/* Avoid slow periods, better to test when busy. */
		if (stutter_wait("rcu_torture_fwd_prog"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	/* Short runs might not contain a valid forward-progress attempt. */
	if (!rfp->rcu_fwd_id) {
		WARN_ON(!tested && tested_tries >= 5);
		pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
	}
	torture_kthread_stopping("rcu_torture_fwd_prog");
	return 0;
}

/* If forward-progress checking is requested and feasible, spawn the thread. */
static int __init rcu_torture_fwd_prog_init(void)
{
	int i;
	int ret = 0;
	struct rcu_fwd *rfp;

	if (!fwd_progress)
		return 0; /* Not requested, so don't do it. */
	if (fwd_progress >= nr_cpu_ids) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.\n");
		fwd_progress = nr_cpu_ids;
	} else if (fwd_progress < 0) {
		fwd_progress = nr_cpu_ids;
	}
	if ((!cur_ops->sync && !cur_ops->call) ||
	    (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) ||
	    cur_ops == &rcu_busted_ops) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
		fwd_progress = 0;
		return 0;
	}
	if (stall_cpu > 0 || (preempt_duration > 0 && IS_ENABLED(CONFIG_RCU_NOCB_CPU))) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall and/or preemption testing");
		fwd_progress = 0;
		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
			return -EINVAL; /* In module, can fail back to user. */
		WARN_ON(1); /* Make sure rcutorture scripting notices conflict. */
		return 0;
	}
	if (fwd_progress_holdoff <= 0)
		fwd_progress_holdoff = 1;
	if (fwd_progress_div <= 0)
		fwd_progress_div = 4;
	rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL);
	fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL);
	if (!rfp || !fwd_prog_tasks) {
		kfree(rfp);
		kfree(fwd_prog_tasks);
		fwd_prog_tasks = NULL;
		fwd_progress = 0;
		return -ENOMEM;
	}
	for (i = 0; i < fwd_progress; i++) {
		spin_lock_init(&rfp[i].rcu_fwd_lock);
		rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head;
		rfp[i].rcu_fwd_id = i;
	}
	mutex_lock(&rcu_fwd_mutex);
	rcu_fwds = rfp;
	mutex_unlock(&rcu_fwd_mutex);
	register_oom_notifier(&rcutorture_oom_nb);
	for (i = 0; i < fwd_progress; i++) {
		ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]);
		if (ret) {
			fwd_progress = i;
			return ret;
		}
	}
	return 0;
}

static void rcu_torture_fwd_prog_cleanup(void)
{
	int i;
	struct rcu_fwd *rfp;

	if (!rcu_fwds || !fwd_prog_tasks)
		return;
	for (i = 0; i < fwd_progress; i++)
		torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]);
	unregister_oom_notifier(&rcutorture_oom_nb);
	mutex_lock(&rcu_fwd_mutex);
	rfp = rcu_fwds;
	rcu_fwds = NULL;
	mutex_unlock(&rcu_fwd_mutex);
	kfree(rfp);
	kfree(fwd_prog_tasks);
	fwd_prog_tasks = NULL;
}

/* Callback function for RCU barrier testing. */
static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
{
	atomic_inc(&barrier_cbs_invoked);
}

/* IPI handler to get callback posted on desired CPU, if online. */
static int rcu_torture_barrier1cb(void *rcu_void)
{
	struct rcu_head *rhp = rcu_void;

	cur_ops->call(rhp, rcu_torture_barrier_cbf);
	return 0;
}

/* kthread function to register callbacks used to test RCU barriers. */
static int rcu_torture_barrier_cbs(void *arg)
{
	long myid = (long)arg;
	bool lastphase = false;
	bool newphase;
	struct rcu_head rcu;

	init_rcu_head_on_stack(&rcu);
	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
	set_user_nice(current, MAX_NICE);
	do {
		wait_event(barrier_cbs_wq[myid],
			   (newphase =
			    smp_load_acquire(&barrier_phase)) != lastphase ||
			   torture_must_stop());
		lastphase = newphase;
		if (torture_must_stop())
			break;
		/*
		 * The above smp_load_acquire() ensures barrier_phase load
		 * is ordered before the following ->call().
		 */
		if (smp_call_on_cpu(myid, rcu_torture_barrier1cb, &rcu, 1))
			cur_ops->call(&rcu, rcu_torture_barrier_cbf);

		if (atomic_dec_and_test(&barrier_cbs_count))
			wake_up(&barrier_wq);
	} while (!torture_must_stop());
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	destroy_rcu_head_on_stack(&rcu);
	torture_kthread_stopping("rcu_torture_barrier_cbs");
	return 0;
}

/* kthread function to drive and coordinate RCU barrier testing. */
static int rcu_torture_barrier(void *arg)
{
	int i;

	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
	do {
		atomic_set(&barrier_cbs_invoked, 0);
		atomic_set(&barrier_cbs_count, n_barrier_cbs);
		/* Ensure barrier_phase ordered after prior assignments. */
		smp_store_release(&barrier_phase, !barrier_phase);
		for (i = 0; i < n_barrier_cbs; i++)
			wake_up(&barrier_cbs_wq[i]);
		wait_event(barrier_wq,
			   atomic_read(&barrier_cbs_count) == 0 ||
			   torture_must_stop());
		if (torture_must_stop())
			break;
		n_barrier_attempts++;
		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
			n_rcu_torture_barrier_error++;
			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
			       atomic_read(&barrier_cbs_invoked),
			       n_barrier_cbs);
			WARN_ON(1);
			// Wait manually for the remaining callbacks
			i = 0;
			do {
				if (WARN_ON(i++ > HZ))
					i = INT_MIN;
				schedule_timeout_interruptible(1);
				cur_ops->cb_barrier();
			} while (atomic_read(&barrier_cbs_invoked) !=
				 n_barrier_cbs &&
				 !torture_must_stop());
			smp_mb(); // Can't trust ordering if broken.
			if (!torture_must_stop())
				pr_err("Recovered: barrier_cbs_invoked = %d\n",
				       atomic_read(&barrier_cbs_invoked));
		} else {
			n_barrier_successes++;
		}
		schedule_timeout_interruptible(HZ / 10);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_barrier");
	return 0;
}
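
/*
 * In outline, the handshake between the two kthreads above (a sketch of
 * the control flow, not additional test code):
 *
 *	rcu_torture_barrier()            rcu_torture_barrier_cbs() (one each)
 *	---------------------            ------------------------------------
 *	count = n_barrier_cbs;
 *	release-store flips phase  --->  acquire-load sees the new phase,
 *	                                 posts one callback on its own CPU,
 *	                                 if (atomic_dec_and_test(&count))
 *	wait for count == 0        <---          wake_up(&barrier_wq);
 *	cur_ops->cb_barrier();
 *	all n_barrier_cbs callbacks must now have run, else test failure.
 */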

/* Initialize RCU barrier testing. */
static int rcu_torture_barrier_init(void)
{
	int i;
	int ret;

	if (n_barrier_cbs <= 0)
		return 0;
	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
		pr_alert("%s" TORTURE_FLAG
			 " Call or barrier ops missing for %s,\n",
			 torture_type, cur_ops->name);
		pr_alert("%s" TORTURE_FLAG
			 " RCU barrier testing omitted from run.\n",
			 torture_type);
		return 0;
	}
	atomic_set(&barrier_cbs_count, 0);
	atomic_set(&barrier_cbs_invoked, 0);
	barrier_cbs_tasks =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
			GFP_KERNEL);
	barrier_cbs_wq =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
		return -ENOMEM;
	for (i = 0; i < n_barrier_cbs; i++) {
		init_waitqueue_head(&barrier_cbs_wq[i]);
		ret = torture_create_kthread(rcu_torture_barrier_cbs,
					     (void *)(long)i,
					     barrier_cbs_tasks[i]);
		if (ret)
			return ret;
	}
	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
}

/* Clean up after RCU barrier testing. */
static void rcu_torture_barrier_cleanup(void)
{
	int i;

	torture_stop_kthread(rcu_torture_barrier, barrier_task);
	if (barrier_cbs_tasks != NULL) {
		for (i = 0; i < n_barrier_cbs; i++)
			torture_stop_kthread(rcu_torture_barrier_cbs,
					     barrier_cbs_tasks[i]);
		kfree(barrier_cbs_tasks);
		barrier_cbs_tasks = NULL;
	}
	if (barrier_cbs_wq != NULL) {
		kfree(barrier_cbs_wq);
		barrier_cbs_wq = NULL;
	}
}

static bool rcu_torture_can_boost(void)
{
	static int boost_warn_once;
	int prio;

	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
		return false;
	if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)
		return false;

	prio = rcu_get_gp_kthreads_prio();
	if (!prio)
		return false;

	if (prio < 2) {
		if (boost_warn_once == 1)
			return false;

		pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
		boost_warn_once = 1;
		return false;
	}

	return true;
}

static bool read_exit_child_stop;
static bool read_exit_child_stopped;
static wait_queue_head_t read_exit_wq;

// Child kthread which just does an rcutorture reader and exits.
static int rcu_torture_read_exit_child(void *trsp_in)
{
	struct torture_random_state *trsp = trsp_in;

	set_user_nice(current, MAX_NICE);
	// Minimize time between reading and exiting.
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(HZ / 20);
	(void)rcu_torture_one_read(trsp, -1);
	return 0;
}

// Parent kthread which creates and destroys read-exit child kthreads.
static int rcu_torture_read_exit(void *unused)
{
	bool errexit = false;
	int i;
	struct task_struct *tsp;
	DEFINE_TORTURE_RANDOM(trs);

	// Allocate and initialize.
	set_user_nice(current, MAX_NICE);
	VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test");

	// Each pass through this loop does one read-exit episode.
	do {
		VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode");
		for (i = 0; i < read_exit_burst; i++) {
			if (READ_ONCE(read_exit_child_stop))
				break;
			stutter_wait("rcu_torture_read_exit");
			// Spawn child.
			tsp = kthread_run(rcu_torture_read_exit_child,
					  &trs, "%s", "rcu_torture_read_exit_child");
			if (IS_ERR(tsp)) {
				TOROUT_ERRSTRING("out of memory");
				errexit = true;
				break;
			}
			cond_resched();
			kthread_stop(tsp);
			n_read_exits++;
		}
		VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode");
		rcu_barrier(); // Wait for task_struct free, avoid OOM.
		i = 0;
		for (; !errexit && !READ_ONCE(read_exit_child_stop) && i < read_exit_delay; i++)
			schedule_timeout_uninterruptible(HZ);
	} while (!errexit && !READ_ONCE(read_exit_child_stop));

	// Clean up and exit.
	smp_store_release(&read_exit_child_stopped, true); // After reaping.
	smp_mb(); // Store before wakeup.
	wake_up(&read_exit_wq);
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(HZ / 20);
	torture_kthread_stopping("rcu_torture_read_exit");
	return 0;
}

static int rcu_torture_read_exit_init(void)
{
	if (read_exit_burst <= 0)
		return 0;
	init_waitqueue_head(&read_exit_wq);
	read_exit_child_stop = false;
	read_exit_child_stopped = false;
	return torture_create_kthread(rcu_torture_read_exit, NULL,
				      read_exit_task);
}
static void rcu_torture_read_exit_cleanup(void)
{
	if (!read_exit_task)
		return;
	WRITE_ONCE(read_exit_child_stop, true);
	smp_mb(); // Above write before wait.
	wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
	torture_stop_kthread(rcu_torture_read_exit, read_exit_task);
}

static void rcutorture_test_nmis(int n)
{
#if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
	int cpu;
	int dumpcpu;
	int i;

	for (i = 0; i < n; i++) {
		preempt_disable();
		cpu = smp_processor_id();
		dumpcpu = cpu + 1;
		if (dumpcpu >= nr_cpu_ids)
			dumpcpu = 0;
		pr_alert("%s: CPU %d invoking dump_cpu_task(%d)\n", __func__, cpu, dumpcpu);
		dump_cpu_task(dumpcpu);
		preempt_enable();
		schedule_timeout_uninterruptible(15 * HZ);
	}
#else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
	WARN_ONCE(n, "Non-zero rcutorture.test_nmis=%d permitted only when rcutorture is built in.\n", test_nmis);
#endif // #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
}

// Randomly preempt online CPUs.
static int rcu_torture_preempt(void *unused)
{
	int cpu = -1;
	DEFINE_TORTURE_RANDOM(rand);

	schedule_timeout_idle(stall_cpu_holdoff);
	do {
		// Wait for preempt_interval ms with up to 100us fuzz.
		torture_hrtimeout_ms(preempt_interval, 100, &rand);
		// Select online CPU.
		cpu = cpumask_next(cpu, cpu_online_mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_next(-1, cpu_online_mask);
		WARN_ON_ONCE(cpu >= nr_cpu_ids);
		// Move to that CPU, if can't do so, retry later.
		if (torture_sched_setaffinity(current->pid, cpumask_of(cpu), false))
			continue;
		// Preempt at high-ish priority, then reset to normal.
		sched_set_fifo(current);
		torture_sched_setaffinity(current->pid, cpu_present_mask, true);
		mdelay(preempt_duration);
		sched_set_normal(current, 0);
		stutter_wait("rcu_torture_preempt");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_preempt");
	return 0;
}

static enum cpuhp_state rcutor_hp;

static void
rcu_torture_cleanup(void)
{
	int firsttime;
	int flags = 0;
	unsigned long gp_seq = 0;
	int i;
	int j;

	if (torture_cleanup_begin()) {
		if (cur_ops->cb_barrier != NULL) {
			pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
			cur_ops->cb_barrier();
		}
		if (cur_ops->gp_slow_unregister)
			cur_ops->gp_slow_unregister(NULL);
		return;
	}
	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	rcutorture_test_nmis(test_nmis);

	if (cur_ops->gp_kthread_dbg)
		cur_ops->gp_kthread_dbg();
	torture_stop_kthread(rcu_torture_preempt, preempt_task);
	rcu_torture_read_exit_cleanup();
	rcu_torture_barrier_cleanup();
	rcu_torture_fwd_prog_cleanup();
	torture_stop_kthread(rcu_torture_stall, stall_task);
	torture_stop_kthread(rcu_torture_writer, writer_task);

	if (nocb_tasks) {
		for (i = 0; i < nrealnocbers; i++)
			torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]);
		kfree(nocb_tasks);
		nocb_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	kfree(rcu_torture_reader_mbchk);
	rcu_torture_reader_mbchk = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nrealfakewriters; i++)
			torture_stop_kthread(rcu_torture_fakewriter,
					     fakewriter_tasks[i]);
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	if (cur_ops->get_gp_data)
		cur_ops->get_gp_data(&flags, &gp_seq);
	pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n",
		 cur_ops->name, (long)gp_seq, flags,
		 rcutorture_seq_diff(gp_seq, start_gp_seq));
	torture_stop_kthread(rcu_torture_stats, stats_task);
	torture_stop_kthread(rcu_torture_fqs, fqs_task);
	if (rcu_torture_can_boost() && rcutor_hp >= 0)
		cpuhp_remove_state(rcutor_hp);

	/*
	 * Wait for all RCU callbacks to fire, then do torture-type-specific
	 * cleanup operations.
	 */
	if (cur_ops->cb_barrier != NULL) {
		pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
		cur_ops->cb_barrier();
	}
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	rcu_torture_mem_dump_obj();

	rcu_torture_stats_print(); /* -After- the stats thread is stopped! */

	if (err_segs_recorded) {
		pr_alert("Failure/close-call rcutorture reader segments:\n");
		if (rt_read_nsegs == 0)
			pr_alert("\t: No segments recorded!!!\n");
		firsttime = 1;
		for (i = 0; i < rt_read_nsegs; i++) {
			if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_GP))
				pr_alert("\t%lluus ", div64_u64(err_segs[i].rt_ts, 1000ULL));
			else
				pr_alert("\t");
			pr_cont("%d: %#4x", i, err_segs[i].rt_readstate);
			if (err_segs[i].rt_delay_jiffies != 0) {
				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
					err_segs[i].rt_delay_jiffies);
				firsttime = 0;
			}
			if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_CPU)) {
				pr_cont(" CPU %2d", err_segs[i].rt_cpu);
				if (err_segs[i].rt_cpu != err_segs[i].rt_end_cpu)
					pr_cont("->%-2d", err_segs[i].rt_end_cpu);
				else
					pr_cont(" ...");
			}
			if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_LOG_GP) &&
			    cur_ops->gather_gp_seqs && cur_ops->format_gp_seqs) {
				char buf1[20+1];
				char buf2[20+1];
				char sepchar = '-';

				cur_ops->format_gp_seqs(err_segs[i].rt_gp_seq,
							buf1, ARRAY_SIZE(buf1));
				cur_ops->format_gp_seqs(err_segs[i].rt_gp_seq_end,
							buf2, ARRAY_SIZE(buf2));
				if (err_segs[i].rt_gp_seq == err_segs[i].rt_gp_seq_end) {
					if (buf2[0]) {
						for (j = 0; buf2[j]; j++)
							buf2[j] = '.';
						if (j)
							buf2[j - 1] = ' ';
					}
					sepchar = ' ';
				}
				pr_cont(" %s%c%s", buf1, sepchar, buf2);
			}
			if (err_segs[i].rt_delay_ms != 0) {
				pr_cont(" %s%ldms", firsttime ? "" : "+",
					err_segs[i].rt_delay_ms);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_us != 0) {
				pr_cont(" %s%ldus", firsttime ? "" : "+",
					err_segs[i].rt_delay_us);
				firsttime = 0;
			}
			pr_cont("%s", err_segs[i].rt_preempted ? " preempted" : "");
			if (err_segs[i].rt_readstate & RCUTORTURE_RDR_BH)
				pr_cont(" BH");
			if (err_segs[i].rt_readstate & RCUTORTURE_RDR_IRQ)
				pr_cont(" IRQ");
			if (err_segs[i].rt_readstate & RCUTORTURE_RDR_PREEMPT)
				pr_cont(" PREEMPT");
			if (err_segs[i].rt_readstate & RCUTORTURE_RDR_RBH)
				pr_cont(" RBH");
			if (err_segs[i].rt_readstate & RCUTORTURE_RDR_SCHED)
				pr_cont(" SCHED");
			if (err_segs[i].rt_readstate & RCUTORTURE_RDR_RCU_1)
				pr_cont(" RCU_1");
			if (err_segs[i].rt_readstate & RCUTORTURE_RDR_RCU_2)
				pr_cont(" RCU_2");
			pr_cont("\n");
		}
		if (rt_read_preempted)
			pr_alert("\tReader was preempted.\n");
	}
	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (torture_onoff_failures())
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
	torture_cleanup_end();
	if (cur_ops->gp_slow_unregister)
		cur_ops->gp_slow_unregister(NULL);
}

static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period.  Unlikely, but can happen.  If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
}

/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
	struct rcu_head rh1;
	struct rcu_head rh2;
	int idx;

	if (!IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD)) {
		pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_%s()\n",
			 KBUILD_MODNAME, cur_ops->name);
		return;
	}

	if (WARN_ON_ONCE(cur_ops->debug_objects &&
			 (!cur_ops->call || !cur_ops->cb_barrier)))
		return;

	struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("%s: WARN: Duplicate call_%s() test starting.\n", KBUILD_MODNAME, cur_ops->name);

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	idx = cur_ops->readlock(); /* Make it impossible to finish a grace period. */
	cur_ops->call(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	cur_ops->call(&rh2, rcu_torture_leak_cb);
	cur_ops->call(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	if (rhp) {
		cur_ops->call(rhp, rcu_torture_leak_cb);
		cur_ops->call(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
	}
	cur_ops->readunlock(idx);

	/* Wait for them all to get done so we can safely return. */
	cur_ops->cb_barrier();
	pr_alert("%s: WARN: Duplicate call_%s() test complete.\n", KBUILD_MODNAME, cur_ops->name);
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
	kfree(rhp);
}
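
/*
 * With CONFIG_DEBUG_OBJECTS_RCU_HEAD=y, the pattern exercised above
 * reduces to the following minimal sketch, shown with vanilla call_rcu()
 * rather than cur_ops and with a hypothetical callback some_cb():
 *
 *	static struct rcu_head rh;
 *
 *	call_rcu(&rh, some_cb);
 *	call_rcu(&rh, some_cb);	// Debug-objects should splat here:
 *				// this rcu_head is already queued.
 *	rcu_barrier();		// Wait for invocation before reusing rh.
 */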

static void rcutorture_sync(void)
{
	static unsigned long n;

	if (cur_ops->sync && !(++n & 0xfff))
		cur_ops->sync();
}
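
/*
 * The masking above rate-limits grace periods during CPU-hotplug
 * operations: !(++n & 0xfff) is true only when the low twelve bits of n
 * are all zero, so cur_ops->sync() runs once per 4096 calls.
 */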

static DEFINE_MUTEX(mut0);
static DEFINE_MUTEX(mut1);
static DEFINE_MUTEX(mut2);
static DEFINE_MUTEX(mut3);
static DEFINE_MUTEX(mut4);
static DEFINE_MUTEX(mut5);
static DEFINE_MUTEX(mut6);
static DEFINE_MUTEX(mut7);
static DEFINE_MUTEX(mut8);
static DEFINE_MUTEX(mut9);

static DECLARE_RWSEM(rwsem0);
static DECLARE_RWSEM(rwsem1);
static DECLARE_RWSEM(rwsem2);
static DECLARE_RWSEM(rwsem3);
static DECLARE_RWSEM(rwsem4);
static DECLARE_RWSEM(rwsem5);
static DECLARE_RWSEM(rwsem6);
static DECLARE_RWSEM(rwsem7);
static DECLARE_RWSEM(rwsem8);
static DECLARE_RWSEM(rwsem9);

DEFINE_STATIC_SRCU(srcu0);
DEFINE_STATIC_SRCU(srcu1);
DEFINE_STATIC_SRCU(srcu2);
DEFINE_STATIC_SRCU(srcu3);
DEFINE_STATIC_SRCU(srcu4);
DEFINE_STATIC_SRCU(srcu5);
DEFINE_STATIC_SRCU(srcu6);
DEFINE_STATIC_SRCU(srcu7);
DEFINE_STATIC_SRCU(srcu8);
DEFINE_STATIC_SRCU(srcu9);

static int srcu_lockdep_next(const char *f, const char *fl, const char *fs, const char *fu, int i,
			     int cyclelen, int deadlock)
{
	int j = i + 1;

	if (j >= cyclelen)
		j = deadlock ? 0 : -1;
	if (j >= 0)
		pr_info("%s: %s(%d), %s(%d), %s(%d)\n", f, fl, i, fs, j, fu, i);
	else
		pr_info("%s: %s(%d), %s(%d)\n", f, fl, i, fu, i);
	return j;
}

// Test lockdep on SRCU-based deadlock scenarios.
static void rcu_torture_init_srcu_lockdep(void)
{
	int cyclelen;
	int deadlock;
	bool err = false;
	int i;
	int j;
	int idx;
	struct mutex *muts[] = { &mut0, &mut1, &mut2, &mut3, &mut4,
				 &mut5, &mut6, &mut7, &mut8, &mut9 };
	struct rw_semaphore *rwsems[] = { &rwsem0, &rwsem1, &rwsem2, &rwsem3, &rwsem4,
					  &rwsem5, &rwsem6, &rwsem7, &rwsem8, &rwsem9 };
	struct srcu_struct *srcus[] = { &srcu0, &srcu1, &srcu2, &srcu3, &srcu4,
					&srcu5, &srcu6, &srcu7, &srcu8, &srcu9 };
	int testtype;

	if (!test_srcu_lockdep)
		return;

	deadlock = test_srcu_lockdep / 1000;
	testtype = (test_srcu_lockdep / 10) % 100;
	cyclelen = test_srcu_lockdep % 10;
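	/*
	 * For example, test_srcu_lockdep=1013 decodes as deadlock = 1,
	 * testtype = 1 (SRCU/mutex), and cyclelen = 3: a deliberate
	 * three-way SRCU-vs-mutex deadlock cycle.
	 */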
	WARN_ON_ONCE(ARRAY_SIZE(muts) != ARRAY_SIZE(srcus));
	if (WARN_ONCE(deadlock != !!deadlock,
		      "%s: test_srcu_lockdep=%d and deadlock digit %d must be zero or one.\n",
		      __func__, test_srcu_lockdep, deadlock))
		err = true;
	if (WARN_ONCE(cyclelen <= 0,
		      "%s: test_srcu_lockdep=%d and cycle-length digit %d must be greater than zero.\n",
		      __func__, test_srcu_lockdep, cyclelen))
		err = true;
	if (err)
		goto err_out;

	if (testtype == 0) {
		pr_info("%s: test_srcu_lockdep = %05d: SRCU %d-way %sdeadlock.\n",
			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
		if (deadlock && cyclelen == 1)
			pr_info("%s: Expect hang.\n", __func__);
		for (i = 0; i < cyclelen; i++) {
			j = srcu_lockdep_next(__func__, "srcu_read_lock", "synchronize_srcu",
					      "srcu_read_unlock", i, cyclelen, deadlock);
			idx = srcu_read_lock(srcus[i]);
			if (j >= 0)
				synchronize_srcu(srcus[j]);
			srcu_read_unlock(srcus[i], idx);
		}
		return;
	}

	if (testtype == 1) {
		pr_info("%s: test_srcu_lockdep = %05d: SRCU/mutex %d-way %sdeadlock.\n",
			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
		for (i = 0; i < cyclelen; i++) {
			pr_info("%s: srcu_read_lock(%d), mutex_lock(%d), mutex_unlock(%d), srcu_read_unlock(%d)\n",
				__func__, i, i, i, i);
			idx = srcu_read_lock(srcus[i]);
			mutex_lock(muts[i]);
			mutex_unlock(muts[i]);
			srcu_read_unlock(srcus[i], idx);

			j = srcu_lockdep_next(__func__, "mutex_lock", "synchronize_srcu",
					      "mutex_unlock", i, cyclelen, deadlock);
			mutex_lock(muts[i]);
			if (j >= 0)
				synchronize_srcu(srcus[j]);
			mutex_unlock(muts[i]);
		}
		return;
	}

	if (testtype == 2) {
		pr_info("%s: test_srcu_lockdep = %05d: SRCU/rwsem %d-way %sdeadlock.\n",
			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
		for (i = 0; i < cyclelen; i++) {
			pr_info("%s: srcu_read_lock(%d), down_read(%d), up_read(%d), srcu_read_unlock(%d)\n",
				__func__, i, i, i, i);
			idx = srcu_read_lock(srcus[i]);
			down_read(rwsems[i]);
			up_read(rwsems[i]);
			srcu_read_unlock(srcus[i], idx);

			j = srcu_lockdep_next(__func__, "down_write", "synchronize_srcu",
					      "up_write", i, cyclelen, deadlock);
			down_write(rwsems[i]);
			if (j >= 0)
				synchronize_srcu(srcus[j]);
			up_write(rwsems[i]);
		}
		return;
	}

#ifdef CONFIG_TASKS_TRACE_RCU
	if (testtype == 3) {
		pr_info("%s: test_srcu_lockdep = %05d: SRCU and Tasks Trace RCU %d-way %sdeadlock.\n",
			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
		if (deadlock && cyclelen == 1)
			pr_info("%s: Expect hang.\n", __func__);
		for (i = 0; i < cyclelen; i++) {
			char *fl = i == 0 ? "rcu_read_lock_trace" : "srcu_read_lock";
			char *fs = i == cyclelen - 1 ? "synchronize_rcu_tasks_trace"
						     : "synchronize_srcu";
			char *fu = i == 0 ? "rcu_read_unlock_trace" : "srcu_read_unlock";

			j = srcu_lockdep_next(__func__, fl, fs, fu, i, cyclelen, deadlock);
			if (i == 0)
				rcu_read_lock_trace();
			else
				idx = srcu_read_lock(srcus[i]);
			if (j >= 0) {
				if (i == cyclelen - 1)
					synchronize_rcu_tasks_trace();
				else
					synchronize_srcu(srcus[j]);
			}
			if (i == 0)
				rcu_read_unlock_trace();
			else
				srcu_read_unlock(srcus[i], idx);
		}
		return;
	}
#endif // #ifdef CONFIG_TASKS_TRACE_RCU

err_out:
	pr_info("%s: test_srcu_lockdep = %05d does nothing.\n", __func__, test_srcu_lockdep);
	pr_info("%s: test_srcu_lockdep = DNNL.\n", __func__);
	pr_info("%s: D: Deadlock if nonzero.\n", __func__);
	pr_info("%s: NN: Test number, 0=SRCU, 1=SRCU/mutex, 2=SRCU/rwsem, 3=SRCU/Tasks Trace RCU.\n", __func__);
	pr_info("%s: L: Cycle length.\n", __func__);
	if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU))
		pr_info("%s: NN=3 disallowed because kernel is built with CONFIG_TASKS_TRACE_RCU=n\n", __func__);
}

static int __init
rcu_torture_init(void)
{
	long i;
	int cpu;
	int firsterr = 0;
	int flags = 0;
	unsigned long gp_seq = 0;
	static struct rcu_torture_ops *torture_ops[] = {
		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops,
		TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
		&trivial_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_cont(" %s", torture_ops[i]->name);
		pr_cont("\n");
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (nocbs_nthreads != 0 && (cur_ops != &rcu_ops ||
				    !IS_ENABLED(CONFIG_RCU_NOCB_CPU))) {
		pr_alert("rcu-torture types: %s and CONFIG_RCU_NOCB_CPU=%d, nocb toggle disabled.\n",
			 cur_ops->name, IS_ENABLED(CONFIG_RCU_NOCB_CPU));
		nocbs_nthreads = 0;
	}
	if (cur_ops->init)
		cur_ops->init();

	rcu_torture_init_srcu_lockdep();

	if (nfakewriters >= 0) {
		nrealfakewriters = nfakewriters;
	} else {
		nrealfakewriters = num_online_cpus() - 2 - nfakewriters;
		if (nrealfakewriters <= 0)
			nrealfakewriters = 1;
	}

	if (nreaders >= 0) {
		nrealreaders = nreaders;
	} else {
		nrealreaders = num_online_cpus() - 2 - nreaders;
		if (nrealreaders <= 0)
			nrealreaders = 1;
	}
	rcu_torture_print_module_parms(cur_ops, "Start of test");
	if (cur_ops->get_gp_data)
		cur_ops->get_gp_data(&flags, &gp_seq);
	start_gp_seq = gp_seq;
	pr_alert("%s: Start-test grace-period state: g%ld f%#x\n",
		 cur_ops->name, (long)gp_seq, flags);

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_mbchk_fail, 0);
	atomic_set(&n_rcu_torture_mbchk_tries, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}
	err_segs_recorded = 0;
	rt_read_nsegs = 0;

	/* Start up the kthreads. */

	rcu_torture_write_types();
	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
					  writer_task);
	if (torture_init_error(firsterr))
		goto unwind;

	if (nrealfakewriters > 0) {
		fakewriter_tasks = kcalloc(nrealfakewriters,
					   sizeof(fakewriter_tasks[0]),
					   GFP_KERNEL);
		if (fakewriter_tasks == NULL) {
			TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	for (i = 0; i < nrealfakewriters; i++) {
		firsterr = torture_create_kthread(rcu_torture_fakewriter,
						  NULL, fakewriter_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk),
					   GFP_KERNEL);
	if (!reader_tasks || !rcu_torture_reader_mbchk) {
		TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		rcu_torture_reader_mbchk[i].rtc_chkrdr = -1;
		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
						  reader_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	nrealnocbers = nocbs_nthreads;
	if (WARN_ON(nrealnocbers < 0))
		nrealnocbers = 1;
	if (WARN_ON(nocbs_toggle < 0))
		nocbs_toggle = HZ;
	if (nrealnocbers > 0) {
		nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL);
		if (nocb_tasks == NULL) {
			TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	} else {
		nocb_tasks = NULL;
	}
	for (i = 0; i < nrealnocbers; i++) {
		firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
						  stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (test_no_idle_hz && shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		int t;

		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
		firsterr = torture_stutter_init(stutter * HZ, t);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_holdoff < 0)
		fqs_holdoff = 0;
	if (fqs_duration && fqs_holdoff) {
		/* Create the fqs thread */
		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
						  fqs_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if (rcu_torture_can_boost()) {

		boost_starttime = jiffies + test_boost_interval * HZ;

		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
					     rcutorture_booster_init,
					     rcutorture_booster_cleanup);
		rcutor_hp = firsterr;
		if (torture_init_error(firsterr))
			goto unwind;
	}
	shutdown_jiffies = jiffies + shutdown_secs * HZ;
	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
				      rcutorture_sync);
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_stall_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_fwd_prog_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_barrier_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_read_exit_init();
	if (torture_init_error(firsterr))
		goto unwind;
	if (preempt_duration > 0) {
		firsterr = torture_create_kthread(rcu_torture_preempt, NULL, preempt_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (object_debug)
		rcu_test_debug_objects();
	torture_init_end();
	if (cur_ops->gp_slow_register && !WARN_ON_ONCE(!cur_ops->gp_slow_unregister))
		cur_ops->gp_slow_register(&rcu_fwd_cb_nodelay);
	return 0;

unwind:
	torture_init_end();
	rcu_torture_cleanup();
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);